From 472c933cb8d8435ec43447b6cec7a9363d947bdc Mon Sep 17 00:00:00 2001
From: JordonPhillips
Date: Thu, 29 Oct 2020 15:00:11 -0700
Subject: [PATCH] Update generated code

---
 .../awsrestjson/api_op_GreetingWithErrors.go | 18 +-
 .../awsrestjson/api_op_JsonLists.go | 10 +-
 .../ec2query/api_op_GreetingWithErrors.go | 10 +-
 .../ec2query/api_op_QueryTimestamps.go | 10 +-
 .../protocoltest/ec2query/api_op_XmlLists.go | 18 +-
 .../jsonrpc/api_op_GreetingWithErrors.go | 14 +-
 .../jsonrpc10/api_op_GreetingWithErrors.go | 14 +-
 .../query/api_op_GreetingWithErrors.go | 10 +-
 .../query/api_op_QueryTimestamps.go | 10 +-
 .../protocoltest/query/api_op_XmlLists.go | 18 +-
 .../restxml/api_op_GreetingWithErrors.go | 16 +-
 .../protocoltest/restxml/api_op_XmlLists.go | 18 +-
 service/accessanalyzer/types/enums.go | 24 +-
 service/acm/api_op_ImportCertificate.go | 55 +-
 service/acm/api_op_RequestCertificate.go | 18 +-
 service/acm/api_op_ResendValidationEmail.go | 16 +-
 service/acm/types/enums.go | 146 +-
 service/acm/types/types.go | 44 +-
 service/acmpca/api_op_CreatePermission.go | 22 +-
 service/acmpca/api_op_DeletePermission.go | 22 +-
 service/acmpca/api_op_DeletePolicy.go | 18 +-
 .../api_op_DescribeCertificateAuthority.go | 35 +-
 service/acmpca/api_op_GetPolicy.go | 30 +-
 ...p_ImportCertificateAuthorityCertificate.go | 100 +-
 service/acmpca/api_op_IssueCertificate.go | 34 +-
 service/acmpca/api_op_ListPermissions.go | 22 +-
 service/acmpca/api_op_PutPolicy.go | 18 +-
 service/acmpca/types/enums.go | 50 +-
 service/acmpca/types/types.go | 81 +-
 .../api_op_StartDeviceSync.go | 20 +-
 service/alexaforbusiness/types/enums.go | 114 +-
 service/alexaforbusiness/types/types.go | 12 +-
 service/amplify/types/enums.go | 26 +-
 service/apigateway/api_op_CreateAuthorizer.go | 8 +-
 service/apigateway/api_op_CreateRestApi.go | 12 +-
 .../api_op_DeleteGatewayResponse.go | 48 +-
 service/apigateway/api_op_GetAccount.go | 6 +-
 service/apigateway/api_op_GetAuthorizer.go | 4 +-
 .../apigateway/api_op_GetGatewayResponse.go | 95 +-
 service/apigateway/api_op_GetIntegration.go | 64 +-
 .../api_op_GetIntegrationResponse.go | 4 +-
 service/apigateway/api_op_GetRestApi.go | 6 +-
 service/apigateway/api_op_ImportRestApi.go | 6 +-
 .../apigateway/api_op_PutGatewayResponse.go | 95 +-
 service/apigateway/api_op_PutIntegration.go | 90 +-
 .../api_op_PutIntegrationResponse.go | 8 +-
 service/apigateway/api_op_PutRestApi.go | 6 +-
 service/apigateway/api_op_UpdateAccount.go | 6 +-
 service/apigateway/api_op_UpdateAuthorizer.go | 4 +-
 .../api_op_UpdateGatewayResponse.go | 95 +-
 .../apigateway/api_op_UpdateIntegration.go | 64 +-
 .../api_op_UpdateIntegrationResponse.go | 4 +-
 service/apigateway/api_op_UpdateRestApi.go | 6 +-
 service/apigateway/types/enums.go | 122 +-
 service/apigateway/types/types.go | 125 +-
 service/apigatewayv2/types/enums.go | 34 +-
 .../api_op_CreateConfigurationProfile.go | 8 +-
 service/appconfig/doc.go | 10 +-
 service/appconfig/types/enums.go | 48 +-
 service/appflow/doc.go | 8 +-
 service/appflow/types/enums.go | 490 ++---
 .../api_op_DeleteScalingPolicy.go | 131 +-
 .../api_op_DeleteScheduledAction.go | 131 +-
 .../api_op_DeregisterScalableTarget.go | 129 +-
 .../api_op_DescribeScalableTargets.go | 142 +-
 .../api_op_DescribeScalingActivities.go | 138 +-
 .../api_op_DescribeScalingPolicies.go | 138 +-
 .../api_op_DescribeScheduledActions.go | 138 +-
 .../api_op_PutScalingPolicy.go | 131 +-
 .../api_op_PutScheduledAction.go | 137 +-
 .../api_op_RegisterScalableTarget.go | 137 +-
 service/applicationautoscaling/doc.go | 65 +-
service/applicationautoscaling/types/enums.go | 24 +- service/applicationautoscaling/types/types.go | 693 +++--- .../api_op_DescribeConfigurations.go | 20 +- .../api_op_DescribeExportTasks.go | 4 +- .../api_op_DescribeTags.go | 6 +- .../api_op_StartContinuousExport.go | 4 +- .../api_op_StartImportTask.go | 22 +- service/applicationdiscoveryservice/doc.go | 127 +- .../types/enums.go | 54 +- .../types/types.go | 57 +- .../api_op_ListConfigurationHistory.go | 12 +- service/applicationinsights/types/enums.go | 30 +- service/applicationinsights/types/types.go | 22 +- service/appmesh/types/enums.go | 16 +- service/appmesh/types/types.go | 44 +- service/appstream/api_op_CreateFleet.go | 86 +- .../appstream/api_op_CreateImageBuilder.go | 69 +- .../api_op_DescribeUserStackAssociations.go | 8 +- service/appstream/api_op_UpdateFleet.go | 86 +- service/appstream/doc.go | 4 +- service/appstream/types/enums.go | 162 +- service/appstream/types/types.go | 188 +- service/appsync/api_op_CreateApiCache.go | 55 +- service/appsync/api_op_CreateResolver.go | 13 +- service/appsync/api_op_UpdateApiCache.go | 55 +- service/appsync/api_op_UpdateResolver.go | 13 +- service/appsync/types/enums.go | 66 +- service/appsync/types/types.go | 217 +- service/athena/api_op_CreateDataCatalog.go | 20 +- service/athena/api_op_UpdateDataCatalog.go | 20 +- service/athena/types/enums.go | 10 +- service/athena/types/types.go | 30 +- .../api_op_CompleteLifecycleAction.go | 16 +- .../api_op_DescribeAdjustmentTypes.go | 8 +- .../api_op_DescribeInstanceRefreshes.go | 28 +- .../api_op_DescribeLifecycleHookTypes.go | 4 +- .../api_op_DisableMetricsCollection.go | 33 +- .../api_op_EnableMetricsCollection.go | 39 +- .../autoscaling/api_op_PutLifecycleHook.go | 16 +- .../autoscaling/api_op_PutScalingPolicy.go | 24 +- .../api_op_RecordLifecycleActionHeartbeat.go | 31 +- service/autoscaling/api_op_ResumeProcesses.go | 24 +- .../autoscaling/api_op_SuspendProcesses.go | 24 +- .../api_op_UpdateAutoScalingGroup.go | 25 +- service/autoscaling/types/enums.go | 26 +- service/autoscaling/types/types.go | 271 ++- ...i_op_GetScalingPlanResourceForecastData.go | 43 +- service/autoscalingplans/types/types.go | 182 +- .../backup/api_op_CreateBackupSelection.go | 7 +- .../api_op_GetSupportedResourceTypes.go | 19 +- service/backup/api_op_ListBackupJobs.go | 16 +- service/backup/api_op_ListCopyJobs.go | 16 +- service/backup/api_op_StartRestoreJob.go | 49 +- service/backup/types/enums.go | 30 +- .../batch/api_op_CreateComputeEnvironment.go | 12 +- service/batch/api_op_ListJobs.go | 8 +- service/batch/types/enums.go | 22 +- service/batch/types/types.go | 36 +- service/budgets/doc.go | 58 +- service/budgets/types/enums.go | 34 +- service/budgets/types/types.go | 37 +- service/chime/types/enums.go | 4 +- .../api_op_CreateEnvironmentMembership.go | 7 +- .../api_op_DescribeEnvironmentMemberships.go | 10 +- .../api_op_DescribeEnvironmentStatus.go | 20 +- .../api_op_UpdateEnvironmentMembership.go | 6 +- service/cloud9/doc.go | 47 +- service/cloud9/types/enums.go | 24 +- service/cloud9/types/types.go | 34 +- service/clouddirectory/api_op_AttachObject.go | 4 +- service/clouddirectory/api_op_CreateFacet.go | 14 +- service/clouddirectory/api_op_CreateSchema.go | 8 +- service/clouddirectory/api_op_UpdateFacet.go | 10 +- service/clouddirectory/types/enums.go | 34 +- service/clouddirectory/types/types.go | 17 +- .../cloudformation/api_op_CreateChangeSet.go | 56 +- service/cloudformation/api_op_CreateStack.go | 56 +- .../api_op_CreateStackInstances.go | 34 +- 
.../cloudformation/api_op_CreateStackSet.go | 66 +- ...pi_op_DescribeStackDriftDetectionStatus.go | 39 +- .../api_op_DescribeStackEvents.go | 8 +- .../api_op_DescribeStackResource.go | 8 +- .../api_op_DescribeStackResourceDrifts.go | 17 +- .../api_op_DescribeStackResources.go | 12 +- .../cloudformation/api_op_DescribeStacks.go | 8 +- service/cloudformation/api_op_DescribeType.go | 38 +- .../api_op_DetectStackSetDrift.go | 16 +- service/cloudformation/api_op_GetTemplate.go | 8 +- .../api_op_ListStackResources.go | 8 +- .../cloudformation/api_op_ListTypeVersions.go | 14 +- service/cloudformation/api_op_ListTypes.go | 30 +- service/cloudformation/api_op_RegisterType.go | 27 +- service/cloudformation/api_op_UpdateStack.go | 56 +- .../api_op_UpdateStackInstances.go | 39 +- .../cloudformation/api_op_UpdateStackSet.go | 118 +- service/cloudformation/types/enums.go | 188 +- service/cloudformation/types/types.go | 467 ++-- .../cloudfront/api_op_CreateCachePolicy.go | 6 +- .../api_op_CreateOriginRequestPolicy.go | 24 +- .../cloudfront/api_op_DeleteDistribution.go | 52 +- .../api_op_DeleteStreamingDistribution.go | 55 +- service/cloudfront/api_op_GetCachePolicy.go | 8 +- .../api_op_GetOriginRequestPolicy.go | 18 +- .../cloudfront/api_op_ListCachePolicies.go | 6 +- .../api_op_ListOriginRequestPolicies.go | 6 +- .../cloudfront/api_op_UpdateCachePolicy.go | 12 +- .../cloudfront/api_op_UpdateDistribution.go | 67 +- service/cloudfront/api_op_UpdateKeyGroup.go | 14 +- .../api_op_UpdateOriginRequestPolicy.go | 12 +- .../api_op_UpdateRealtimeLogConfig.go | 8 +- service/cloudfront/types/enums.go | 18 +- service/cloudfront/types/types.go | 569 +++-- service/cloudhsm/api_op_CreateHsm.go | 8 +- service/cloudhsm/api_op_DescribeHsm.go | 8 +- service/cloudhsm/types/enums.go | 4 +- service/cloudhsmv2/api_op_CreateCluster.go | 7 +- service/cloudhsmv2/types/enums.go | 36 +- service/cloudsearch/types/enums.go | 4 +- service/cloudsearch/types/types.go | 12 +- service/cloudsearchdomain/api_op_Search.go | 168 +- .../api_op_UploadDocuments.go | 4 +- service/cloudtrail/api_op_CreateTrail.go | 29 +- service/cloudtrail/api_op_DescribeTrails.go | 14 +- .../cloudtrail/api_op_GetEventSelectors.go | 32 +- .../cloudtrail/api_op_GetInsightSelectors.go | 24 +- service/cloudtrail/api_op_LookupEvents.go | 40 +- .../cloudtrail/api_op_PutEventSelectors.go | 49 +- service/cloudtrail/api_op_UpdateTrail.go | 34 +- service/cloudtrail/types/enums.go | 16 +- service/cloudtrail/types/errors.go | 26 +- service/cloudtrail/types/types.go | 84 +- .../cloudwatch/api_op_GetInsightRuleReport.go | 68 +- service/cloudwatch/api_op_GetMetricData.go | 79 +- .../cloudwatch/api_op_GetMetricStatistics.go | 78 +- .../cloudwatch/api_op_GetMetricWidgetImage.go | 6 +- .../cloudwatch/api_op_PutCompositeAlarm.go | 58 +- service/cloudwatch/api_op_PutMetricAlarm.go | 24 +- service/cloudwatch/api_op_PutMetricData.go | 4 +- service/cloudwatch/types/enums.go | 22 +- service/cloudwatch/types/types.go | 8 +- service/cloudwatchevents/api_op_PutTargets.go | 120 +- service/cloudwatchevents/doc.go | 15 +- service/cloudwatchevents/types/types.go | 33 +- .../cloudwatchlogs/api_op_CreateLogGroup.go | 20 +- .../cloudwatchlogs/api_op_CreateLogStream.go | 8 +- service/cloudwatchlogs/api_op_PutLogEvents.go | 45 +- .../api_op_PutSubscriptionFilter.go | 52 +- service/cloudwatchlogs/doc.go | 20 +- service/cloudwatchlogs/types/enums.go | 12 +- .../api_op_AssociateExternalConnection.go | 21 +- .../api_op_CopyPackageVersions.go | 34 +- .../api_op_DeletePackageVersions.go | 41 +- 
.../api_op_DescribePackageVersion.go | 16 +- .../api_op_DisposePackageVersions.go | 40 +- .../api_op_GetPackageVersionAsset.go | 16 +- .../api_op_GetPackageVersionReadme.go | 32 +- .../api_op_GetRepositoryEndpoint.go | 12 +- .../api_op_ListPackageVersionAssets.go | 30 +- .../api_op_ListPackageVersionDependencies.go | 36 +- .../api_op_ListPackageVersions.go | 56 +- service/codeartifact/api_op_ListPackages.go | 22 +- .../api_op_UpdatePackageVersionsStatus.go | 16 +- service/codeartifact/doc.go | 198 +- service/codeartifact/types/enums.go | 34 +- service/codeartifact/types/types.go | 105 +- service/codebuild/api_op_CreateProject.go | 30 +- service/codebuild/api_op_ListBuildBatches.go | 7 +- .../api_op_ListBuildBatchesForProject.go | 7 +- service/codebuild/api_op_ListBuilds.go | 8 +- .../codebuild/api_op_ListBuildsForProject.go | 8 +- service/codebuild/api_op_ListProjects.go | 25 +- service/codebuild/api_op_ListReportGroups.go | 10 +- service/codebuild/api_op_ListReports.go | 8 +- .../codebuild/api_op_ListSharedProjects.go | 8 +- .../api_op_ListSharedReportGroups.go | 8 +- service/codebuild/api_op_UpdateProject.go | 30 +- service/codebuild/api_op_UpdateReportGroup.go | 6 +- service/codebuild/doc.go | 151 +- service/codebuild/types/enums.go | 180 +- service/codebuild/types/types.go | 545 +++-- .../api_op_CreateApprovalRuleTemplate.go | 23 +- .../api_op_CreatePullRequestApprovalRule.go | 16 +- ...op_UpdatePullRequestApprovalRuleContent.go | 19 +- service/codecommit/doc.go | 355 ++-- service/codecommit/types/enums.go | 64 +- service/codecommit/types/types.go | 12 +- .../api_op_BatchGetDeploymentTargets.go | 45 +- service/codedeploy/api_op_CreateDeployment.go | 10 +- .../api_op_CreateDeploymentConfig.go | 22 +- .../api_op_ListApplicationRevisions.go | 38 +- .../api_op_ListDeploymentInstances.go | 16 +- .../api_op_ListDeploymentTargets.go | 4 +- service/codedeploy/api_op_ListDeployments.go | 20 +- .../api_op_ListOnPremisesInstances.go | 8 +- service/codedeploy/api_op_StopDeployment.go | 6 +- service/codedeploy/doc.go | 65 +- service/codedeploy/types/enums.go | 214 +- service/codedeploy/types/errors.go | 30 +- service/codedeploy/types/types.go | 241 ++- service/codeguruprofiler/types/enums.go | 36 +- .../api_op_ListCodeReviews.go | 13 +- .../api_op_ListRepositoryAssociations.go | 30 +- service/codegurureviewer/types/enums.go | 16 +- service/codegurureviewer/types/types.go | 97 +- service/codepipeline/doc.go | 170 +- service/codepipeline/types/enums.go | 4 +- service/codepipeline/types/types.go | 74 +- service/codestar/doc.go | 57 +- service/codestarconnections/doc.go | 30 +- service/codestarconnections/types/enums.go | 6 +- service/codestarnotifications/doc.go | 60 +- service/codestarnotifications/types/enums.go | 18 +- .../api_op_CreateIdentityPool.go | 12 +- service/cognitoidentity/api_op_GetId.go | 18 +- service/cognitoidentity/types/enums.go | 16 +- .../api_op_AdminConfirmSignUp.go | 18 +- .../api_op_AdminCreateUser.go | 34 +- .../api_op_AdminGetUser.go | 27 +- .../api_op_AdminInitiateAuth.go | 141 +- .../api_op_AdminResetUserPassword.go | 18 +- .../api_op_AdminRespondToAuthChallenge.go | 32 +- .../api_op_AdminUpdateUserAttributes.go | 18 +- .../api_op_ConfirmForgotPassword.go | 18 +- .../api_op_ConfirmSignUp.go | 18 +- .../api_op_CreateIdentityProvider.go | 74 +- .../api_op_CreateUserPoolClient.go | 55 +- .../api_op_ForgotPassword.go | 18 +- ...api_op_GetUserAttributeVerificationCode.go | 18 +- .../api_op_GetUserPoolMfaConfig.go | 10 +- .../api_op_InitiateAuth.go | 119 +- 
.../api_op_ListUsers.go | 53 +- .../api_op_ResendConfirmationCode.go | 18 +- .../api_op_RespondToAuthChallenge.go | 44 +- .../api_op_SetUserPoolMfaConfig.go | 20 +- .../cognitoidentityprovider/api_op_SignUp.go | 18 +- .../api_op_UpdateUserAttributes.go | 18 +- .../api_op_UpdateUserPool.go | 12 +- .../api_op_UpdateUserPoolClient.go | 55 +- .../cognitoidentityprovider/types/enums.go | 120 +- .../cognitoidentityprovider/types/types.go | 254 ++- .../api_op_GetBulkPublishDetails.go | 17 +- service/cognitosync/types/enums.go | 16 +- service/cognitosync/types/types.go | 4 +- .../api_op_CreateDocumentClassifier.go | 4 +- .../api_op_CreateEntityRecognizer.go | 4 +- .../api_op_StartDocumentClassificationJob.go | 24 +- ...pi_op_StartDominantLanguageDetectionJob.go | 19 +- .../api_op_StartEntitiesDetectionJob.go | 28 +- .../api_op_StartKeyPhrasesDetectionJob.go | 19 +- .../api_op_StartSentimentDetectionJob.go | 19 +- .../api_op_StartTopicsDetectionJob.go | 18 +- service/comprehend/types/enums.go | 148 +- service/comprehend/types/types.go | 100 +- service/comprehendmedical/types/enums.go | 162 +- service/computeoptimizer/types/enums.go | 184 +- service/computeoptimizer/types/types.go | 20 +- .../api_op_BatchGetAggregateResourceConfig.go | 8 +- .../api_op_BatchGetResourceConfig.go | 8 +- .../api_op_DeliverConfigSnapshot.go | 12 +- .../api_op_DescribeComplianceByConfigRule.go | 10 +- .../api_op_DescribeComplianceByResource.go | 12 +- ...ibeConfigurationAggregatorSourcesStatus.go | 8 +- .../api_op_GetDiscoveredResourceCounts.go | 49 +- .../api_op_StartConfigRulesEvaluation.go | 20 +- service/configservice/types/enums.go | 152 +- service/configservice/types/errors.go | 63 +- service/configservice/types/types.go | 368 ++-- service/connect/api_op_StartChatContact.go | 4 +- service/connect/types/enums.go | 108 +- .../connectparticipant/api_op_SendEvent.go | 4 +- service/connectparticipant/types/enums.go | 10 +- service/costandusagereportservice/doc.go | 2 +- .../costandusagereportservice/types/enums.go | 50 +- .../costexplorer/api_op_GetCostForecast.go | 13 +- .../costexplorer/api_op_GetDimensionValues.go | 266 ++- .../api_op_GetReservationCoverage.go | 85 +- .../api_op_GetReservationUtilization.go | 27 +- .../api_op_GetRightsizingRecommendation.go | 53 +- .../api_op_GetSavingsPlansCoverage.go | 24 +- .../api_op_GetSavingsPlansUtilization.go | 18 +- ...pi_op_GetSavingsPlansUtilizationDetails.go | 15 +- .../costexplorer/api_op_GetUsageForecast.go | 4 +- service/costexplorer/doc.go | 2 +- service/costexplorer/types/enums.go | 146 +- service/costexplorer/types/types.go | 106 +- .../api_op_ApplyPendingMaintenanceAction.go | 13 +- .../api_op_CreateEndpoint.go | 18 +- .../api_op_CreateReplicationInstance.go | 10 +- .../api_op_CreateReplicationTask.go | 8 +- .../api_op_ModifyEndpoint.go | 14 +- .../api_op_ModifyReplicationInstance.go | 8 +- .../api_op_ModifyReplicationTask.go | 10 +- ...pi_op_StartReplicationTaskAssessmentRun.go | 10 +- .../databasemigrationservice/types/enums.go | 58 +- .../databasemigrationservice/types/types.go | 249 ++- service/dataexchange/types/enums.go | 70 +- .../api_op_PutPipelineDefinition.go | 18 +- service/datapipeline/types/types.go | 50 +- service/datasync/api_op_CreateLocationEfs.go | 22 +- service/datasync/types/enums.go | 72 +- service/datasync/types/types.go | 6 +- service/dax/api_op_CreateCluster.go | 28 +- service/dax/types/enums.go | 14 +- service/dax/types/types.go | 18 +- service/detective/api_op_CreateMembers.go | 14 +- .../detective/api_op_StartMonitoringMember.go | 6 +- 
service/detective/doc.go | 33 +- service/detective/types/enums.go | 14 +- service/detective/types/errors.go | 10 +- service/detective/types/types.go | 46 +- .../api_op_CreateRemoteAccessSession.go | 12 +- service/devicefarm/api_op_CreateUpload.go | 93 +- .../api_op_GetDevicePoolCompatibility.go | 47 +- .../devicefarm/api_op_GetTestGridSession.go | 6 +- service/devicefarm/api_op_ListArtifacts.go | 7 +- service/devicefarm/api_op_ListDevicePools.go | 8 +- service/devicefarm/api_op_ListDevices.go | 88 +- .../devicefarm/api_op_ListUniqueProblems.go | 17 +- service/devicefarm/api_op_ListUploads.go | 88 +- service/devicefarm/doc.go | 12 +- service/devicefarm/types/enums.go | 310 +-- service/devicefarm/types/types.go | 898 ++++---- ...api_op_AllocateConnectionOnInterconnect.go | 41 +- .../api_op_AllocateHostedConnection.go | 41 +- .../api_op_AllocatePrivateVirtualInterface.go | 37 +- .../api_op_AllocatePublicVirtualInterface.go | 37 +- .../api_op_AssociateConnectionWithLag.go | 41 +- .../api_op_AssociateHostedConnection.go | 41 +- .../api_op_AssociateVirtualInterface.go | 37 +- .../directconnect/api_op_ConfirmConnection.go | 41 +- .../api_op_ConfirmPrivateVirtualInterface.go | 37 +- .../api_op_ConfirmPublicVirtualInterface.go | 37 +- .../api_op_ConfirmTransitVirtualInterface.go | 37 +- .../directconnect/api_op_CreateConnection.go | 41 +- .../api_op_CreateInterconnect.go | 22 +- service/directconnect/api_op_CreateLag.go | 26 +- .../api_op_CreatePrivateVirtualInterface.go | 37 +- .../api_op_CreatePublicVirtualInterface.go | 37 +- .../directconnect/api_op_DeleteConnection.go | 41 +- .../api_op_DeleteInterconnect.go | 22 +- service/directconnect/api_op_DeleteLag.go | 26 +- .../api_op_DeleteVirtualInterface.go | 37 +- .../api_op_DisassociateConnectionFromLag.go | 41 +- service/directconnect/api_op_UpdateLag.go | 46 +- ...api_op_UpdateVirtualInterfaceAttributes.go | 37 +- service/directconnect/types/types.go | 237 +-- .../directoryservice/api_op_AddIpRoutes.go | 75 +- .../api_op_ResetUserPassword.go | 15 +- service/directoryservice/types/enums.go | 112 +- service/directoryservice/types/types.go | 7 +- service/dlm/types/enums.go | 2 +- .../api_op_ApplyPendingMaintenanceAction.go | 13 +- .../api_op_CopyDBClusterParameterGroup.go | 32 +- service/docdb/api_op_CopyDBClusterSnapshot.go | 30 +- service/docdb/api_op_CreateDBCluster.go | 36 +- .../api_op_CreateDBClusterParameterGroup.go | 8 +- .../docdb/api_op_CreateDBClusterSnapshot.go | 10 +- service/docdb/api_op_CreateDBInstance.go | 10 +- service/docdb/api_op_DeleteDBCluster.go | 11 +- .../api_op_DeleteDBClusterParameterGroup.go | 8 +- service/docdb/api_op_DeleteDBInstance.go | 2 +- service/docdb/api_op_DescribeCertificates.go | 7 +- ...api_op_DescribeDBClusterParameterGroups.go | 2 +- .../api_op_DescribeDBClusterParameters.go | 2 +- .../api_op_DescribeDBClusterSnapshots.go | 42 +- service/docdb/api_op_DescribeDBClusters.go | 6 +- .../docdb/api_op_DescribeDBEngineVersions.go | 2 +- service/docdb/api_op_DescribeDBInstances.go | 8 +- service/docdb/api_op_DescribeEvents.go | 22 +- ...pi_op_DescribePendingMaintenanceActions.go | 13 +- service/docdb/api_op_FailoverDBCluster.go | 3 +- service/docdb/api_op_ModifyDBCluster.go | 27 +- .../api_op_ModifyDBClusterParameterGroup.go | 8 +- service/docdb/api_op_ModifyDBInstance.go | 12 +- service/docdb/api_op_RebootDBInstance.go | 2 +- .../api_op_ResetDBClusterParameterGroup.go | 8 +- .../api_op_RestoreDBClusterFromSnapshot.go | 27 +- .../api_op_RestoreDBClusterToPointInTime.go | 38 +- service/docdb/types/types.go | 15 +- 
service/dynamodb/api_op_BatchGetItem.go | 85 +- service/dynamodb/api_op_BatchWriteItem.go | 136 +- service/dynamodb/api_op_CreateBackup.go | 11 +- service/dynamodb/api_op_CreateGlobalTable.go | 54 +- service/dynamodb/api_op_CreateTable.go | 165 +- service/dynamodb/api_op_DeleteItem.go | 66 +- .../api_op_DescribeContributorInsights.go | 12 +- service/dynamodb/api_op_DescribeLimits.go | 53 +- service/dynamodb/api_op_GetItem.go | 50 +- service/dynamodb/api_op_ListBackups.go | 10 +- service/dynamodb/api_op_PutItem.go | 105 +- service/dynamodb/api_op_Query.go | 160 +- .../dynamodb/api_op_RestoreTableFromBackup.go | 16 +- .../api_op_RestoreTableToPointInTime.go | 36 +- service/dynamodb/api_op_Scan.go | 112 +- service/dynamodb/api_op_TransactGetItems.go | 12 +- service/dynamodb/api_op_TransactWriteItems.go | 90 +- service/dynamodb/api_op_UpdateGlobalTable.go | 11 +- .../api_op_UpdateGlobalTableSettings.go | 8 +- service/dynamodb/api_op_UpdateItem.go | 196 +- service/dynamodb/api_op_UpdateTable.go | 53 +- service/dynamodb/types/enums.go | 106 +- service/dynamodb/types/errors.go | 184 +- service/dynamodb/types/types.go | 949 ++++----- .../api_op_GetShardIterator.go | 22 +- service/dynamodbstreams/types/enums.go | 16 +- service/dynamodbstreams/types/errors.go | 4 +- service/dynamodbstreams/types/types.go | 90 +- service/ebs/api_op_StartSnapshot.go | 10 +- service/ebs/types/enums.go | 40 +- service/ec2/api_op_AttachVolume.go | 18 +- service/ec2/api_op_CopyImage.go | 10 +- service/ec2/api_op_CopySnapshot.go | 14 +- .../ec2/api_op_CreateCapacityReservation.go | 36 +- service/ec2/api_op_CreateClientVpnEndpoint.go | 12 +- service/ec2/api_op_CreateClientVpnRoute.go | 15 +- service/ec2/api_op_CreateCustomerGateway.go | 12 +- service/ec2/api_op_CreateDhcpOptions.go | 18 +- service/ec2/api_op_CreateRoute.go | 14 +- service/ec2/api_op_CreateVolume.go | 14 +- .../ec2/api_op_DescribeAccountAttributes.go | 20 +- service/ec2/api_op_DescribeAddresses.go | 53 +- .../ec2/api_op_DescribeAvailabilityZones.go | 54 +- service/ec2/api_op_DescribeBundleTasks.go | 37 +- .../api_op_DescribeCapacityReservations.go | 107 +- service/ec2/api_op_DescribeCarrierGateways.go | 31 +- .../api_op_DescribeClassicLinkInstances.go | 26 +- ..._op_DescribeClientVpnAuthorizationRules.go | 13 +- .../api_op_DescribeClientVpnConnections.go | 4 +- .../ec2/api_op_DescribeClientVpnEndpoints.go | 8 +- service/ec2/api_op_DescribeClientVpnRoutes.go | 10 +- .../api_op_DescribeClientVpnTargetNetworks.go | 10 +- service/ec2/api_op_DescribeCoipPools.go | 6 +- .../ec2/api_op_DescribeCustomerGateways.go | 36 +- service/ec2/api_op_DescribeDhcpOptions.go | 30 +- ...i_op_DescribeEgressOnlyInternetGateways.go | 16 +- service/ec2/api_op_DescribeElasticGpus.go | 21 +- .../api_op_DescribeFastSnapshotRestores.go | 14 +- service/ec2/api_op_DescribeFleetInstances.go | 2 +- service/ec2/api_op_DescribeFleets.go | 18 +- service/ec2/api_op_DescribeFlowLogs.go | 37 +- service/ec2/api_op_DescribeFpgaImages.go | 47 +- ...api_op_DescribeHostReservationOfferings.go | 6 +- .../ec2/api_op_DescribeHostReservations.go | 24 +- service/ec2/api_op_DescribeHosts.go | 28 +- ..._DescribeIamInstanceProfileAssociations.go | 6 +- service/ec2/api_op_DescribeImages.go | 126 +- ...op_DescribeInstanceCreditSpecifications.go | 2 +- service/ec2/api_op_DescribeInstanceStatus.go | 72 +- .../api_op_DescribeInstanceTypeOfferings.go | 6 +- service/ec2/api_op_DescribeInstanceTypes.go | 120 +- service/ec2/api_op_DescribeInstances.go | 351 ++- .../ec2/api_op_DescribeInternetGateways.go | 20 +- 
service/ec2/api_op_DescribeIpv6Pools.go | 16 +- service/ec2/api_op_DescribeKeyPairs.go | 22 +- .../api_op_DescribeLaunchTemplateVersions.go | 28 +- service/ec2/api_op_DescribeLaunchTemplates.go | 20 +- ...eTableVirtualInterfaceGroupAssociations.go | 12 +- ...beLocalGatewayRouteTableVpcAssociations.go | 10 +- .../api_op_DescribeLocalGatewayRouteTables.go | 8 +- ...cribeLocalGatewayVirtualInterfaceGroups.go | 6 +- service/ec2/api_op_DescribeLocalGateways.go | 16 +- .../ec2/api_op_DescribeManagedPrefixLists.go | 8 +- service/ec2/api_op_DescribeMovingAddresses.go | 2 +- service/ec2/api_op_DescribeNatGateways.go | 27 +- service/ec2/api_op_DescribeNetworkAcls.go | 78 +- ..._op_DescribeNetworkInterfacePermissions.go | 16 +- .../ec2/api_op_DescribeNetworkInterfaces.go | 162 +- service/ec2/api_op_DescribePlacementGroups.go | 18 +- service/ec2/api_op_DescribePrefixLists.go | 4 +- service/ec2/api_op_DescribePublicIpv4Pools.go | 16 +- service/ec2/api_op_DescribeRegions.go | 10 +- .../ec2/api_op_DescribeReservedInstances.go | 71 +- ...pi_op_DescribeReservedInstancesListings.go | 13 +- ..._DescribeReservedInstancesModifications.go | 47 +- ...i_op_DescribeReservedInstancesOfferings.go | 25 +- service/ec2/api_op_DescribeRouteTables.go | 99 +- ...p_DescribeScheduledInstanceAvailability.go | 12 +- .../ec2/api_op_DescribeScheduledInstances.go | 12 +- service/ec2/api_op_DescribeSecurityGroups.go | 105 +- service/ec2/api_op_DescribeSnapshots.go | 67 +- .../api_op_DescribeSpotInstanceRequests.go | 143 +- .../ec2/api_op_DescribeSpotPriceHistory.go | 26 +- service/ec2/api_op_DescribeSubnets.go | 70 +- service/ec2/api_op_DescribeTags.go | 29 +- .../api_op_DescribeTrafficMirrorFilters.go | 8 +- .../api_op_DescribeTrafficMirrorSessions.go | 26 +- .../api_op_DescribeTrafficMirrorTargets.go | 20 +- ...pi_op_DescribeTransitGatewayAttachments.go | 37 +- ..._DescribeTransitGatewayMulticastDomains.go | 8 +- ...escribeTransitGatewayPeeringAttachments.go | 23 +- ...pi_op_DescribeTransitGatewayRouteTables.go | 21 +- ...op_DescribeTransitGatewayVpcAttachments.go | 10 +- service/ec2/api_op_DescribeTransitGateways.go | 49 +- service/ec2/api_op_DescribeVolumeStatus.go | 49 +- service/ec2/api_op_DescribeVolumes.go | 73 +- .../api_op_DescribeVolumesModifications.go | 39 +- service/ec2/api_op_DescribeVpcClassicLink.go | 12 +- ...cribeVpcEndpointConnectionNotifications.go | 20 +- .../api_op_DescribeVpcEndpointConnections.go | 10 +- ...escribeVpcEndpointServiceConfigurations.go | 26 +- ...p_DescribeVpcEndpointServicePermissions.go | 7 +- .../ec2/api_op_DescribeVpcEndpointServices.go | 14 +- service/ec2/api_op_DescribeVpcEndpoints.go | 31 +- .../api_op_DescribeVpcPeeringConnections.go | 48 +- service/ec2/api_op_DescribeVpcs.go | 62 +- service/ec2/api_op_DescribeVpnConnections.go | 39 +- service/ec2/api_op_DescribeVpnGateways.go | 42 +- service/ec2/api_op_DetachNetworkInterface.go | 16 +- ...i_op_DisassociateClientVpnTargetNetwork.go | 10 +- .../ec2/api_op_ExportTransitGatewayRoutes.go | 37 +- .../ec2/api_op_GetCapacityReservationUsage.go | 28 +- service/ec2/api_op_GetCoipPoolUsage.go | 8 +- ...GetTransitGatewayAttachmentPropagations.go | 4 +- ...ansitGatewayMulticastDomainAssociations.go | 18 +- ...p_GetTransitGatewayPrefixListReferences.go | 27 +- ...GetTransitGatewayRouteTableAssociations.go | 8 +- ...GetTransitGatewayRouteTablePropagations.go | 8 +- service/ec2/api_op_ImportImage.go | 24 +- service/ec2/api_op_ImportSnapshot.go | 24 +- .../ec2/api_op_ModifyCapacityReservation.go | 12 +- service/ec2/api_op_ModifyClientVpnEndpoint.go 
| 12 +- .../ec2/api_op_ModifyEbsDefaultKmsKeyId.go | 14 +- service/ec2/api_op_ModifyInstancePlacement.go | 14 +- ...pi_op_ModifyVpcPeeringConnectionOptions.go | 8 +- service/ec2/api_op_ModifyVpnConnection.go | 18 +- service/ec2/api_op_RegisterImage.go | 16 +- service/ec2/api_op_ReportInstanceStatus.go | 37 +- service/ec2/api_op_RequestSpotInstances.go | 6 +- service/ec2/api_op_RunInstances.go | 28 +- ..._op_SearchTransitGatewayMulticastGroups.go | 36 +- .../ec2/api_op_SearchTransitGatewayRoutes.go | 43 +- service/ec2/doc.go | 12 +- service/ec2/types/enums.go | 194 +- service/ec2/types/types.go | 404 ++-- service/ecr/types/enums.go | 14 +- service/ecs/api_op_CreateCapacityProvider.go | 35 +- service/ecs/api_op_CreateCluster.go | 35 +- service/ecs/api_op_CreateService.go | 104 +- service/ecs/api_op_CreateTaskSet.go | 36 +- service/ecs/api_op_DescribeClusters.go | 26 +- service/ecs/api_op_DescribeTaskDefinition.go | 35 +- .../ecs/api_op_RegisterContainerInstance.go | 35 +- service/ecs/api_op_RegisterTaskDefinition.go | 91 +- service/ecs/api_op_RunTask.go | 49 +- service/ecs/api_op_StartTask.go | 35 +- service/ecs/api_op_TagResource.go | 35 +- .../api_op_UpdateContainerInstancesState.go | 48 +- service/ecs/api_op_UpdateService.go | 50 +- service/ecs/types/enums.go | 62 +- service/ecs/types/types.go | 617 +++--- service/efs/api_op_CreateFileSystem.go | 38 +- service/efs/api_op_CreateMountTarget.go | 135 +- service/efs/api_op_DeleteMountTarget.go | 17 +- ...pi_op_DescribeMountTargetSecurityGroups.go | 4 +- .../api_op_ModifyMountTargetSecurityGroups.go | 9 +- .../efs/api_op_PutLifecycleConfiguration.go | 21 +- service/efs/types/enums.go | 14 +- service/efs/types/types.go | 8 +- service/eks/types/enums.go | 140 +- service/eks/types/types.go | 112 +- ...i_op_AuthorizeCacheSecurityGroupIngress.go | 6 +- service/elasticache/api_op_CopySnapshot.go | 36 +- .../elasticache/api_op_CreateCacheCluster.go | 94 +- .../api_op_CreateCacheParameterGroup.go | 4 +- .../api_op_CreateCacheSecurityGroup.go | 6 +- .../api_op_CreateCacheSubnetGroup.go | 4 +- .../api_op_CreateGlobalReplicationGroup.go | 8 +- .../api_op_CreateReplicationGroup.go | 105 +- ...reaseNodeGroupsInGlobalReplicationGroup.go | 4 +- .../api_op_DecreaseReplicaCount.go | 14 +- .../elasticache/api_op_DeleteCacheCluster.go | 16 +- .../api_op_DeleteGlobalReplicationGroup.go | 8 +- .../api_op_DescribeCacheEngineVersions.go | 8 +- .../api_op_DescribeReservedCacheNodes.go | 65 +- ..._op_DescribeReservedCacheNodesOfferings.go | 65 +- ...i_op_DisassociateGlobalReplicationGroup.go | 4 +- .../api_op_FailoverGlobalReplicationGroup.go | 4 +- ...reaseNodeGroupsInGlobalReplicationGroup.go | 4 +- .../elasticache/api_op_ModifyCacheCluster.go | 95 +- .../api_op_ModifyCacheParameterGroup.go | 4 +- .../api_op_ModifyCacheSubnetGroup.go | 4 +- .../api_op_ModifyGlobalReplicationGroup.go | 4 +- .../api_op_ModifyReplicationGroup.go | 44 +- ..._RebalanceSlotsInGlobalReplicationGroup.go | 4 +- .../api_op_ResetCacheParameterGroup.go | 4 +- .../api_op_RevokeCacheSecurityGroupIngress.go | 6 +- service/elasticache/api_op_TestFailover.go | 53 +- service/elasticache/types/enums.go | 44 +- service/elasticache/types/errors.go | 8 +- service/elasticache/types/types.go | 370 ++-- .../api_op_CheckDNSAvailability.go | 4 +- .../api_op_CreateConfigurationTemplate.go | 20 +- .../api_op_CreateEnvironment.go | 42 +- .../api_op_DescribeConfigurationSettings.go | 2 +- .../api_op_ListPlatformBranches.go | 29 +- .../api_op_RequestEnvironmentInfo.go | 2 +- .../api_op_RetrieveEnvironmentInfo.go | 
2 +- .../api_op_TerminateEnvironment.go | 48 +- .../api_op_UpdateConfigurationTemplate.go | 16 +- .../api_op_UpdateEnvironment.go | 42 +- service/elasticbeanstalk/types/enums.go | 6 +- service/elasticbeanstalk/types/errors.go | 6 +- service/elasticbeanstalk/types/types.go | 168 +- .../api_op_ModifyLoadBalancerAttributes.go | 14 +- service/elasticloadbalancing/types/types.go | 61 +- .../api_op_CreateListener.go | 36 +- .../api_op_CreateTargetGroup.go | 8 +- .../api_op_ModifyListener.go | 36 +- service/elasticloadbalancingv2/types/enums.go | 62 +- service/elasticloadbalancingv2/types/types.go | 285 ++- ...DescribeElasticsearchInstanceTypeLimits.go | 10 +- ...ibeInboundCrossClusterSearchConnections.go | 10 +- ...beOutboundCrossClusterSearchConnections.go | 12 +- .../api_op_GetUpgradeStatus.go | 15 +- service/elasticsearchservice/types/enums.go | 96 +- service/elasticsearchservice/types/types.go | 148 +- .../api_op_CreatePipeline.go | 211 +- .../api_op_UpdatePipeline.go | 172 +- .../api_op_UpdatePipelineNotifications.go | 30 +- .../api_op_UpdatePipelineStatus.go | 4 +- service/elastictranscoder/types/types.go | 1389 ++++++------ service/emr/api_op_DescribeJobFlows.go | 6 +- service/emr/api_op_ListNotebookExecutions.go | 43 +- service/emr/api_op_RunJobFlow.go | 26 +- service/emr/types/enums.go | 220 +- service/emr/types/types.go | 80 +- service/eventbridge/api_op_PutTargets.go | 120 +- service/eventbridge/doc.go | 15 +- service/eventbridge/types/types.go | 33 +- .../firehose/api_op_CreateDeliveryStream.go | 28 +- .../firehose/api_op_ListDeliveryStreams.go | 6 +- service/firehose/types/enums.go | 80 +- service/firehose/types/types.go | 50 +- service/fms/api_op_DeletePolicy.go | 40 +- service/fms/api_op_GetProtectionStatus.go | 18 +- service/fms/api_op_PutPolicy.go | 30 +- service/fms/types/enums.go | 16 +- service/fms/types/types.go | 51 +- service/forecast/api_op_CreateDataset.go | 69 +- service/forecast/api_op_CreateDatasetGroup.go | 37 +- .../forecast/api_op_CreateDatasetImportJob.go | 50 +- service/forecast/api_op_CreateForecast.go | 39 +- .../api_op_CreateForecastExportJob.go | 38 +- service/forecast/api_op_CreatePredictor.go | 75 +- service/forecast/api_op_DescribeDataset.go | 20 +- .../forecast/api_op_DescribeDatasetGroup.go | 31 +- .../api_op_DescribeDatasetImportJob.go | 42 +- service/forecast/api_op_DescribeForecast.go | 16 +- .../api_op_DescribeForecastExportJob.go | 16 +- service/forecast/api_op_DescribePredictor.go | 38 +- .../forecast/api_op_ListDatasetImportJobs.go | 16 +- .../forecast/api_op_ListForecastExportJobs.go | 14 +- service/forecast/api_op_ListForecasts.go | 16 +- service/forecast/api_op_ListPredictors.go | 16 +- service/forecast/api_op_TagResource.go | 39 +- service/forecast/types/enums.go | 24 +- service/forecast/types/types.go | 237 +-- service/forecastquery/types/types.go | 10 +- .../api_op_UpdateModelVersionStatus.go | 6 +- service/frauddetector/types/enums.go | 14 +- service/frauddetector/types/errors.go | 14 +- .../fsx/api_op_CancelDataRepositoryTask.go | 28 +- service/fsx/api_op_CreateBackup.go | 37 +- service/fsx/api_op_CreateFileSystem.go | 30 +- .../fsx/api_op_CreateFileSystemFromBackup.go | 37 +- service/fsx/api_op_DescribeBackups.go | 8 +- service/fsx/api_op_DescribeFileSystems.go | 8 +- service/fsx/api_op_ListTagsForResource.go | 8 +- service/fsx/api_op_UpdateFileSystem.go | 28 +- service/fsx/types/enums.go | 70 +- service/fsx/types/types.go | 211 +- service/gamelift/api_op_AcceptMatch.go | 10 +- service/gamelift/api_op_ClaimGameServer.go | 16 +- 
service/gamelift/api_op_CreateAlias.go | 14 +- service/gamelift/api_op_CreateBuild.go | 30 +- service/gamelift/api_op_CreateFleet.go | 60 +- .../gamelift/api_op_CreateGameServerGroup.go | 47 +- service/gamelift/api_op_CreateGameSession.go | 25 +- .../gamelift/api_op_CreateGameSessionQueue.go | 11 +- .../api_op_CreateMatchmakingConfiguration.go | 16 +- .../api_op_CreateMatchmakingRuleSet.go | 26 +- .../gamelift/api_op_CreatePlayerSession.go | 18 +- .../gamelift/api_op_CreatePlayerSessions.go | 16 +- service/gamelift/api_op_CreateScript.go | 20 +- .../api_op_CreateVpcPeeringAuthorization.go | 12 +- .../api_op_CreateVpcPeeringConnection.go | 12 +- service/gamelift/api_op_DeleteAlias.go | 15 +- service/gamelift/api_op_DeleteBuild.go | 12 +- service/gamelift/api_op_DeleteFleet.go | 16 +- .../gamelift/api_op_DeleteGameServerGroup.go | 65 +- .../gamelift/api_op_DeleteGameSessionQueue.go | 11 +- .../api_op_DeleteMatchmakingConfiguration.go | 16 +- .../api_op_DeleteMatchmakingRuleSet.go | 18 +- .../gamelift/api_op_DeleteScalingPolicy.go | 24 +- service/gamelift/api_op_DeleteScript.go | 12 +- .../api_op_DeleteVpcPeeringAuthorization.go | 19 +- .../api_op_DeleteVpcPeeringConnection.go | 12 +- .../gamelift/api_op_DeregisterGameServer.go | 12 +- service/gamelift/api_op_DescribeAlias.go | 14 +- service/gamelift/api_op_DescribeBuild.go | 12 +- .../api_op_DescribeEC2InstanceLimits.go | 22 +- .../api_op_DescribeFleetAttributes.go | 36 +- .../gamelift/api_op_DescribeFleetCapacity.go | 36 +- .../gamelift/api_op_DescribeFleetEvents.go | 36 +- .../api_op_DescribeFleetPortSettings.go | 36 +- .../api_op_DescribeFleetUtilization.go | 36 +- service/gamelift/api_op_DescribeGameServer.go | 12 +- .../api_op_DescribeGameServerGroup.go | 21 +- .../api_op_DescribeGameServerInstances.go | 21 +- .../api_op_DescribeGameSessionDetails.go | 28 +- .../api_op_DescribeGameSessionPlacement.go | 26 +- .../api_op_DescribeGameSessionQueues.go | 11 +- .../gamelift/api_op_DescribeGameSessions.go | 23 +- service/gamelift/api_op_DescribeInstances.go | 4 +- .../gamelift/api_op_DescribeMatchmaking.go | 10 +- ...pi_op_DescribeMatchmakingConfigurations.go | 16 +- .../api_op_DescribeMatchmakingRuleSets.go | 18 +- .../gamelift/api_op_DescribePlayerSessions.go | 29 +- .../api_op_DescribeRuntimeConfiguration.go | 36 +- .../api_op_DescribeScalingPolicies.go | 45 +- service/gamelift/api_op_DescribeScript.go | 12 +- ...api_op_DescribeVpcPeeringAuthorizations.go | 12 +- .../api_op_DescribeVpcPeeringConnections.go | 12 +- .../gamelift/api_op_GetGameSessionLogUrl.go | 25 +- service/gamelift/api_op_GetInstanceAccess.go | 4 +- service/gamelift/api_op_ListAliases.go | 22 +- service/gamelift/api_op_ListBuilds.go | 23 +- service/gamelift/api_op_ListFleets.go | 16 +- .../gamelift/api_op_ListGameServerGroups.go | 21 +- service/gamelift/api_op_ListGameServers.go | 12 +- service/gamelift/api_op_ListScripts.go | 12 +- .../gamelift/api_op_ListTagsForResource.go | 28 +- service/gamelift/api_op_PutScalingPolicy.go | 98 +- service/gamelift/api_op_RegisterGameServer.go | 12 +- .../api_op_RequestUploadCredentials.go | 12 +- service/gamelift/api_op_ResolveAlias.go | 15 +- .../gamelift/api_op_ResumeGameServerGroup.go | 21 +- service/gamelift/api_op_SearchGameSessions.go | 151 +- service/gamelift/api_op_StartFleetActions.go | 16 +- .../api_op_StartGameSessionPlacement.go | 48 +- service/gamelift/api_op_StartMatchBackfill.go | 20 +- service/gamelift/api_op_StartMatchmaking.go | 42 +- service/gamelift/api_op_StopFleetActions.go | 16 +- 
.../api_op_StopGameSessionPlacement.go | 25 +- service/gamelift/api_op_StopMatchmaking.go | 10 +- .../gamelift/api_op_SuspendGameServerGroup.go | 31 +- service/gamelift/api_op_TagResource.go | 31 +- service/gamelift/api_op_UntagResource.go | 33 +- service/gamelift/api_op_UpdateAlias.go | 14 +- service/gamelift/api_op_UpdateBuild.go | 12 +- .../gamelift/api_op_UpdateFleetAttributes.go | 33 +- .../gamelift/api_op_UpdateFleetCapacity.go | 25 +- .../api_op_UpdateFleetPortSettings.go | 25 +- service/gamelift/api_op_UpdateGameServer.go | 30 +- .../gamelift/api_op_UpdateGameServerGroup.go | 41 +- service/gamelift/api_op_UpdateGameSession.go | 29 +- .../gamelift/api_op_UpdateGameSessionQueue.go | 11 +- .../api_op_UpdateMatchmakingConfiguration.go | 16 +- .../api_op_UpdateRuntimeConfiguration.go | 25 +- service/gamelift/api_op_UpdateScript.go | 12 +- .../api_op_ValidateMatchmakingRuleSet.go | 18 +- service/gamelift/doc.go | 12 +- service/gamelift/types/enums.go | 146 +- service/gamelift/types/types.go | 899 ++++---- service/glacier/api_op_CreateVault.go | 20 +- service/glacier/api_op_DeleteArchive.go | 6 +- service/glacier/api_op_DescribeJob.go | 14 +- service/glacier/api_op_GetJobOutput.go | 44 +- service/glacier/api_op_GetVaultLock.go | 14 +- service/glacier/api_op_InitiateVaultLock.go | 18 +- .../glacier/api_op_SetVaultNotifications.go | 4 +- service/glacier/api_op_UploadMultipartPart.go | 18 +- service/glacier/doc.go | 4 +- service/glacier/types/enums.go | 10 +- service/glacier/types/types.go | 14 +- service/globalaccelerator/types/enums.go | 30 +- service/globalaccelerator/types/types.go | 88 +- service/glue/api_op_CreateDevEndpoint.go | 30 +- service/glue/api_op_CreateJob.go | 20 +- service/glue/api_op_CreateMLTransform.go | 52 +- service/glue/api_op_GetDatabases.go | 4 +- service/glue/api_op_GetMLTransform.go | 15 +- service/glue/api_op_GetPartitions.go | 28 +- service/glue/api_op_GetPlan.go | 2 +- service/glue/api_op_SearchTables.go | 6 +- service/glue/api_op_StartJobRun.go | 15 +- service/glue/api_op_UpdateDevEndpoint.go | 6 +- service/glue/api_op_UpdateMLTransform.go | 15 +- service/glue/types/enums.go | 128 +- service/glue/types/types.go | 255 ++- service/greengrass/types/enums.go | 14 +- service/groundstation/types/enums.go | 46 +- service/guardduty/api_op_CreateFilter.go | 127 +- service/guardduty/api_op_ListFindings.go | 121 +- service/guardduty/types/enums.go | 92 +- ...DescribeAffectedAccountsForOrganization.go | 10 +- ..._op_DescribeEventDetailsForOrganization.go | 16 +- service/health/api_op_DescribeEvents.go | 8 +- .../api_op_DescribeEventsForOrganization.go | 10 +- service/health/doc.go | 6 +- service/health/types/enums.go | 14 +- service/health/types/types.go | 54 +- .../iam/api_op_CreateOpenIDConnectProvider.go | 20 +- service/iam/api_op_CreatePolicy.go | 8 +- service/iam/api_op_CreatePolicyVersion.go | 10 +- service/iam/api_op_CreateRole.go | 6 +- service/iam/api_op_DeletePolicy.go | 16 +- service/iam/api_op_DeleteUser.go | 29 +- ...pi_op_GenerateOrganizationsAccessReport.go | 44 +- ...i_op_GenerateServiceLastAccessedDetails.go | 18 +- .../api_op_GetContextKeysForCustomPolicy.go | 10 +- ...api_op_GetContextKeysForPrincipalPolicy.go | 8 +- .../api_op_GetServiceLastAccessedDetails.go | 22 +- ...tServiceLastAccessedDetailsWithEntities.go | 22 +- ...pi_op_ListPoliciesGrantingServiceAccess.go | 23 +- service/iam/api_op_PutGroupPolicy.go | 10 +- service/iam/api_op_PutRolePolicy.go | 10 +- service/iam/api_op_PutUserPolicy.go | 10 +- service/iam/api_op_SimulateCustomPolicy.go | 48 +- 
service/iam/api_op_SimulatePrincipalPolicy.go | 60 +- service/iam/api_op_TagRole.go | 45 +- service/iam/api_op_TagUser.go | 31 +- .../iam/api_op_UpdateAccountPasswordPolicy.go | 4 +- service/iam/api_op_UpdateAssumeRolePolicy.go | 10 +- service/iam/api_op_UpdateLoginProfile.go | 10 +- service/iam/api_op_UploadSSHPublicKey.go | 14 +- service/iam/api_op_UploadServerCertificate.go | 26 +- .../iam/api_op_UploadSigningCertificate.go | 10 +- service/iam/types/enums.go | 70 +- service/iam/types/types.go | 76 +- service/identitystore/types/enums.go | 6 +- .../api_op_ListDistributionConfigurations.go | 2 +- service/imagebuilder/types/enums.go | 4 +- service/inspector/types/enums.go | 232 +- service/iot/api_op_AssociateTargetsWithJob.go | 10 +- service/iot/api_op_DescribeEndpoint.go | 18 +- service/iot/api_op_DescribeIndex.go | 14 +- service/iot/types/enums.go | 224 +- service/iot/types/types.go | 22 +- .../iotanalytics/api_op_BatchPutMessage.go | 20 +- service/iotanalytics/types/enums.go | 4 +- service/iotevents/types/types.go | 52 +- service/iotjobsdataplane/types/enums.go | 16 +- .../api_op_BatchPutAssetPropertyValue.go | 6 +- service/iotsitewise/api_op_CreatePortal.go | 19 +- ..._DescribeGatewayCapabilityConfiguration.go | 6 +- .../api_op_GetAssetPropertyAggregates.go | 4 +- .../api_op_GetAssetPropertyValue.go | 4 +- .../api_op_GetAssetPropertyValueHistory.go | 4 +- service/iotsitewise/api_op_ListAssets.go | 21 +- .../api_op_ListAssociatedAssets.go | 10 +- ...op_UpdateGatewayCapabilityConfiguration.go | 6 +- service/iotsitewise/api_op_UpdatePortal.go | 8 +- service/iotsitewise/types/enums.go | 34 +- service/iotsitewise/types/types.go | 14 +- service/iotthingsgraph/api_op_GetEntities.go | 24 +- service/iotthingsgraph/types/enums.go | 102 +- service/ivs/api_op_CreateChannel.go | 12 +- service/ivs/api_op_UpdateChannel.go | 12 +- service/ivs/doc.go | 169 +- service/ivs/types/types.go | 12 +- service/kafka/api_op_UntagResource.go | 21 +- service/kafka/types/enums.go | 18 +- service/kendra/api_op_BatchPutDocument.go | 8 +- service/kendra/api_op_Query.go | 15 +- service/kendra/types/enums.go | 96 +- service/kendra/types/types.go | 38 +- service/kinesis/api_op_CreateStream.go | 12 +- .../api_op_DisableEnhancedMonitoring.go | 22 +- .../api_op_EnableEnhancedMonitoring.go | 22 +- service/kinesis/api_op_GetShardIterator.go | 12 +- service/kinesis/api_op_PutRecord.go | 8 +- service/kinesis/api_op_PutRecords.go | 6 +- .../kinesis/api_op_StartStreamEncryption.go | 16 +- .../kinesis/api_op_StopStreamEncryption.go | 16 +- service/kinesis/api_op_UpdateShardCount.go | 43 +- service/kinesis/types/enums.go | 40 +- service/kinesis/types/types.go | 125 +- service/kinesisanalytics/types/enums.go | 6 +- service/kinesisanalytics/types/types.go | 17 +- .../api_op_AddApplicationVpcConfiguration.go | 10 +- service/kinesisanalyticsv2/types/enums.go | 34 +- service/kinesisanalyticsv2/types/types.go | 34 +- .../api_op_UpdateDataRetention.go | 16 +- service/kinesisvideo/types/enums.go | 22 +- .../api_op_GetClip.go | 18 +- .../api_op_GetDASHStreamingSessionURL.go | 130 +- .../api_op_GetHLSStreamingSessionURL.go | 192 +- .../api_op_GetMediaForFragmentList.go | 62 +- .../api_op_ListFragments.go | 6 +- .../kinesisvideoarchivedmedia/types/enums.go | 38 +- .../kinesisvideoarchivedmedia/types/types.go | 14 +- service/kinesisvideomedia/api_op_GetMedia.go | 65 +- service/kinesisvideomedia/types/enums.go | 12 +- service/kinesisvideomedia/types/types.go | 41 +- service/kms/api_op_CancelKeyDeletion.go | 4 +- 
service/kms/api_op_CreateAlias.go | 36 +- service/kms/api_op_CreateGrant.go | 26 +- service/kms/api_op_CreateKey.go | 77 +- service/kms/api_op_Decrypt.go | 24 +- .../kms/api_op_DeleteImportedKeyMaterial.go | 4 +- service/kms/api_op_DescribeKey.go | 32 +- service/kms/api_op_DisableKey.go | 4 +- service/kms/api_op_DisableKeyRotation.go | 6 +- service/kms/api_op_EnableKey.go | 4 +- service/kms/api_op_EnableKeyRotation.go | 6 +- service/kms/api_op_Encrypt.go | 77 +- service/kms/api_op_GenerateDataKey.go | 36 +- service/kms/api_op_GenerateDataKeyPair.go | 10 +- ..._op_GenerateDataKeyPairWithoutPlaintext.go | 10 +- .../api_op_GenerateDataKeyWithoutPlaintext.go | 10 +- service/kms/api_op_GetKeyPolicy.go | 4 +- service/kms/api_op_GetKeyRotationStatus.go | 22 +- service/kms/api_op_GetParametersForImport.go | 4 +- service/kms/api_op_GetPublicKey.go | 18 +- service/kms/api_op_ImportKeyMaterial.go | 40 +- service/kms/api_op_ListGrants.go | 4 +- service/kms/api_op_ListKeyPolicies.go | 4 +- service/kms/api_op_ListResourceTags.go | 4 +- service/kms/api_op_PutKeyPolicy.go | 18 +- service/kms/api_op_ReEncrypt.go | 83 +- service/kms/api_op_RetireGrant.go | 24 +- service/kms/api_op_RevokeGrant.go | 6 +- service/kms/api_op_ScheduleKeyDeletion.go | 4 +- service/kms/api_op_Sign.go | 49 +- service/kms/api_op_TagResource.go | 4 +- service/kms/api_op_UntagResource.go | 4 +- service/kms/api_op_UpdateAlias.go | 5 +- service/kms/api_op_UpdateCustomKeyStore.go | 20 +- service/kms/api_op_UpdateKeyDescription.go | 4 +- service/kms/api_op_Verify.go | 10 +- service/kms/doc.go | 16 +- service/kms/types/enums.go | 98 +- service/kms/types/errors.go | 40 +- service/kms/types/types.go | 69 +- service/lakeformation/types/enums.go | 56 +- service/lambda/api_op_AddPermission.go | 16 +- service/lambda/api_op_CreateAlias.go | 17 +- .../lambda/api_op_CreateEventSourceMapping.go | 88 +- service/lambda/api_op_CreateFunction.go | 17 +- service/lambda/api_op_DeleteAlias.go | 17 +- service/lambda/api_op_DeleteFunction.go | 6 +- .../api_op_DeleteFunctionConcurrency.go | 17 +- .../api_op_DeleteFunctionEventInvokeConfig.go | 16 +- ...i_op_DeleteProvisionedConcurrencyConfig.go | 17 +- service/lambda/api_op_GetAlias.go | 17 +- service/lambda/api_op_GetFunction.go | 16 +- .../lambda/api_op_GetFunctionConcurrency.go | 17 +- .../lambda/api_op_GetFunctionConfiguration.go | 16 +- .../api_op_GetFunctionEventInvokeConfig.go | 26 +- service/lambda/api_op_GetPolicy.go | 16 +- .../api_op_GetProvisionedConcurrencyConfig.go | 17 +- service/lambda/api_op_Invoke.go | 29 +- service/lambda/api_op_InvokeAsync.go | 17 +- service/lambda/api_op_ListAliases.go | 17 +- .../lambda/api_op_ListEventSourceMappings.go | 37 +- .../api_op_ListFunctionEventInvokeConfigs.go | 17 +- ...pi_op_ListProvisionedConcurrencyConfigs.go | 17 +- .../lambda/api_op_ListVersionsByFunction.go | 17 +- service/lambda/api_op_PublishLayerVersion.go | 10 +- service/lambda/api_op_PublishVersion.go | 17 +- .../lambda/api_op_PutFunctionConcurrency.go | 17 +- .../api_op_PutFunctionEventInvokeConfig.go | 36 +- .../api_op_PutProvisionedConcurrencyConfig.go | 17 +- service/lambda/api_op_RemovePermission.go | 16 +- service/lambda/api_op_UpdateAlias.go | 17 +- .../lambda/api_op_UpdateEventSourceMapping.go | 61 +- service/lambda/api_op_UpdateFunctionCode.go | 17 +- .../api_op_UpdateFunctionConfiguration.go | 17 +- .../api_op_UpdateFunctionEventInvokeConfig.go | 36 +- service/lambda/types/enums.go | 12 +- service/lambda/types/types.go | 10 +- .../api_op_GetBotChannelAssociation.go | 12 +- 
.../lexmodelbuildingservice/api_op_GetBots.go | 15 +- .../api_op_GetExport.go | 9 +- .../api_op_GetIntents.go | 14 +- .../api_op_GetSlotTypes.go | 4 +- .../lexmodelbuildingservice/api_op_PutBot.go | 57 +- .../api_op_PutIntent.go | 85 +- .../api_op_PutSlotType.go | 16 +- .../api_op_StartImport.go | 20 +- .../lexmodelbuildingservice/types/enums.go | 82 +- .../lexmodelbuildingservice/types/types.go | 50 +- .../lexruntimeservice/api_op_PostContent.go | 180 +- service/lexruntimeservice/api_op_PostText.go | 154 +- .../lexruntimeservice/api_op_PutSession.go | 89 +- service/lexruntimeservice/types/enums.go | 36 +- service/lexruntimeservice/types/errors.go | 8 +- service/lexruntimeservice/types/types.go | 91 +- .../api_op_CreateLicenseConfiguration.go | 8 +- .../api_op_ListLicenseConfigurations.go | 16 +- .../api_op_ListResourceInventory.go | 22 +- ...api_op_ListUsageForLicenseConfiguration.go | 12 +- service/licensemanager/types/enums.go | 18 +- service/licensemanager/types/types.go | 44 +- service/lightsail/api_op_CopySnapshot.go | 28 +- .../lightsail/api_op_CreateContactMethod.go | 24 +- .../api_op_CreateDiskFromSnapshot.go | 38 +- .../api_op_CreateInstancesFromSnapshot.go | 29 +- .../api_op_CreateRelationalDatabase.go | 58 +- ...op_CreateRelationalDatabaseFromSnapshot.go | 26 +- ...api_op_CreateRelationalDatabaseSnapshot.go | 8 +- .../api_op_DeleteRelationalDatabase.go | 4 +- .../api_op_GetDistributionMetricData.go | 96 +- .../lightsail/api_op_GetInstanceMetricData.go | 95 +- .../api_op_GetLoadBalancerMetricData.go | 148 +- .../api_op_GetRelationalDatabaseLogEvents.go | 8 +- .../api_op_GetRelationalDatabaseMetricData.go | 80 +- service/lightsail/api_op_PutAlarm.go | 68 +- service/lightsail/api_op_TestAlarm.go | 8 +- .../api_op_UpdateRelationalDatabase.go | 23 +- service/lightsail/types/enums.go | 96 +- service/lightsail/types/types.go | 613 +++--- .../api_op_CreateDataSourceFromRDS.go | 56 +- .../api_op_CreateDataSourceFromRedshift.go | 50 +- .../api_op_CreateDataSourceFromS3.go | 12 +- .../machinelearning/api_op_CreateMLModel.go | 28 +- .../api_op_DescribeBatchPredictions.go | 45 +- .../api_op_DescribeDataSources.go | 37 +- .../api_op_DescribeEvaluations.go | 43 +- .../api_op_DescribeMLModels.go | 54 +- .../api_op_GetBatchPrediction.go | 18 +- .../machinelearning/api_op_GetDataSource.go | 18 +- .../machinelearning/api_op_GetEvaluation.go | 26 +- service/machinelearning/api_op_GetMLModel.go | 41 +- service/machinelearning/api_op_Predict.go | 12 +- service/machinelearning/types/enums.go | 76 +- service/machinelearning/types/types.go | 179 +- .../macie2/api_op_CreateClassificationJob.go | 10 +- .../api_op_DescribeClassificationJob.go | 37 +- service/macie2/api_op_GetFindingStatistics.go | 15 +- .../macie2/api_op_UpdateClassificationJob.go | 8 +- service/macie2/types/enums.go | 108 +- service/macie2/types/types.go | 160 +- service/managedblockchain/types/enums.go | 52 +- service/managedblockchain/types/types.go | 129 +- service/marketplacecatalog/types/types.go | 34 +- .../api_op_GenerateDataSet.go | 85 +- .../api_op_StartSupportDataExport.go | 4 +- .../types/enums.go | 54 +- service/marketplaceentitlementservice/doc.go | 2 +- .../types/enums.go | 4 +- .../api_op_RegisterUsage.go | 30 +- service/marketplacemetering/doc.go | 44 +- service/marketplacemetering/types/enums.go | 6 +- service/marketplacemetering/types/types.go | 10 +- service/mediaconnect/types/enums.go | 16 +- service/mediaconvert/types/enums.go | 1008 ++++----- service/medialive/types/enums.go | 698 +++--- 
service/medialive/types/types.go | 150 +- service/mediapackage/types/enums.go | 56 +- service/mediapackagevod/types/enums.go | 26 +- .../mediastore/api_op_PutContainerPolicy.go | 6 +- service/mediastore/api_op_PutMetricPolicy.go | 12 +- service/mediatailor/types/enums.go | 8 +- .../api_op_AssociateCreatedArtifact.go | 18 +- .../api_op_DeleteProgressUpdateStream.go | 27 +- .../api_op_DisassociateCreatedArtifact.go | 18 +- .../api_op_ListCreatedArtifacts.go | 10 +- .../migrationhub/api_op_ListMigrationTasks.go | 12 +- .../api_op_NotifyMigrationTaskState.go | 10 +- .../api_op_PutResourceAttributes.go | 22 +- service/migrationhub/types/enums.go | 34 +- service/migrationhubconfig/doc.go | 18 +- service/mq/types/enums.go | 20 +- service/mturk/api_op_ApproveAssignment.go | 24 +- ...pi_op_CreateAdditionalAssignmentsForHIT.go | 18 +- service/mturk/api_op_DeleteHIT.go | 16 +- service/mturk/api_op_UpdateHITReviewStatus.go | 6 +- service/mturk/types/types.go | 8 +- ...pi_op_AddSourceIdentifierToSubscription.go | 16 +- .../api_op_ApplyPendingMaintenanceAction.go | 13 +- .../api_op_CopyDBClusterParameterGroup.go | 32 +- .../neptune/api_op_CopyDBClusterSnapshot.go | 16 +- .../neptune/api_op_CopyDBParameterGroup.go | 17 +- service/neptune/api_op_CreateDBCluster.go | 43 +- .../api_op_CreateDBClusterParameterGroup.go | 8 +- .../neptune/api_op_CreateDBClusterSnapshot.go | 15 +- service/neptune/api_op_CreateDBInstance.go | 22 +- .../neptune/api_op_CreateDBParameterGroup.go | 14 +- .../neptune/api_op_CreateEventSubscription.go | 21 +- service/neptune/api_op_DeleteDBCluster.go | 8 +- .../api_op_DeleteDBClusterParameterGroup.go | 10 +- service/neptune/api_op_DeleteDBInstance.go | 16 +- .../neptune/api_op_DeleteDBParameterGroup.go | 8 +- ...api_op_DescribeDBClusterParameterGroups.go | 2 +- .../api_op_DescribeDBClusterParameters.go | 2 +- .../api_op_DescribeDBClusterSnapshots.go | 46 +- service/neptune/api_op_DescribeDBClusters.go | 19 +- .../api_op_DescribeDBEngineVersions.go | 2 +- service/neptune/api_op_DescribeDBInstances.go | 20 +- .../api_op_DescribeDBParameterGroups.go | 4 +- .../neptune/api_op_DescribeDBParameters.go | 4 +- service/neptune/api_op_DescribeEvents.go | 22 +- ...pi_op_DescribePendingMaintenanceActions.go | 6 +- service/neptune/api_op_FailoverDBCluster.go | 2 +- service/neptune/api_op_ModifyDBCluster.go | 30 +- .../api_op_ModifyDBClusterParameterGroup.go | 12 +- service/neptune/api_op_ModifyDBInstance.go | 28 +- .../neptune/api_op_ModifyDBParameterGroup.go | 4 +- service/neptune/api_op_RebootDBInstance.go | 2 +- .../api_op_ResetDBClusterParameterGroup.go | 12 +- .../neptune/api_op_ResetDBParameterGroup.go | 4 +- .../api_op_RestoreDBClusterFromSnapshot.go | 28 +- .../api_op_RestoreDBClusterToPointInTime.go | 52 +- service/neptune/types/types.go | 24 +- service/networkmanager/api_op_CreateSite.go | 9 +- service/networkmanager/api_op_UpdateSite.go | 8 +- service/networkmanager/types/enums.go | 8 +- service/opsworks/api_op_AssignInstance.go | 18 +- service/opsworks/api_op_CloneStack.go | 134 +- service/opsworks/api_op_CreateInstance.go | 40 +- service/opsworks/api_op_CreateStack.go | 117 +- .../opsworks/api_op_DescribePermissions.go | 14 +- service/opsworks/api_op_SetPermission.go | 16 +- service/opsworks/api_op_TagResource.go | 16 +- service/opsworks/api_op_UpdateInstance.go | 48 +- service/opsworks/api_op_UpdateStack.go | 104 +- service/opsworks/doc.go | 84 +- service/opsworks/types/enums.go | 216 +- service/opsworks/types/types.go | 211 +- service/opsworkscm/api_op_AssociateNode.go | 18 +- 
service/opsworkscm/api_op_CreateBackup.go | 23 +- service/opsworkscm/api_op_CreateServer.go | 94 +- .../api_op_DescribeNodeAssociationStatus.go | 10 +- service/opsworkscm/api_op_DisassociateNode.go | 2 +- .../api_op_ExportServerEngineAttribute.go | 25 +- service/opsworkscm/api_op_StartMaintenance.go | 2 +- service/opsworkscm/api_op_TagResource.go | 23 +- service/opsworkscm/doc.go | 36 +- service/opsworkscm/types/enums.go | 40 +- service/opsworkscm/types/types.go | 22 +- .../organizations/api_op_AcceptHandshake.go | 9 +- service/organizations/api_op_AttachPolicy.go | 28 +- service/organizations/api_op_CreateAccount.go | 42 +- .../api_op_CreateGovCloudAccount.go | 73 +- .../api_op_CreateOrganization.go | 14 +- .../api_op_CreateOrganizationalUnit.go | 8 +- service/organizations/api_op_CreatePolicy.go | 16 +- .../api_op_DescribeEffectivePolicy.go | 10 +- service/organizations/api_op_DetachPolicy.go | 14 +- .../organizations/api_op_DisablePolicyType.go | 14 +- .../organizations/api_op_EnablePolicyType.go | 14 +- .../api_op_InviteAccountToOrganization.go | 14 +- .../organizations/api_op_LeaveOrganization.go | 50 +- service/organizations/api_op_ListChildren.go | 13 +- ...api_op_ListOrganizationalUnitsForParent.go | 13 +- service/organizations/api_op_ListParents.go | 4 +- service/organizations/api_op_ListPolicies.go | 14 +- .../api_op_ListPoliciesForTarget.go | 26 +- .../api_op_ListTagsForResource.go | 32 +- service/organizations/api_op_MoveAccount.go | 12 +- .../api_op_RemoveAccountFromOrganization.go | 4 +- service/organizations/api_op_TagResource.go | 40 +- service/organizations/api_op_UntagResource.go | 31 +- service/organizations/types/enums.go | 206 +- service/organizations/types/errors.go | 293 ++- service/organizations/types/types.go | 131 +- service/personalize/api_op_CreateCampaign.go | 14 +- service/personalize/api_op_CreateDataset.go | 39 +- .../personalize/api_op_CreateDatasetGroup.go | 55 +- .../api_op_CreateDatasetImportJob.go | 7 +- .../personalize/api_op_CreateEventTracker.go | 11 +- service/personalize/api_op_CreateSchema.go | 7 +- service/personalize/api_op_CreateSolution.go | 18 +- .../api_op_CreateSolutionVersion.go | 24 +- .../personalize/api_op_DeleteDatasetGroup.go | 8 +- .../personalize/api_op_DescribeCampaign.go | 10 +- .../api_op_DescribeDatasetImportJob.go | 10 +- service/personalize/api_op_DescribeRecipe.go | 21 +- service/personalize/types/types.go | 134 +- .../api_op_GetRecommendations.go | 4 +- service/pi/api_op_DescribeDimensionKeys.go | 27 +- service/pi/api_op_GetResourceMetrics.go | 18 +- service/pi/types/types.go | 63 +- .../pinpoint/api_op_DeleteEmailTemplate.go | 14 +- service/pinpoint/api_op_DeletePushTemplate.go | 14 +- service/pinpoint/api_op_DeleteSmsTemplate.go | 14 +- .../pinpoint/api_op_DeleteVoiceTemplate.go | 14 +- service/pinpoint/api_op_GetEmailTemplate.go | 14 +- service/pinpoint/api_op_GetPushTemplate.go | 14 +- service/pinpoint/api_op_GetSmsTemplate.go | 14 +- service/pinpoint/api_op_GetVoiceTemplate.go | 14 +- service/pinpoint/api_op_RemoveAttributes.go | 6 +- .../pinpoint/api_op_UpdateEmailTemplate.go | 14 +- service/pinpoint/api_op_UpdatePushTemplate.go | 14 +- service/pinpoint/api_op_UpdateSmsTemplate.go | 14 +- .../pinpoint/api_op_UpdateVoiceTemplate.go | 14 +- service/pinpoint/types/enums.go | 108 +- service/pinpoint/types/types.go | 498 +++-- service/pinpointemail/api_op_GetAccount.go | 12 +- ...i_op_PutEmailIdentityMailFromAttributes.go | 12 +- service/pinpointemail/api_op_SendEmail.go | 8 +- service/pinpointemail/types/enums.go | 64 +- 
service/pinpointemail/types/types.go | 168 +- service/pinpointsmsvoice/types/enums.go | 14 +- service/polly/api_op_SynthesizeSpeech.go | 16 +- service/polly/types/enums.go | 16 +- service/pricing/doc.go | 4 +- service/pricing/types/enums.go | 2 +- service/qldb/api_op_ExportJournalToS3.go | 4 +- ...i_op_ListJournalKinesisStreamsForLedger.go | 6 +- service/qldb/api_op_ListJournalS3Exports.go | 6 +- .../api_op_ListJournalS3ExportsForLedger.go | 6 +- service/qldb/api_op_ListLedgers.go | 12 +- service/qldb/types/enums.go | 18 +- service/qldb/types/types.go | 12 +- service/qldbsession/api_op_SendCommand.go | 14 +- service/qldbsession/doc.go | 14 +- service/quicksight/api_op_CreateDashboard.go | 23 +- .../api_op_CreateIAMPolicyAssignment.go | 24 +- .../api_op_DescribeAccountCustomization.go | 53 +- .../quicksight/api_op_GetDashboardEmbedUrl.go | 28 +- .../quicksight/api_op_GetSessionEmbedUrl.go | 40 +- service/quicksight/api_op_ListThemes.go | 12 +- service/quicksight/api_op_RegisterUser.go | 41 +- service/quicksight/api_op_TagResource.go | 8 +- service/quicksight/api_op_UpdateDashboard.go | 23 +- .../api_op_UpdateIAMPolicyAssignment.go | 24 +- .../api_op_UpdateThemePermissions.go | 45 +- service/quicksight/api_op_UpdateUser.go | 43 +- service/quicksight/types/enums.go | 270 +-- service/quicksight/types/types.go | 35 +- ...p_PromoteResourceShareCreatedFromPolicy.go | 6 +- service/ram/types/enums.go | 20 +- service/ram/types/types.go | 12 +- ...pi_op_AddSourceIdentifierToSubscription.go | 25 +- .../api_op_ApplyPendingMaintenanceAction.go | 13 +- service/rds/api_op_BacktrackDBCluster.go | 29 +- service/rds/api_op_CancelExportTask.go | 22 +- .../rds/api_op_CopyDBClusterParameterGroup.go | 33 +- service/rds/api_op_CopyDBClusterSnapshot.go | 76 +- service/rds/api_op_CopyDBParameterGroup.go | 22 +- service/rds/api_op_CopyDBSnapshot.go | 49 +- service/rds/api_op_CopyOptionGroup.go | 14 +- service/rds/api_op_CreateDBCluster.go | 93 +- service/rds/api_op_CreateDBClusterEndpoint.go | 12 +- .../api_op_CreateDBClusterParameterGroup.go | 8 +- service/rds/api_op_CreateDBClusterSnapshot.go | 15 +- service/rds/api_op_CreateDBInstance.go | 351 ++- .../rds/api_op_CreateDBInstanceReadReplica.go | 72 +- service/rds/api_op_CreateDBParameterGroup.go | 14 +- service/rds/api_op_CreateDBSecurityGroup.go | 10 +- service/rds/api_op_CreateDBSnapshot.go | 14 +- service/rds/api_op_CreateEventSubscription.go | 31 +- service/rds/api_op_CreateOptionGroup.go | 11 +- service/rds/api_op_DeleteDBCluster.go | 10 +- service/rds/api_op_DeleteDBClusterEndpoint.go | 12 +- .../api_op_DeleteDBClusterParameterGroup.go | 10 +- service/rds/api_op_DeleteDBInstance.go | 20 +- service/rds/api_op_DeleteDBParameterGroup.go | 8 +- service/rds/api_op_DeleteDBSecurityGroup.go | 10 +- service/rds/api_op_DescribeCertificates.go | 2 +- .../rds/api_op_DescribeDBClusterBacktracks.go | 34 +- ...api_op_DescribeDBClusterParameterGroups.go | 2 +- .../rds/api_op_DescribeDBClusterParameters.go | 2 +- .../rds/api_op_DescribeDBClusterSnapshots.go | 58 +- service/rds/api_op_DescribeDBClusters.go | 8 +- .../rds/api_op_DescribeDBEngineVersions.go | 2 +- ...i_op_DescribeDBInstanceAutomatedBackups.go | 32 +- service/rds/api_op_DescribeDBInstances.go | 34 +- service/rds/api_op_DescribeDBLogFiles.go | 2 +- .../rds/api_op_DescribeDBParameterGroups.go | 4 +- service/rds/api_op_DescribeDBParameters.go | 4 +- service/rds/api_op_DescribeDBSnapshots.go | 46 +- service/rds/api_op_DescribeEvents.go | 31 +- service/rds/api_op_DescribeExportTasks.go | 15 +- 
service/rds/api_op_DescribeGlobalClusters.go | 8 +- .../rds/api_op_DescribeInstallationMedia.go | 14 +- ...pi_op_DescribePendingMaintenanceActions.go | 6 +- service/rds/api_op_DescribeSourceRegions.go | 4 +- .../rds/api_op_DownloadDBLogFilePortion.go | 16 +- service/rds/api_op_FailoverDBCluster.go | 2 +- service/rds/api_op_ImportInstallationMedia.go | 10 +- service/rds/api_op_ModifyCertificates.go | 4 +- .../api_op_ModifyCurrentDBClusterCapacity.go | 11 +- service/rds/api_op_ModifyDBCluster.go | 38 +- service/rds/api_op_ModifyDBClusterEndpoint.go | 12 +- .../api_op_ModifyDBClusterParameterGroup.go | 12 +- service/rds/api_op_ModifyDBInstance.go | 49 +- service/rds/api_op_ModifyDBParameterGroup.go | 4 +- service/rds/api_op_ModifyDBSnapshot.go | 16 +- service/rds/api_op_ModifyGlobalCluster.go | 14 +- service/rds/api_op_PromoteReadReplica.go | 26 +- .../rds/api_op_PromoteReadReplicaDBCluster.go | 2 +- service/rds/api_op_RebootDBInstance.go | 2 +- .../api_op_ResetDBClusterParameterGroup.go | 12 +- service/rds/api_op_ResetDBParameterGroup.go | 4 +- service/rds/api_op_RestoreDBClusterFromS3.go | 47 +- .../api_op_RestoreDBClusterFromSnapshot.go | 38 +- .../api_op_RestoreDBClusterToPointInTime.go | 72 +- .../api_op_RestoreDBInstanceFromDBSnapshot.go | 60 +- service/rds/api_op_RestoreDBInstanceFromS3.go | 41 +- .../api_op_RestoreDBInstanceToPointInTime.go | 57 +- service/rds/api_op_StartExportTask.go | 62 +- service/rds/doc.go | 22 +- service/rds/types/enums.go | 36 +- service/rds/types/types.go | 285 ++- .../rdsdata/api_op_BatchExecuteStatement.go | 6 +- service/rdsdata/types/enums.go | 4 +- service/rdsdata/types/types.go | 22 +- .../redshift/api_op_CopyClusterSnapshot.go | 24 +- service/redshift/api_op_CreateCluster.go | 72 +- .../api_op_CreateClusterParameterGroup.go | 12 +- .../api_op_CreateClusterSecurityGroup.go | 13 +- .../redshift/api_op_CreateClusterSnapshot.go | 14 +- .../api_op_CreateClusterSubnetGroup.go | 13 +- .../api_op_CreateEventSubscription.go | 12 +- .../api_op_CreateSnapshotCopyGrant.go | 13 +- service/redshift/api_op_CreateUsageLimit.go | 8 +- service/redshift/api_op_DeleteCluster.go | 22 +- .../api_op_DeleteClusterParameterGroup.go | 8 +- .../api_op_DescribeClusterSnapshots.go | 10 +- .../api_op_DescribeClusterVersions.go | 9 +- service/redshift/api_op_DescribeEvents.go | 30 +- service/redshift/api_op_DescribeTags.go | 57 +- .../redshift/api_op_DescribeUsageLimits.go | 20 +- service/redshift/api_op_EnableLogging.go | 23 +- .../redshift/api_op_GetClusterCredentials.go | 55 +- service/redshift/api_op_ModifyCluster.go | 42 +- service/redshift/api_op_ModifyUsageLimit.go | 8 +- service/redshift/api_op_ResizeCluster.go | 29 +- .../api_op_RestoreFromClusterSnapshot.go | 24 +- service/redshift/types/enums.go | 58 +- service/redshift/types/types.go | 186 +- .../redshiftdata/api_op_DescribeStatement.go | 21 +- service/redshiftdata/api_op_DescribeTable.go | 8 +- .../redshiftdata/api_op_ExecuteStatement.go | 10 +- service/redshiftdata/api_op_ListDatabases.go | 8 +- service/redshiftdata/api_op_ListSchemas.go | 8 +- service/redshiftdata/api_op_ListStatements.go | 21 +- service/redshiftdata/api_op_ListTables.go | 10 +- .../api_op_DetectProtectiveEquipment.go | 50 +- service/rekognition/api_op_IndexFaces.go | 79 +- service/rekognition/types/enums.go | 74 +- service/rekognition/types/types.go | 30 +- service/resourcegroups/api_op_CreateGroup.go | 5 +- .../api_op_GetGroupConfiguration.go | 5 +- .../api_op_ListGroupResources.go | 2 +- service/resourcegroups/api_op_ListGroups.go | 6 +- 
service/resourcegroups/doc.go | 19 +- service/resourcegroups/types/enums.go | 14 +- service/resourcegroups/types/types.go | 73 +- .../api_op_DescribeReportCreation.go | 16 +- .../api_op_GetComplianceSummary.go | 10 +- .../api_op_GetResources.go | 75 +- .../api_op_TagResources.go | 22 +- .../api_op_UntagResources.go | 8 +- service/resourcegroupstaggingapi/doc.go | 333 ++- .../resourcegroupstaggingapi/types/enums.go | 10 +- .../resourcegroupstaggingapi/types/errors.go | 30 +- .../resourcegroupstaggingapi/types/types.go | 8 +- service/robomaker/types/enums.go | 6 +- .../api_op_ChangeResourceRecordSets.go | 36 +- .../route53/api_op_ChangeTagsForResource.go | 4 +- service/route53/api_op_CreateHealthCheck.go | 42 +- service/route53/api_op_CreateHostedZone.go | 28 +- .../api_op_CreateQueryLoggingConfig.go | 75 +- .../api_op_CreateReusableDelegationSet.go | 52 +- service/route53/api_op_DeleteHostedZone.go | 10 +- service/route53/api_op_DeleteTrafficPolicy.go | 12 +- .../api_op_DisassociateVPCFromHostedZone.go | 14 +- service/route53/api_op_GetAccountLimit.go | 17 +- service/route53/api_op_GetChange.go | 10 +- service/route53/api_op_GetGeoLocation.go | 16 +- service/route53/api_op_GetHostedZoneLimit.go | 4 +- .../route53/api_op_ListHostedZonesByName.go | 30 +- .../route53/api_op_ListHostedZonesByVPC.go | 14 +- .../route53/api_op_ListResourceRecordSets.go | 29 +- service/route53/api_op_ListTagsForResource.go | 4 +- .../route53/api_op_ListTagsForResources.go | 4 +- service/route53/api_op_TestDNSAnswer.go | 6 +- service/route53/api_op_UpdateHealthCheck.go | 111 +- .../api_op_UpdateTrafficPolicyInstance.go | 12 +- service/route53/types/enums.go | 30 +- service/route53/types/errors.go | 28 +- service/route53/types/types.go | 710 ++++--- .../api_op_CheckDomainAvailability.go | 20 +- .../api_op_CheckDomainTransferability.go | 12 +- .../api_op_GetDomainSuggestions.go | 20 +- .../route53domains/api_op_RegisterDomain.go | 48 +- .../route53domains/api_op_TransferDomain.go | 25 +- ...pi_op_TransferDomainToAnotherAwsAccount.go | 14 +- service/route53domains/types/enums.go | 124 +- service/route53domains/types/types.go | 417 ++-- .../api_op_CreateResolverEndpoint.go | 16 +- .../api_op_CreateResolverQueryLogConfig.go | 12 +- .../api_op_DeleteResolverEndpoint.go | 6 +- ...i_op_DisassociateResolverQueryLogConfig.go | 6 +- ..._ListResolverQueryLogConfigAssociations.go | 57 +- .../api_op_ListResolverQueryLogConfigs.go | 68 +- .../api_op_PutResolverQueryLogConfigPolicy.go | 8 +- .../api_op_PutResolverRulePolicy.go | 11 +- service/route53resolver/api_op_TagResource.go | 22 +- .../route53resolver/api_op_UntagResource.go | 24 +- service/route53resolver/types/types.go | 275 ++- service/s3/api_op_AbortMultipartUpload.go | 14 +- service/s3/api_op_CompleteMultipartUpload.go | 57 +- service/s3/api_op_CopyObject.go | 45 +- service/s3/api_op_CreateBucket.go | 63 +- service/s3/api_op_CreateMultipartUpload.go | 118 +- service/s3/api_op_DeleteBucket.go | 4 +- ...i_op_DeleteBucketAnalyticsConfiguration.go | 12 +- service/s3/api_op_DeleteBucketCors.go | 4 +- service/s3/api_op_DeleteBucketEncryption.go | 6 +- ...i_op_DeleteBucketInventoryConfiguration.go | 10 +- service/s3/api_op_DeleteBucketLifecycle.go | 6 +- ...api_op_DeleteBucketMetricsConfiguration.go | 14 +- .../api_op_DeleteBucketOwnershipControls.go | 4 +- service/s3/api_op_DeleteBucketPolicy.go | 4 +- service/s3/api_op_DeleteBucketReplication.go | 6 +- service/s3/api_op_DeleteBucketTagging.go | 6 +- service/s3/api_op_DeleteBucketWebsite.go | 6 +- 
service/s3/api_op_DeleteObject.go | 2 +- service/s3/api_op_DeleteObjectTagging.go | 6 +- service/s3/api_op_DeleteObjects.go | 14 +- service/s3/api_op_DeletePublicAccessBlock.go | 16 +- ...api_op_GetBucketAccelerateConfiguration.go | 2 +- service/s3/api_op_GetBucketAcl.go | 2 +- .../api_op_GetBucketAnalyticsConfiguration.go | 10 +- service/s3/api_op_GetBucketCors.go | 4 +- service/s3/api_op_GetBucketEncryption.go | 7 +- .../api_op_GetBucketInventoryConfiguration.go | 10 +- .../api_op_GetBucketLifecycleConfiguration.go | 24 +- service/s3/api_op_GetBucketLocation.go | 4 +- service/s3/api_op_GetBucketLogging.go | 4 +- .../api_op_GetBucketMetricsConfiguration.go | 14 +- ...i_op_GetBucketNotificationConfiguration.go | 2 +- .../s3/api_op_GetBucketOwnershipControls.go | 4 +- service/s3/api_op_GetBucketPolicy.go | 2 +- service/s3/api_op_GetBucketPolicyStatus.go | 16 +- service/s3/api_op_GetBucketReplication.go | 6 +- service/s3/api_op_GetBucketRequestPayment.go | 2 +- service/s3/api_op_GetBucketTagging.go | 12 +- service/s3/api_op_GetBucketVersioning.go | 11 +- service/s3/api_op_GetBucketWebsite.go | 6 +- service/s3/api_op_GetObject.go | 49 +- service/s3/api_op_GetObjectAcl.go | 6 +- service/s3/api_op_GetObjectTagging.go | 2 +- service/s3/api_op_GetObjectTorrent.go | 2 +- service/s3/api_op_GetPublicAccessBlock.go | 16 +- service/s3/api_op_HeadObject.go | 73 +- ...pi_op_ListBucketAnalyticsConfigurations.go | 12 +- ...pi_op_ListBucketInventoryConfigurations.go | 10 +- .../api_op_ListBucketMetricsConfigurations.go | 10 +- service/s3/api_op_ListMultipartUploads.go | 14 +- service/s3/api_op_ListObjectVersions.go | 13 +- service/s3/api_op_ListObjects.go | 15 +- service/s3/api_op_ListObjectsV2.go | 11 +- service/s3/api_op_ListParts.go | 16 +- ...api_op_PutBucketAccelerateConfiguration.go | 12 +- service/s3/api_op_PutBucketAcl.go | 139 +- .../api_op_PutBucketAnalyticsConfiguration.go | 37 +- service/s3/api_op_PutBucketCors.go | 27 +- service/s3/api_op_PutBucketEncryption.go | 6 +- .../api_op_PutBucketInventoryConfiguration.go | 39 +- .../api_op_PutBucketLifecycleConfiguration.go | 41 +- service/s3/api_op_PutBucketLogging.go | 20 +- .../api_op_PutBucketMetricsConfiguration.go | 19 +- ...i_op_PutBucketNotificationConfiguration.go | 2 +- .../s3/api_op_PutBucketOwnershipControls.go | 4 +- service/s3/api_op_PutBucketPolicy.go | 4 +- service/s3/api_op_PutBucketReplication.go | 6 +- service/s3/api_op_PutBucketRequestPayment.go | 4 +- service/s3/api_op_PutBucketTagging.go | 41 +- service/s3/api_op_PutBucketVersioning.go | 6 +- service/s3/api_op_PutBucketWebsite.go | 53 +- service/s3/api_op_PutObject.go | 4 +- service/s3/api_op_PutObjectAcl.go | 92 +- service/s3/api_op_PutObjectLegalHold.go | 2 +- .../s3/api_op_PutObjectLockConfiguration.go | 4 +- service/s3/api_op_PutObjectRetention.go | 2 +- service/s3/api_op_PutObjectTagging.go | 35 +- service/s3/api_op_PutPublicAccessBlock.go | 14 +- service/s3/api_op_RestoreObject.go | 174 +- service/s3/api_op_SelectObjectContent.go | 64 +- service/s3/api_op_UploadPart.go | 36 +- service/s3/api_op_UploadPartCopy.go | 67 +- service/s3/types/enums.go | 60 +- service/s3/types/types.go | 1194 +++++------ service/s3control/api_op_CreateAccessPoint.go | 40 +- service/s3control/api_op_CreateBucket.go | 34 +- service/s3control/api_op_CreateJob.go | 14 +- service/s3control/api_op_DeleteAccessPoint.go | 10 +- .../api_op_DeleteAccessPointPolicy.go | 8 +- service/s3control/api_op_DeleteBucket.go | 10 +- ...i_op_DeleteBucketLifecycleConfiguration.go | 6 +- 
.../s3control/api_op_DeleteBucketPolicy.go | 6 +- .../s3control/api_op_DeleteBucketTagging.go | 6 +- service/s3control/api_op_DeleteJobTagging.go | 12 +- .../api_op_DeletePublicAccessBlock.go | 6 +- service/s3control/api_op_DescribeJob.go | 14 +- service/s3control/api_op_GetAccessPoint.go | 10 +- .../s3control/api_op_GetAccessPointPolicy.go | 7 +- service/s3control/api_op_GetBucket.go | 8 +- .../api_op_GetBucketLifecycleConfiguration.go | 23 +- service/s3control/api_op_GetBucketPolicy.go | 13 +- service/s3control/api_op_GetBucketTagging.go | 16 +- service/s3control/api_op_GetJobTagging.go | 12 +- .../s3control/api_op_GetPublicAccessBlock.go | 6 +- service/s3control/api_op_ListAccessPoints.go | 10 +- service/s3control/api_op_ListJobs.go | 14 +- .../s3control/api_op_PutAccessPointPolicy.go | 6 +- .../api_op_PutBucketLifecycleConfiguration.go | 6 +- service/s3control/api_op_PutBucketPolicy.go | 6 +- service/s3control/api_op_PutBucketTagging.go | 49 +- service/s3control/api_op_PutJobTagging.go | 38 +- .../s3control/api_op_PutPublicAccessBlock.go | 6 +- service/s3control/api_op_UpdateJobPriority.go | 14 +- service/s3control/api_op_UpdateJobStatus.go | 14 +- service/s3control/types/enums.go | 58 +- service/s3control/types/types.go | 16 +- service/s3outposts/api_op_CreateEndpoint.go | 6 +- service/s3outposts/api_op_DeleteEndpoint.go | 6 +- service/s3outposts/api_op_ListEndpoints.go | 6 +- service/sagemaker/api_op_CreateAlgorithm.go | 40 +- .../sagemaker/api_op_CreateCompilationJob.go | 50 +- service/sagemaker/api_op_CreateDomain.go | 42 +- .../sagemaker/api_op_CreateEndpointConfig.go | 10 +- service/sagemaker/api_op_CreateLabelingJob.go | 24 +- .../sagemaker/api_op_CreateModelPackage.go | 14 +- .../api_op_CreateNotebookInstance.go | 32 +- .../sagemaker/api_op_CreateProcessingJob.go | 6 +- service/sagemaker/api_op_CreateTrainingJob.go | 48 +- .../sagemaker/api_op_CreateTransformJob.go | 36 +- .../sagemaker/api_op_CreateTrialComponent.go | 8 +- service/sagemaker/api_op_DescribeDomain.go | 6 +- service/sagemaker/api_op_DescribeEndpoint.go | 35 +- .../sagemaker/api_op_DescribeLabelingJob.go | 18 +- .../sagemaker/api_op_DescribeTrainingJob.go | 89 +- .../sagemaker/api_op_DescribeTransformJob.go | 6 +- .../api_op_DescribeTrialComponent.go | 8 +- .../sagemaker/api_op_ListCodeRepositories.go | 17 +- .../sagemaker/api_op_ListTrialComponents.go | 6 +- service/sagemaker/doc.go | 6 +- service/sagemaker/types/enums.go | 924 ++++---- service/sagemaker/types/types.go | 1877 ++++++++--------- service/sagemakera2iruntime/doc.go | 4 +- service/sagemakera2iruntime/types/enums.go | 14 +- service/savingsplans/types/enums.go | 88 +- service/schemas/types/enums.go | 6 +- .../api_op_CancelRotateSecret.go | 37 +- service/secretsmanager/api_op_CreateSecret.go | 123 +- .../api_op_DeleteResourcePolicy.go | 15 +- service/secretsmanager/api_op_DeleteSecret.go | 20 +- .../secretsmanager/api_op_DescribeSecret.go | 18 +- .../api_op_GetRandomPassword.go | 2 +- .../api_op_GetResourcePolicy.go | 8 +- .../secretsmanager/api_op_GetSecretValue.go | 8 +- .../api_op_ListSecretVersionIds.go | 6 +- service/secretsmanager/api_op_ListSecrets.go | 6 +- .../api_op_PutResourcePolicy.go | 12 +- .../secretsmanager/api_op_PutSecretValue.go | 104 +- .../secretsmanager/api_op_RestoreSecret.go | 4 +- service/secretsmanager/api_op_RotateSecret.go | 41 +- service/secretsmanager/api_op_TagResource.go | 54 +- .../secretsmanager/api_op_UntagResource.go | 10 +- service/secretsmanager/api_op_UpdateSecret.go | 71 +- .../api_op_UpdateSecretVersionStage.go | 
6 +- service/secretsmanager/types/errors.go | 4 +- .../securityhub/api_op_BatchImportFindings.go | 18 +- .../securityhub/api_op_BatchUpdateFindings.go | 51 +- .../securityhub/api_op_EnableSecurityHub.go | 12 +- service/securityhub/doc.go | 22 +- service/securityhub/types/enums.go | 118 +- service/securityhub/types/types.go | 408 ++-- .../serverlessapplicationrepository/doc.go | 2 +- .../types/enums.go | 8 +- .../api_op_AcceptPortfolioShare.go | 24 +- .../api_op_AssociatePrincipalWithPortfolio.go | 7 +- .../api_op_AssociateProductWithPortfolio.go | 7 +- ...teServiceActionWithProvisioningArtifact.go | 7 +- ...teServiceActionWithProvisioningArtifact.go | 7 +- ...teServiceActionFromProvisioningArtifact.go | 7 +- service/servicecatalog/api_op_CopyProduct.go | 7 +- .../servicecatalog/api_op_CreateConstraint.go | 19 +- .../servicecatalog/api_op_CreatePortfolio.go | 7 +- .../api_op_CreatePortfolioShare.go | 7 +- .../servicecatalog/api_op_CreateProduct.go | 7 +- .../api_op_CreateProvisionedProductPlan.go | 7 +- .../api_op_CreateProvisioningArtifact.go | 7 +- .../api_op_CreateServiceAction.go | 7 +- .../servicecatalog/api_op_DeleteConstraint.go | 7 +- .../servicecatalog/api_op_DeletePortfolio.go | 7 +- .../api_op_DeletePortfolioShare.go | 7 +- .../servicecatalog/api_op_DeleteProduct.go | 7 +- .../api_op_DeleteProvisionedProductPlan.go | 7 +- .../api_op_DeleteProvisioningArtifact.go | 7 +- .../api_op_DeleteServiceAction.go | 7 +- .../api_op_DescribeConstraint.go | 7 +- .../api_op_DescribeCopyProductStatus.go | 7 +- .../api_op_DescribePortfolio.go | 7 +- .../servicecatalog/api_op_DescribeProduct.go | 7 +- .../api_op_DescribeProductAsAdmin.go | 7 +- .../api_op_DescribeProductView.go | 7 +- .../api_op_DescribeProvisionedProduct.go | 7 +- .../api_op_DescribeProvisionedProductPlan.go | 7 +- .../api_op_DescribeProvisioningArtifact.go | 7 +- .../api_op_DescribeProvisioningParameters.go | 7 +- .../servicecatalog/api_op_DescribeRecord.go | 7 +- .../api_op_DescribeServiceAction.go | 7 +- ...escribeServiceActionExecutionParameters.go | 7 +- ...i_op_DisassociatePrincipalFromPortfolio.go | 7 +- ...api_op_DisassociateProductFromPortfolio.go | 7 +- ...teServiceActionFromProvisioningArtifact.go | 7 +- .../api_op_ExecuteProvisionedProductPlan.go | 7 +- ..._ExecuteProvisionedProductServiceAction.go | 7 +- .../api_op_GetProvisionedProductOutputs.go | 7 +- .../api_op_ListAcceptedPortfolioShares.go | 15 +- .../api_op_ListBudgetsForResource.go | 7 +- .../api_op_ListConstraintsForPortfolio.go | 7 +- .../servicecatalog/api_op_ListLaunchPaths.go | 7 +- .../api_op_ListOrganizationPortfolioAccess.go | 21 +- .../api_op_ListPortfolioAccess.go | 7 +- .../servicecatalog/api_op_ListPortfolios.go | 7 +- .../api_op_ListPortfoliosForProduct.go | 7 +- .../api_op_ListPrincipalsForPortfolio.go | 7 +- .../api_op_ListProvisionedProductPlans.go | 7 +- .../api_op_ListProvisioningArtifacts.go | 7 +- ...stProvisioningArtifactsForServiceAction.go | 7 +- .../api_op_ListRecordHistory.go | 7 +- .../api_op_ListResourcesForTagOption.go | 4 +- .../api_op_ListServiceActions.go | 7 +- ...stServiceActionsForProvisioningArtifact.go | 7 +- ...ListStackInstancesForProvisionedProduct.go | 7 +- .../servicecatalog/api_op_ProvisionProduct.go | 7 +- .../api_op_RejectPortfolioShare.go | 24 +- .../api_op_ScanProvisionedProducts.go | 7 +- .../servicecatalog/api_op_SearchProducts.go | 7 +- .../api_op_SearchProductsAsAdmin.go | 7 +- .../api_op_SearchProvisionedProducts.go | 7 +- .../api_op_TerminateProvisionedProduct.go | 7 +- 
.../servicecatalog/api_op_UpdateConstraint.go | 7 +- .../servicecatalog/api_op_UpdatePortfolio.go | 7 +- .../servicecatalog/api_op_UpdateProduct.go | 7 +- .../api_op_UpdateProvisionedProduct.go | 7 +- ...i_op_UpdateProvisionedProductProperties.go | 7 +- .../api_op_UpdateProvisioningArtifact.go | 7 +- .../api_op_UpdateServiceAction.go | 7 +- service/servicecatalog/types/enums.go | 76 +- service/servicecatalog/types/types.go | 202 +- .../servicediscovery/api_op_CreateService.go | 38 +- .../api_op_RegisterInstance.go | 173 +- .../servicediscovery/api_op_UpdateService.go | 24 +- service/servicediscovery/types/enums.go | 28 +- service/servicediscovery/types/types.go | 513 +++-- ...api_op_DisassociateServiceQuotaTemplate.go | 6 +- service/servicequotas/types/enums.go | 18 +- service/ses/api_op_CloneReceiptRuleSet.go | 6 +- service/ses/api_op_CreateReceiptRuleSet.go | 6 +- .../ses/api_op_GetIdentityDkimAttributes.go | 10 +- service/ses/api_op_SendBulkTemplatedEmail.go | 34 +- service/ses/api_op_SendEmail.go | 20 +- service/ses/api_op_SendRawEmail.go | 107 +- service/ses/api_op_SendTemplatedEmail.go | 24 +- service/ses/api_op_VerifyDomainDkim.go | 6 +- service/ses/types/enums.go | 32 +- service/ses/types/errors.go | 8 +- service/ses/types/types.go | 141 +- service/sesv2/api_op_GetAccount.go | 18 +- .../api_op_PutAccountSuppressionAttributes.go | 4 +- ...p_PutConfigurationSetSuppressionOptions.go | 11 +- ...p_PutEmailIdentityDkimSigningAttributes.go | 52 +- ...i_op_PutEmailIdentityMailFromAttributes.go | 12 +- service/sesv2/api_op_SendEmail.go | 16 +- service/sesv2/types/enums.go | 98 +- service/sesv2/types/types.go | 279 ++- service/sfn/api_op_CreateActivity.go | 18 +- service/sfn/api_op_CreateStateMachine.go | 20 +- service/sfn/api_op_DescribeActivity.go | 19 +- service/sfn/api_op_DescribeExecution.go | 19 +- service/sfn/api_op_DescribeStateMachine.go | 20 +- service/sfn/api_op_StartExecution.go | 18 +- service/sfn/types/enums.go | 2 +- service/sfn/types/types.go | 77 +- service/shield/api_op_CreateProtection.go | 22 +- service/shield/types/enums.go | 16 +- service/shield/types/types.go | 40 +- service/signer/api_op_StartSigningJob.go | 16 +- service/sms/api_op_CreateReplicationJob.go | 16 +- service/sms/api_op_UpdateReplicationJob.go | 16 +- service/sms/doc.go | 8 +- service/sms/types/enums.go | 16 +- service/sms/types/types.go | 48 +- service/snowball/api_op_CreateCluster.go | 41 +- service/snowball/api_op_CreateJob.go | 14 +- service/snowball/types/enums.go | 80 +- service/snowball/types/types.go | 26 +- .../api_op_CheckIfPhoneNumberIsOptedOut.go | 8 +- .../sns/api_op_CreatePlatformApplication.go | 31 +- service/sns/api_op_CreateTopic.go | 30 +- service/sns/api_op_GetEndpointAttributes.go | 6 +- ...api_op_GetPlatformApplicationAttributes.go | 20 +- .../sns/api_op_GetSubscriptionAttributes.go | 52 +- service/sns/api_op_GetTopicAttributes.go | 32 +- service/sns/api_op_Publish.go | 71 +- service/sns/api_op_SetEndpointAttributes.go | 8 +- ...api_op_SetPlatformApplicationAttributes.go | 26 +- service/sns/api_op_SetSMSAttributes.go | 34 +- .../sns/api_op_SetSubscriptionAttributes.go | 28 +- service/sns/api_op_SetTopicAttributes.go | 30 +- service/sns/api_op_Subscribe.go | 103 +- service/sns/api_op_TagResource.go | 20 +- service/sqs/api_op_AddPermission.go | 14 +- service/sqs/api_op_ChangeMessageVisibility.go | 38 +- service/sqs/api_op_CreateQueue.go | 186 +- service/sqs/api_op_GetQueueAttributes.go | 95 +- service/sqs/api_op_ReceiveMessage.go | 129 +- service/sqs/api_op_RemovePermission.go | 12 +- 
service/sqs/api_op_SendMessage.go | 58 +- service/sqs/api_op_SetQueueAttributes.go | 141 +- service/sqs/api_op_TagQueue.go | 12 +- service/sqs/doc.go | 34 +- service/sqs/types/types.go | 85 +- service/ssm/api_op_CreateActivation.go | 8 +- service/ssm/api_op_CreateDocument.go | 20 +- service/ssm/api_op_CreateMaintenanceWindow.go | 6 +- service/ssm/api_op_CreatePatchBaseline.go | 12 +- service/ssm/api_op_DescribeOpsItems.go | 44 +- service/ssm/api_op_DescribePatchGroups.go | 20 +- service/ssm/api_op_GetCommandInvocation.go | 60 +- service/ssm/api_op_LabelParameterVersion.go | 40 +- service/ssm/api_op_PutComplianceItems.go | 50 +- service/ssm/api_op_PutParameter.go | 74 +- ...pi_op_RegisterTaskWithMaintenanceWindow.go | 6 +- .../ssm/api_op_StartAutomationExecution.go | 8 +- .../api_op_UpdateMaintenanceWindowTarget.go | 21 +- .../ssm/api_op_UpdateMaintenanceWindowTask.go | 48 +- service/ssm/api_op_UpdatePatchBaseline.go | 8 +- service/ssm/api_op_UpdateServiceSetting.go | 16 +- service/ssm/types/enums.go | 276 +-- service/ssm/types/types.go | 345 ++- service/ssoadmin/types/enums.go | 16 +- .../storagegateway/api_op_ActivateGateway.go | 17 +- .../api_op_AddTagsToResource.go | 14 +- .../api_op_AddWorkingStorage.go | 2 +- .../api_op_CreateNFSFileShare.go | 11 +- .../storagegateway/api_op_CreateSnapshot.go | 4 +- .../api_op_CreateStorediSCSIVolume.go | 10 +- .../api_op_DeleteBandwidthRateLimit.go | 2 +- .../api_op_DeleteChapCredentials.go | 5 +- .../api_op_DescribeChapCredentials.go | 8 +- .../api_op_DescribeMaintenanceStartTime.go | 10 +- .../api_op_DescribeSMBSettings.go | 36 +- .../api_op_DescribeStorediSCSIVolumes.go | 72 +- service/storagegateway/api_op_JoinDomain.go | 30 +- service/storagegateway/api_op_ListGateways.go | 4 +- .../storagegateway/api_op_ListLocalDisks.go | 2 +- service/storagegateway/api_op_ListTapes.go | 12 +- service/storagegateway/api_op_ListVolumes.go | 10 +- .../api_op_UpdateBandwidthRateLimit.go | 4 +- .../api_op_UpdateChapCredentials.go | 8 +- .../api_op_UpdateMaintenanceStartTime.go | 8 +- .../api_op_UpdateNFSFileShare.go | 27 +- .../api_op_UpdateSnapshotSchedule.go | 8 +- service/storagegateway/doc.go | 12 +- service/storagegateway/types/enums.go | 18 +- service/storagegateway/types/types.go | 9 +- service/sts/api_op_AssumeRole.go | 10 +- service/sts/api_op_AssumeRoleWithSAML.go | 8 +- .../sts/api_op_AssumeRoleWithWebIdentity.go | 10 +- .../sts/api_op_DecodeAuthorizationMessage.go | 18 +- service/sts/api_op_GetFederationToken.go | 6 +- service/sts/api_op_GetSessionToken.go | 8 +- service/support/api_op_AddAttachmentsToSet.go | 12 +- .../support/api_op_AddCommunicationToCase.go | 17 +- service/support/api_op_CreateCase.go | 13 +- service/support/api_op_DescribeAttachment.go | 10 +- service/support/api_op_DescribeCases.go | 24 +- .../support/api_op_DescribeCommunications.go | 12 +- service/support/api_op_DescribeServices.go | 6 +- .../support/api_op_DescribeSeverityLevels.go | 12 +- ...cribeTrustedAdvisorCheckRefreshStatuses.go | 12 +- ...pi_op_DescribeTrustedAdvisorCheckResult.go | 36 +- ...op_DescribeTrustedAdvisorCheckSummaries.go | 12 +- .../api_op_DescribeTrustedAdvisorChecks.go | 4 +- .../api_op_RefreshTrustedAdvisorCheck.go | 4 +- service/support/api_op_ResolveCase.go | 8 +- service/support/doc.go | 47 +- service/support/types/types.go | 121 +- .../api_op_CountClosedWorkflowExecutions.go | 20 +- .../swf/api_op_CountOpenWorkflowExecutions.go | 35 +- .../swf/api_op_CountPendingActivityTasks.go | 25 +- .../swf/api_op_CountPendingDecisionTasks.go | 25 +- 
service/swf/api_op_DeprecateActivityType.go | 30 +- service/swf/api_op_DeprecateDomain.go | 10 +- service/swf/api_op_DeprecateWorkflowType.go | 24 +- service/swf/api_op_DescribeActivityType.go | 28 +- service/swf/api_op_DescribeDomain.go | 22 +- .../swf/api_op_DescribeWorkflowExecution.go | 22 +- service/swf/api_op_DescribeWorkflowType.go | 35 +- .../swf/api_op_GetWorkflowExecutionHistory.go | 21 +- service/swf/api_op_ListActivityTypes.go | 22 +- .../api_op_ListClosedWorkflowExecutions.go | 35 +- service/swf/api_op_ListDomains.go | 26 +- .../swf/api_op_ListOpenWorkflowExecutions.go | 35 +- service/swf/api_op_ListWorkflowTypes.go | 22 +- service/swf/api_op_PollForActivityTask.go | 8 +- service/swf/api_op_PollForDecisionTask.go | 16 +- .../swf/api_op_RecordActivityTaskHeartbeat.go | 22 +- service/swf/api_op_RegisterActivityType.go | 34 +- service/swf/api_op_RegisterDomain.go | 20 +- service/swf/api_op_RegisterWorkflowType.go | 46 +- .../api_op_RequestCancelWorkflowExecution.go | 22 +- .../swf/api_op_RespondActivityTaskCanceled.go | 22 +- .../api_op_RespondActivityTaskCompleted.go | 22 +- .../swf/api_op_RespondActivityTaskFailed.go | 22 +- service/swf/api_op_SignalWorkflowExecution.go | 22 +- service/swf/api_op_StartWorkflowExecution.go | 72 +- .../swf/api_op_TerminateWorkflowExecution.go | 47 +- service/swf/api_op_UndeprecateActivityType.go | 30 +- service/swf/api_op_UndeprecateDomain.go | 22 +- service/swf/api_op_UndeprecateWorkflowType.go | 24 +- service/swf/types/enums.go | 154 +- service/swf/types/types.go | 755 ++++--- service/synthetics/api_op_CreateCanary.go | 17 +- service/synthetics/api_op_DeleteCanary.go | 33 +- service/synthetics/api_op_UpdateCanary.go | 17 +- service/synthetics/types/enums.go | 6 +- service/textract/api_op_AnalyzeDocument.go | 16 +- .../textract/api_op_GetDocumentAnalysis.go | 8 +- service/textract/types/enums.go | 36 +- service/textract/types/types.go | 63 +- .../api_op_DescribeEndpoints.go | 10 +- service/timestreamquery/api_op_Query.go | 6 +- service/timestreamquery/types/enums.go | 22 +- .../api_op_DescribeEndpoints.go | 10 +- .../timestreamwrite/api_op_UpdateDatabase.go | 10 +- service/timestreamwrite/types/errors.go | 8 +- service/timestreamwrite/types/types.go | 18 +- .../api_op_StartMedicalTranscriptionJob.go | 30 +- .../api_op_StartTranscriptionJob.go | 16 +- service/transcribe/types/enums.go | 100 +- service/transcribe/types/types.go | 78 +- service/transfer/api_op_CreateServer.go | 28 +- .../transfer/api_op_TestIdentityProvider.go | 8 +- service/transfer/api_op_UpdateServer.go | 28 +- service/transfer/types/enums.go | 22 +- service/transfer/types/types.go | 6 +- .../api_op_StartTextTranslationJob.go | 29 +- service/translate/types/enums.go | 14 +- service/translate/types/types.go | 18 +- service/waf/api_op_CreateByteMatchSet.go | 10 +- service/waf/api_op_CreateGeoMatchSet.go | 10 +- service/waf/api_op_CreateIPSet.go | 22 +- service/waf/api_op_CreateRateBasedRule.go | 63 +- service/waf/api_op_CreateRegexMatchSet.go | 22 +- service/waf/api_op_CreateRegexPatternSet.go | 14 +- service/waf/api_op_CreateRule.go | 50 +- service/waf/api_op_CreateRuleGroup.go | 6 +- service/waf/api_op_CreateSizeConstraintSet.go | 24 +- .../waf/api_op_CreateSqlInjectionMatchSet.go | 14 +- service/waf/api_op_CreateWebACL.go | 18 +- .../waf/api_op_CreateWebACLMigrationStack.go | 18 +- service/waf/api_op_CreateXssMatchSet.go | 12 +- service/waf/api_op_DeleteByteMatchSet.go | 6 +- service/waf/api_op_DeleteGeoMatchSet.go | 8 +- service/waf/api_op_DeleteIPSet.go | 10 +- 
service/waf/api_op_DeleteRateBasedRule.go | 10 +- service/waf/api_op_DeleteRegexMatchSet.go | 8 +- service/waf/api_op_DeleteRule.go | 12 +- service/waf/api_op_DeleteRuleGroup.go | 8 +- service/waf/api_op_DeleteSizeConstraintSet.go | 14 +- .../waf/api_op_DeleteSqlInjectionMatchSet.go | 10 +- service/waf/api_op_DeleteWebACL.go | 13 +- service/waf/api_op_DeleteXssMatchSet.go | 6 +- service/waf/api_op_GetByteMatchSet.go | 8 +- service/waf/api_op_GetChangeTokenStatus.go | 10 +- service/waf/api_op_GetIPSet.go | 4 +- service/waf/api_op_GetRule.go | 8 +- service/waf/api_op_GetSampledRequests.go | 9 +- service/waf/api_op_GetSizeConstraintSet.go | 10 +- service/waf/api_op_GetSqlInjectionMatchSet.go | 10 +- service/waf/api_op_GetWebACL.go | 13 +- service/waf/api_op_GetXssMatchSet.go | 6 +- service/waf/api_op_PutLoggingConfiguration.go | 6 +- service/waf/api_op_PutPermissionPolicy.go | 37 +- service/waf/api_op_UpdateByteMatchSet.go | 65 +- service/waf/api_op_UpdateGeoMatchSet.go | 30 +- service/waf/api_op_UpdateIPSet.go | 56 +- service/waf/api_op_UpdateRateBasedRule.go | 28 +- service/waf/api_op_UpdateRegexMatchSet.go | 53 +- service/waf/api_op_UpdateRegexPatternSet.go | 43 +- service/waf/api_op_UpdateRule.go | 52 +- service/waf/api_op_UpdateRuleGroup.go | 24 +- service/waf/api_op_UpdateSizeConstraintSet.go | 72 +- .../waf/api_op_UpdateSqlInjectionMatchSet.go | 30 +- service/waf/api_op_UpdateWebACL.go | 98 +- service/waf/api_op_UpdateXssMatchSet.go | 46 +- service/waf/types/enums.go | 114 +- service/waf/types/errors.go | 159 +- service/waf/types/types.go | 664 +++--- service/wafregional/api_op_AssociateWebACL.go | 6 +- .../wafregional/api_op_CreateByteMatchSet.go | 10 +- .../wafregional/api_op_CreateGeoMatchSet.go | 10 +- service/wafregional/api_op_CreateIPSet.go | 22 +- .../wafregional/api_op_CreateRateBasedRule.go | 63 +- .../wafregional/api_op_CreateRegexMatchSet.go | 22 +- .../api_op_CreateRegexPatternSet.go | 14 +- service/wafregional/api_op_CreateRule.go | 50 +- service/wafregional/api_op_CreateRuleGroup.go | 6 +- .../api_op_CreateSizeConstraintSet.go | 24 +- .../api_op_CreateSqlInjectionMatchSet.go | 14 +- service/wafregional/api_op_CreateWebACL.go | 18 +- .../api_op_CreateWebACLMigrationStack.go | 18 +- .../wafregional/api_op_CreateXssMatchSet.go | 12 +- .../wafregional/api_op_DeleteByteMatchSet.go | 6 +- .../wafregional/api_op_DeleteGeoMatchSet.go | 8 +- service/wafregional/api_op_DeleteIPSet.go | 10 +- .../wafregional/api_op_DeleteRateBasedRule.go | 10 +- .../wafregional/api_op_DeleteRegexMatchSet.go | 8 +- service/wafregional/api_op_DeleteRule.go | 12 +- service/wafregional/api_op_DeleteRuleGroup.go | 8 +- .../api_op_DeleteSizeConstraintSet.go | 14 +- .../api_op_DeleteSqlInjectionMatchSet.go | 10 +- service/wafregional/api_op_DeleteWebACL.go | 13 +- .../wafregional/api_op_DeleteXssMatchSet.go | 6 +- .../wafregional/api_op_DisassociateWebACL.go | 6 +- service/wafregional/api_op_GetByteMatchSet.go | 8 +- .../api_op_GetChangeTokenStatus.go | 10 +- service/wafregional/api_op_GetIPSet.go | 4 +- service/wafregional/api_op_GetRule.go | 8 +- .../wafregional/api_op_GetSampledRequests.go | 9 +- .../api_op_GetSizeConstraintSet.go | 10 +- .../api_op_GetSqlInjectionMatchSet.go | 10 +- service/wafregional/api_op_GetWebACL.go | 13 +- .../api_op_GetWebACLForResource.go | 6 +- service/wafregional/api_op_GetXssMatchSet.go | 6 +- .../api_op_PutLoggingConfiguration.go | 6 +- .../wafregional/api_op_PutPermissionPolicy.go | 37 +- .../wafregional/api_op_UpdateByteMatchSet.go | 65 +- 
.../wafregional/api_op_UpdateGeoMatchSet.go | 30 +- service/wafregional/api_op_UpdateIPSet.go | 56 +- .../wafregional/api_op_UpdateRateBasedRule.go | 28 +- .../wafregional/api_op_UpdateRegexMatchSet.go | 53 +- .../api_op_UpdateRegexPatternSet.go | 43 +- service/wafregional/api_op_UpdateRule.go | 52 +- service/wafregional/api_op_UpdateRuleGroup.go | 24 +- .../api_op_UpdateSizeConstraintSet.go | 72 +- .../api_op_UpdateSqlInjectionMatchSet.go | 30 +- service/wafregional/api_op_UpdateWebACL.go | 98 +- .../wafregional/api_op_UpdateXssMatchSet.go | 46 +- service/wafregional/types/enums.go | 118 +- service/wafregional/types/errors.go | 159 +- service/wafregional/types/types.go | 664 +++--- service/wafv2/api_op_AssociateWebACL.go | 10 +- service/wafv2/api_op_CheckCapacity.go | 6 +- service/wafv2/api_op_CreateIPSet.go | 25 +- service/wafv2/api_op_CreateRegexPatternSet.go | 6 +- service/wafv2/api_op_CreateRuleGroup.go | 6 +- service/wafv2/api_op_CreateWebACL.go | 6 +- service/wafv2/api_op_DeleteIPSet.go | 6 +- service/wafv2/api_op_DeleteRegexPatternSet.go | 6 +- service/wafv2/api_op_DeleteRuleGroup.go | 6 +- service/wafv2/api_op_DeleteWebACL.go | 6 +- .../wafv2/api_op_DescribeManagedRuleGroup.go | 6 +- service/wafv2/api_op_DisassociateWebACL.go | 10 +- service/wafv2/api_op_GetIPSet.go | 6 +- ...api_op_GetRateBasedStatementManagedKeys.go | 6 +- service/wafv2/api_op_GetRegexPatternSet.go | 6 +- service/wafv2/api_op_GetRuleGroup.go | 6 +- service/wafv2/api_op_GetSampledRequests.go | 6 +- service/wafv2/api_op_GetWebACL.go | 6 +- .../api_op_ListAvailableManagedRuleGroups.go | 6 +- service/wafv2/api_op_ListIPSets.go | 6 +- .../wafv2/api_op_ListLoggingConfigurations.go | 6 +- service/wafv2/api_op_ListRegexPatternSets.go | 6 +- service/wafv2/api_op_ListRuleGroups.go | 6 +- service/wafv2/api_op_ListWebACLs.go | 6 +- .../wafv2/api_op_PutLoggingConfiguration.go | 14 +- service/wafv2/api_op_PutPermissionPolicy.go | 21 +- service/wafv2/api_op_UpdateIPSet.go | 25 +- service/wafv2/api_op_UpdateRegexPatternSet.go | 6 +- service/wafv2/api_op_UpdateRuleGroup.go | 6 +- service/wafv2/api_op_UpdateWebACL.go | 6 +- service/wafv2/doc.go | 47 +- service/wafv2/types/enums.go | 122 +- service/wafv2/types/errors.go | 36 +- service/wafv2/types/types.go | 243 ++- service/workdocs/doc.go | 22 +- service/workdocs/types/enums.go | 98 +- service/worklink/types/enums.go | 32 +- service/workmail/doc.go | 31 +- service/workmail/types/enums.go | 28 +- .../api_op_UpdateConnectionAliasPermission.go | 8 +- .../api_op_UpdateWorkspaceImagePermission.go | 12 +- service/workspaces/types/enums.go | 76 +- service/workspaces/types/types.go | 4 +- service/xray/api_op_CreateGroup.go | 26 +- service/xray/api_op_CreateSamplingRule.go | 23 +- service/xray/api_op_PutEncryptionConfig.go | 14 +- service/xray/api_op_PutTraceSegments.go | 52 +- service/xray/api_op_TagResource.go | 22 +- service/xray/api_op_UpdateGroup.go | 8 +- service/xray/types/types.go | 40 +- 2067 files changed, 43131 insertions(+), 44243 deletions(-) diff --git a/internal/protocoltest/awsrestjson/api_op_GreetingWithErrors.go b/internal/protocoltest/awsrestjson/api_op_GreetingWithErrors.go index 03b26ca1304..6d5ff6eded7 100644 --- a/internal/protocoltest/awsrestjson/api_op_GreetingWithErrors.go +++ b/internal/protocoltest/awsrestjson/api_op_GreetingWithErrors.go @@ -11,19 +11,19 @@ import ( // This operation has four possible return values: // -// * A successful response in -// the form of GreetingWithErrorsOutput +// * A successful response in the +// form of GreetingWithErrorsOutput 
// -// * An InvalidGreeting error. +// * An InvalidGreeting error. // -// * A -// BadRequest error. +// * A BadRequest +// error. // -// * A FooError. +// * A FooError. // -// Implementations must be able to -// successfully take a response and properly (de)serialize successful and error -// responses based on the the presence of the +// Implementations must be able to successfully take a +// response and properly (de)serialize successful and error responses based on the +// the presence of the func (c *Client) GreetingWithErrors(ctx context.Context, params *GreetingWithErrorsInput, optFns ...func(*Options)) (*GreetingWithErrorsOutput, error) { if params == nil { params = &GreetingWithErrorsInput{} diff --git a/internal/protocoltest/awsrestjson/api_op_JsonLists.go b/internal/protocoltest/awsrestjson/api_op_JsonLists.go index f5add02ac1c..a132d028e59 100644 --- a/internal/protocoltest/awsrestjson/api_op_JsonLists.go +++ b/internal/protocoltest/awsrestjson/api_op_JsonLists.go @@ -14,14 +14,14 @@ import ( // This test case serializes JSON lists for the following cases for both input and // output: // -// * Normal JSON lists. +// * Normal JSON lists. // -// * Normal JSON sets. +// * Normal JSON sets. // -// * JSON lists of -// lists. +// * JSON lists of lists. // -// * Lists of structures. +// * +// Lists of structures. func (c *Client) JsonLists(ctx context.Context, params *JsonListsInput, optFns ...func(*Options)) (*JsonListsOutput, error) { if params == nil { params = &JsonListsInput{} diff --git a/internal/protocoltest/ec2query/api_op_GreetingWithErrors.go b/internal/protocoltest/ec2query/api_op_GreetingWithErrors.go index 1f97ea47568..52887b06a9b 100644 --- a/internal/protocoltest/ec2query/api_op_GreetingWithErrors.go +++ b/internal/protocoltest/ec2query/api_op_GreetingWithErrors.go @@ -11,13 +11,13 @@ import ( // This operation has three possible return values: // -// * A successful response in -// the form of GreetingWithErrorsOutput +// * A successful response in the +// form of GreetingWithErrorsOutput // -// * An InvalidGreeting error. +// * An InvalidGreeting error. // -// * A -// BadRequest error. +// * A BadRequest +// error. func (c *Client) GreetingWithErrors(ctx context.Context, params *GreetingWithErrorsInput, optFns ...func(*Options)) (*GreetingWithErrorsOutput, error) { if params == nil { params = &GreetingWithErrorsInput{} diff --git a/internal/protocoltest/ec2query/api_op_QueryTimestamps.go b/internal/protocoltest/ec2query/api_op_QueryTimestamps.go index 44b8baf02bc..3541f302e0b 100644 --- a/internal/protocoltest/ec2query/api_op_QueryTimestamps.go +++ b/internal/protocoltest/ec2query/api_op_QueryTimestamps.go @@ -12,14 +12,14 @@ import ( // This test serializes timestamps. // -// * Timestamps are serialized as RFC 3339 +// * Timestamps are serialized as RFC 3339 // date-time values by default. // -// * A timestampFormat trait on a member changes -// the format. +// * A timestampFormat trait on a member changes the +// format. // -// * A timestampFormat trait on the shape targeted by the member -// changes the format. +// * A timestampFormat trait on the shape targeted by the member changes +// the format. 
func (c *Client) QueryTimestamps(ctx context.Context, params *QueryTimestampsInput, optFns ...func(*Options)) (*QueryTimestampsOutput, error) { if params == nil { params = &QueryTimestampsInput{} diff --git a/internal/protocoltest/ec2query/api_op_XmlLists.go b/internal/protocoltest/ec2query/api_op_XmlLists.go index 6cebad62ce4..0cd3e7942ec 100644 --- a/internal/protocoltest/ec2query/api_op_XmlLists.go +++ b/internal/protocoltest/ec2query/api_op_XmlLists.go @@ -14,21 +14,21 @@ import ( // This test case serializes XML lists for the following cases for both input and // output: // -// * Normal XML lists. +// * Normal XML lists. // -// * Normal XML sets. +// * Normal XML sets. // -// * XML lists of -// lists. +// * XML lists of lists. // -// * XML lists with @xmlName on its members +// * XML +// lists with @xmlName on its members // -// * Flattened XML -// lists. +// * Flattened XML lists. // -// * Flattened XML lists with @xmlName. +// * Flattened XML +// lists with @xmlName. // -// * Lists of structures. +// * Lists of structures. func (c *Client) XmlLists(ctx context.Context, params *XmlListsInput, optFns ...func(*Options)) (*XmlListsOutput, error) { if params == nil { params = &XmlListsInput{} diff --git a/internal/protocoltest/jsonrpc/api_op_GreetingWithErrors.go b/internal/protocoltest/jsonrpc/api_op_GreetingWithErrors.go index dd29c2f30dc..1548dce19ac 100644 --- a/internal/protocoltest/jsonrpc/api_op_GreetingWithErrors.go +++ b/internal/protocoltest/jsonrpc/api_op_GreetingWithErrors.go @@ -12,16 +12,16 @@ import ( // This operation has three possible return values: // -// * A successful response in -// the form of GreetingWithErrorsOutput +// * A successful response in the +// form of GreetingWithErrorsOutput // -// * An InvalidGreeting error. +// * An InvalidGreeting error. // -// * A -// ComplexError error. +// * A ComplexError +// error. // -// Implementations must be able to successfully take a -// response and properly deserialize successful and error responses. +// Implementations must be able to successfully take a response and +// properly deserialize successful and error responses. func (c *Client) GreetingWithErrors(ctx context.Context, params *GreetingWithErrorsInput, optFns ...func(*Options)) (*GreetingWithErrorsOutput, error) { if params == nil { params = &GreetingWithErrorsInput{} diff --git a/internal/protocoltest/jsonrpc10/api_op_GreetingWithErrors.go b/internal/protocoltest/jsonrpc10/api_op_GreetingWithErrors.go index a03bf57ad61..927c69acb5c 100644 --- a/internal/protocoltest/jsonrpc10/api_op_GreetingWithErrors.go +++ b/internal/protocoltest/jsonrpc10/api_op_GreetingWithErrors.go @@ -11,16 +11,16 @@ import ( // This operation has three possible return values: // -// * A successful response in -// the form of GreetingWithErrorsOutput +// * A successful response in the +// form of GreetingWithErrorsOutput // -// * An InvalidGreeting error. +// * An InvalidGreeting error. // -// * A -// ComplexError error. +// * A ComplexError +// error. // -// Implementations must be able to successfully take a -// response and properly deserialize successful and error responses. +// Implementations must be able to successfully take a response and +// properly deserialize successful and error responses. 
func (c *Client) GreetingWithErrors(ctx context.Context, params *GreetingWithErrorsInput, optFns ...func(*Options)) (*GreetingWithErrorsOutput, error) { if params == nil { params = &GreetingWithErrorsInput{} diff --git a/internal/protocoltest/query/api_op_GreetingWithErrors.go b/internal/protocoltest/query/api_op_GreetingWithErrors.go index 2c8d590f5a6..8297b4f2a6e 100644 --- a/internal/protocoltest/query/api_op_GreetingWithErrors.go +++ b/internal/protocoltest/query/api_op_GreetingWithErrors.go @@ -11,13 +11,13 @@ import ( // This operation has three possible return values: // -// * A successful response in -// the form of GreetingWithErrorsOutput +// * A successful response in the +// form of GreetingWithErrorsOutput // -// * An InvalidGreeting error. +// * An InvalidGreeting error. // -// * A -// BadRequest error. +// * A BadRequest +// error. func (c *Client) GreetingWithErrors(ctx context.Context, params *GreetingWithErrorsInput, optFns ...func(*Options)) (*GreetingWithErrorsOutput, error) { if params == nil { params = &GreetingWithErrorsInput{} diff --git a/internal/protocoltest/query/api_op_QueryTimestamps.go b/internal/protocoltest/query/api_op_QueryTimestamps.go index 93cbf0cc69f..5ffb017aa95 100644 --- a/internal/protocoltest/query/api_op_QueryTimestamps.go +++ b/internal/protocoltest/query/api_op_QueryTimestamps.go @@ -12,14 +12,14 @@ import ( // This test serializes timestamps. // -// * Timestamps are serialized as RFC 3339 +// * Timestamps are serialized as RFC 3339 // date-time values by default. // -// * A timestampFormat trait on a member changes -// the format. +// * A timestampFormat trait on a member changes the +// format. // -// * A timestampFormat trait on the shape targeted by the member -// changes the format. +// * A timestampFormat trait on the shape targeted by the member changes +// the format. func (c *Client) QueryTimestamps(ctx context.Context, params *QueryTimestampsInput, optFns ...func(*Options)) (*QueryTimestampsOutput, error) { if params == nil { params = &QueryTimestampsInput{} diff --git a/internal/protocoltest/query/api_op_XmlLists.go b/internal/protocoltest/query/api_op_XmlLists.go index 752fae59d5e..0da24b6d17c 100644 --- a/internal/protocoltest/query/api_op_XmlLists.go +++ b/internal/protocoltest/query/api_op_XmlLists.go @@ -14,21 +14,21 @@ import ( // This test case serializes XML lists for the following cases for both input and // output: // -// * Normal XML lists. +// * Normal XML lists. // -// * Normal XML sets. +// * Normal XML sets. // -// * XML lists of -// lists. +// * XML lists of lists. // -// * XML lists with @xmlName on its members +// * XML +// lists with @xmlName on its members // -// * Flattened XML -// lists. +// * Flattened XML lists. // -// * Flattened XML lists with @xmlName. +// * Flattened XML +// lists with @xmlName. // -// * Lists of structures. +// * Lists of structures. 
func (c *Client) XmlLists(ctx context.Context, params *XmlListsInput, optFns ...func(*Options)) (*XmlListsOutput, error) { if params == nil { params = &XmlListsInput{} diff --git a/internal/protocoltest/restxml/api_op_GreetingWithErrors.go b/internal/protocoltest/restxml/api_op_GreetingWithErrors.go index 067711006f9..18970408366 100644 --- a/internal/protocoltest/restxml/api_op_GreetingWithErrors.go +++ b/internal/protocoltest/restxml/api_op_GreetingWithErrors.go @@ -11,17 +11,17 @@ import ( // This operation has three possible return values: // -// * A successful response in -// the form of GreetingWithErrorsOutput +// * A successful response in the +// form of GreetingWithErrorsOutput // -// * An InvalidGreeting error. +// * An InvalidGreeting error. // -// * A -// BadRequest error. +// * A BadRequest +// error. // -// Implementations must be able to successfully take a response -// and properly (de)serialize successful and error responses based on the the -// presence of the +// Implementations must be able to successfully take a response and +// properly (de)serialize successful and error responses based on the the presence +// of the func (c *Client) GreetingWithErrors(ctx context.Context, params *GreetingWithErrorsInput, optFns ...func(*Options)) (*GreetingWithErrorsOutput, error) { if params == nil { params = &GreetingWithErrorsInput{} diff --git a/internal/protocoltest/restxml/api_op_XmlLists.go b/internal/protocoltest/restxml/api_op_XmlLists.go index d994c88572c..337ca37caac 100644 --- a/internal/protocoltest/restxml/api_op_XmlLists.go +++ b/internal/protocoltest/restxml/api_op_XmlLists.go @@ -14,21 +14,21 @@ import ( // This test case serializes XML lists for the following cases for both input and // output: // -// * Normal XML lists. +// * Normal XML lists. // -// * Normal XML sets. +// * Normal XML sets. // -// * XML lists of -// lists. +// * XML lists of lists. // -// * XML lists with @xmlName on its members +// * XML +// lists with @xmlName on its members // -// * Flattened XML -// lists. +// * Flattened XML lists. // -// * Flattened XML lists with @xmlName. +// * Flattened XML +// lists with @xmlName. // -// * Lists of structures. +// * Lists of structures. func (c *Client) XmlLists(ctx context.Context, params *XmlListsInput, optFns ...func(*Options)) (*XmlListsOutput, error) { if params == nil { params = &XmlListsInput{} diff --git a/service/accessanalyzer/types/enums.go b/service/accessanalyzer/types/enums.go index 3f876d74bd3..3940acbdc38 100644 --- a/service/accessanalyzer/types/enums.go +++ b/service/accessanalyzer/types/enums.go @@ -28,10 +28,10 @@ type FindingSourceType string // Enum values for FindingSourceType const ( - FindingSourceTypePolicy FindingSourceType = "POLICY" - FindingSourceTypeBucket_acl FindingSourceType = "BUCKET_ACL" - FindingSourceTypeS3_access_point FindingSourceType = "S3_ACCESS_POINT" - FindingSourceTypeKms_grant FindingSourceType = "KMS_GRANT" + FindingSourceTypePolicy FindingSourceType = "POLICY" + FindingSourceTypeBucketAcl FindingSourceType = "BUCKET_ACL" + FindingSourceTypeS3AccessPoint FindingSourceType = "S3_ACCESS_POINT" + FindingSourceTypeKmsGrant FindingSourceType = "KMS_GRANT" ) // Values returns all known values for FindingSourceType. 
Note that this can be @@ -106,10 +106,10 @@ type ReasonCode string // Enum values for ReasonCode const ( - ReasonCodeAws_service_access_disabled ReasonCode = "AWS_SERVICE_ACCESS_DISABLED" - ReasonCodeDelegated_administrator_deregistered ReasonCode = "DELEGATED_ADMINISTRATOR_DEREGISTERED" - ReasonCodeOrganization_deleted ReasonCode = "ORGANIZATION_DELETED" - ReasonCodeService_linked_role_creation_failed ReasonCode = "SERVICE_LINKED_ROLE_CREATION_FAILED" + ReasonCodeAwsServiceAccessDisabled ReasonCode = "AWS_SERVICE_ACCESS_DISABLED" + ReasonCodeDelegatedAdministratorDeregistered ReasonCode = "DELEGATED_ADMINISTRATOR_DEREGISTERED" + ReasonCodeOrganizationDeleted ReasonCode = "ORGANIZATION_DELETED" + ReasonCodeServiceLinkedRoleCreationFailed ReasonCode = "SERVICE_LINKED_ROLE_CREATION_FAILED" ) // Values returns all known values for ReasonCode. Note that this can be expanded @@ -172,10 +172,10 @@ type ValidationExceptionReason string // Enum values for ValidationExceptionReason const ( - ValidationExceptionReasonUnknown_operation ValidationExceptionReason = "unknownOperation" - ValidationExceptionReasonCannot_parse ValidationExceptionReason = "cannotParse" - ValidationExceptionReasonField_validation_failed ValidationExceptionReason = "fieldValidationFailed" - ValidationExceptionReasonOther ValidationExceptionReason = "other" + ValidationExceptionReasonUnknownOperation ValidationExceptionReason = "unknownOperation" + ValidationExceptionReasonCannotParse ValidationExceptionReason = "cannotParse" + ValidationExceptionReasonFieldValidationFailed ValidationExceptionReason = "fieldValidationFailed" + ValidationExceptionReasonOther ValidationExceptionReason = "other" ) // Values returns all known values for ValidationExceptionReason. Note that this diff --git a/service/acm/api_op_ImportCertificate.go b/service/acm/api_op_ImportCertificate.go index 2608e7a1b62..5d3c6fa6d47 100644 --- a/service/acm/api_op_ImportCertificate.go +++ b/service/acm/api_op_ImportCertificate.go @@ -25,48 +25,47 @@ import ( // certificates that you import. Note the following guidelines when importing third // party certificates: // -// * You must enter the private key that matches the +// * You must enter the private key that matches the // certificate you are importing. // -// * The private key must be unencrypted. You +// * The private key must be unencrypted. You // cannot import a private key that is protected by a password or a passphrase. // -// -// * If the certificate you are importing is not self-signed, you must enter its +// * +// If the certificate you are importing is not self-signed, you must enter its // certificate chain. // -// * If a certificate chain is included, the issuer must be -// the subject of one of the certificates in the chain. +// * If a certificate chain is included, the issuer must be the +// subject of one of the certificates in the chain. // -// * The certificate, -// private key, and certificate chain must be PEM-encoded. +// * The certificate, private +// key, and certificate chain must be PEM-encoded. // -// * The current time -// must be between the Not Before and Not After certificate fields. +// * The current time must be +// between the Not Before and Not After certificate fields. // -// * The -// Issuer field must not be empty. +// * The Issuer field +// must not be empty. // -// * The OCSP authority URL, if present, must -// not exceed 1000 characters. +// * The OCSP authority URL, if present, must not exceed 1000 +// characters. 
// -// * To import a new certificate, omit the -// CertificateArn argument. Include this argument only when you want to replace a -// previously imported certifica +// * To import a new certificate, omit the CertificateArn argument. +// Include this argument only when you want to replace a previously imported +// certifica // -// * When you import a certificate by using the -// CLI, you must specify the certificate, the certificate chain, and the private -// key by their file names preceded by file://. For example, you can specify a -// certificate saved in the C:\temp folder as -// file://C:\temp\certificate_to_import.pem. If you are making an HTTP or HTTPS -// Query request, include these arguments as BLOBs. +// * When you import a certificate by using the CLI, you must specify +// the certificate, the certificate chain, and the private key by their file names +// preceded by file://. For example, you can specify a certificate saved in the +// C:\temp folder as file://C:\temp\certificate_to_import.pem. If you are making an +// HTTP or HTTPS Query request, include these arguments as BLOBs. // -// * When you import a -// certificate by using an SDK, you must specify the certificate, the certificate -// chain, and the private key files in the manner required by the programming -// language you're using. +// * When you +// import a certificate by using an SDK, you must specify the certificate, the +// certificate chain, and the private key files in the manner required by the +// programming language you're using. // -// * The cryptographic algorithm of an imported +// * The cryptographic algorithm of an imported // certificate must match the algorithm of the signing CA. For example, if the // signing CA key type is RSA, then the certificate key type must also be // RSA. diff --git a/service/acm/api_op_RequestCertificate.go b/service/acm/api_op_RequestCertificate.go index 135fb325abe..701fcb48bfb 100644 --- a/service/acm/api_op_RequestCertificate.go +++ b/service/acm/api_op_RequestCertificate.go @@ -93,17 +93,17 @@ type RequestCertificateInput struct { // labels separated by periods. No label can be longer than 63 octets. Consider the // following examples: // - // * (63 octets).(63 octets).(63 octets).(61 octets) is - // legal because the total length is 253 octets (63+1+63+1+63+1+61) and no label - // exceeds 63 octets. + // * (63 octets).(63 octets).(63 octets).(61 octets) is legal + // because the total length is 253 octets (63+1+63+1+63+1+61) and no label exceeds + // 63 octets. // - // * (64 octets).(63 octets).(63 octets).(61 octets) is not - // legal because the total length exceeds 253 octets (64+1+63+1+63+1+61) and the - // first label exceeds 63 octets. + // * (64 octets).(63 octets).(63 octets).(61 octets) is not legal + // because the total length exceeds 253 octets (64+1+63+1+63+1+61) and the first + // label exceeds 63 octets. // - // * (63 octets).(63 octets).(63 octets).(62 - // octets) is not legal because the total length of the DNS name - // (63+1+63+1+63+1+62) exceeds 253 octets. + // * (63 octets).(63 octets).(63 octets).(62 octets) is + // not legal because the total length of the DNS name (63+1+63+1+63+1+62) exceeds + // 253 octets. SubjectAlternativeNames []*string // One or more resource tags to associate with the certificate. 
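Most hunks in this patch only re-wrap generated documentation, but the enum hunks (service/accessanalyzer/types/enums.go above and service/acm/types/enums.go just below) also rename the generated constants from underscore style to CamelCase. A minimal sketch of calling code written against the new names follows; it assumes the usual github.com/aws/aws-sdk-go-v2 module path, and the stillValidating helper is this sketch's own, not part of the SDK.

package example

import "github.com/aws/aws-sdk-go-v2/service/acm/types"

// stillValidating reports whether a certificate is still waiting on domain
// validation, written against the renamed CamelCase constant introduced by
// this update (previously CertificateStatusPending_validation).
func stillValidating(s types.CertificateStatus) bool {
	return s == types.CertificateStatusPendingValidation
}
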
diff --git a/service/acm/api_op_ResendValidationEmail.go b/service/acm/api_op_ResendValidationEmail.go index 903692699c2..619a11496ab 100644 --- a/service/acm/api_op_ResendValidationEmail.go +++ b/service/acm/api_op_ResendValidationEmail.go @@ -59,20 +59,20 @@ type ResendValidationEmailInput struct { // superdomain of the Domain value. For example, if you requested a certificate for // site.subdomain.example.com and specify a ValidationDomain of // subdomain.example.com, ACM sends email to the domain registrant, technical - // contact, and administrative contact in WHOIS and the following five addresses: - // + // contact, and administrative contact in WHOIS and the following five + // addresses: // // * admin@subdomain.example.com // - // * administrator@subdomain.example.com + // * + // administrator@subdomain.example.com // - // * - // hostmaster@subdomain.example.com + // * hostmaster@subdomain.example.com // - // * postmaster@subdomain.example.com + // * + // postmaster@subdomain.example.com // - // * - // webmaster@subdomain.example.com + // * webmaster@subdomain.example.com // // This member is required. ValidationDomain *string diff --git a/service/acm/types/enums.go b/service/acm/types/enums.go index ba9b0654278..b4cc9632c92 100644 --- a/service/acm/types/enums.go +++ b/service/acm/types/enums.go @@ -6,13 +6,13 @@ type CertificateStatus string // Enum values for CertificateStatus const ( - CertificateStatusPending_validation CertificateStatus = "PENDING_VALIDATION" - CertificateStatusIssued CertificateStatus = "ISSUED" - CertificateStatusInactive CertificateStatus = "INACTIVE" - CertificateStatusExpired CertificateStatus = "EXPIRED" - CertificateStatusValidation_timed_out CertificateStatus = "VALIDATION_TIMED_OUT" - CertificateStatusRevoked CertificateStatus = "REVOKED" - CertificateStatusFailed CertificateStatus = "FAILED" + CertificateStatusPendingValidation CertificateStatus = "PENDING_VALIDATION" + CertificateStatusIssued CertificateStatus = "ISSUED" + CertificateStatusInactive CertificateStatus = "INACTIVE" + CertificateStatusExpired CertificateStatus = "EXPIRED" + CertificateStatusValidationTimedOut CertificateStatus = "VALIDATION_TIMED_OUT" + CertificateStatusRevoked CertificateStatus = "REVOKED" + CertificateStatusFailed CertificateStatus = "FAILED" ) // Values returns all known values for CertificateStatus. Note that this can be @@ -53,9 +53,9 @@ type CertificateType string // Enum values for CertificateType const ( - CertificateTypeImported CertificateType = "IMPORTED" - CertificateTypeAmazon_issued CertificateType = "AMAZON_ISSUED" - CertificateTypePrivate CertificateType = "PRIVATE" + CertificateTypeImported CertificateType = "IMPORTED" + CertificateTypeAmazonIssued CertificateType = "AMAZON_ISSUED" + CertificateTypePrivate CertificateType = "PRIVATE" ) // Values returns all known values for CertificateType. Note that this can be @@ -73,9 +73,9 @@ type DomainStatus string // Enum values for DomainStatus const ( - DomainStatusPending_validation DomainStatus = "PENDING_VALIDATION" - DomainStatusSuccess DomainStatus = "SUCCESS" - DomainStatusFailed DomainStatus = "FAILED" + DomainStatusPendingValidation DomainStatus = "PENDING_VALIDATION" + DomainStatusSuccess DomainStatus = "SUCCESS" + DomainStatusFailed DomainStatus = "FAILED" ) // Values returns all known values for DomainStatus. 
Note that this can be expanded @@ -93,18 +93,18 @@ type ExtendedKeyUsageName string // Enum values for ExtendedKeyUsageName const ( - ExtendedKeyUsageNameTls_web_server_authentication ExtendedKeyUsageName = "TLS_WEB_SERVER_AUTHENTICATION" - ExtendedKeyUsageNameTls_web_client_authentication ExtendedKeyUsageName = "TLS_WEB_CLIENT_AUTHENTICATION" - ExtendedKeyUsageNameCode_signing ExtendedKeyUsageName = "CODE_SIGNING" - ExtendedKeyUsageNameEmail_protection ExtendedKeyUsageName = "EMAIL_PROTECTION" - ExtendedKeyUsageNameTime_stamping ExtendedKeyUsageName = "TIME_STAMPING" - ExtendedKeyUsageNameOcsp_signing ExtendedKeyUsageName = "OCSP_SIGNING" - ExtendedKeyUsageNameIpsec_end_system ExtendedKeyUsageName = "IPSEC_END_SYSTEM" - ExtendedKeyUsageNameIpsec_tunnel ExtendedKeyUsageName = "IPSEC_TUNNEL" - ExtendedKeyUsageNameIpsec_user ExtendedKeyUsageName = "IPSEC_USER" - ExtendedKeyUsageNameAny ExtendedKeyUsageName = "ANY" - ExtendedKeyUsageNameNone ExtendedKeyUsageName = "NONE" - ExtendedKeyUsageNameCustom ExtendedKeyUsageName = "CUSTOM" + ExtendedKeyUsageNameTlsWebServerAuthentication ExtendedKeyUsageName = "TLS_WEB_SERVER_AUTHENTICATION" + ExtendedKeyUsageNameTlsWebClientAuthentication ExtendedKeyUsageName = "TLS_WEB_CLIENT_AUTHENTICATION" + ExtendedKeyUsageNameCodeSigning ExtendedKeyUsageName = "CODE_SIGNING" + ExtendedKeyUsageNameEmailProtection ExtendedKeyUsageName = "EMAIL_PROTECTION" + ExtendedKeyUsageNameTimeStamping ExtendedKeyUsageName = "TIME_STAMPING" + ExtendedKeyUsageNameOcspSigning ExtendedKeyUsageName = "OCSP_SIGNING" + ExtendedKeyUsageNameIpsecEndSystem ExtendedKeyUsageName = "IPSEC_END_SYSTEM" + ExtendedKeyUsageNameIpsecTunnel ExtendedKeyUsageName = "IPSEC_TUNNEL" + ExtendedKeyUsageNameIpsecUser ExtendedKeyUsageName = "IPSEC_USER" + ExtendedKeyUsageNameAny ExtendedKeyUsageName = "ANY" + ExtendedKeyUsageNameNone ExtendedKeyUsageName = "NONE" + ExtendedKeyUsageNameCustom ExtendedKeyUsageName = "CUSTOM" ) // Values returns all known values for ExtendedKeyUsageName. 
Note that this can be @@ -131,23 +131,23 @@ type FailureReason string // Enum values for FailureReason const ( - FailureReasonNo_available_contacts FailureReason = "NO_AVAILABLE_CONTACTS" - FailureReasonAdditional_verification_required FailureReason = "ADDITIONAL_VERIFICATION_REQUIRED" - FailureReasonDomain_not_allowed FailureReason = "DOMAIN_NOT_ALLOWED" - FailureReasonInvalid_public_domain FailureReason = "INVALID_PUBLIC_DOMAIN" - FailureReasonDomain_validation_denied FailureReason = "DOMAIN_VALIDATION_DENIED" - FailureReasonCaa_error FailureReason = "CAA_ERROR" - FailureReasonPca_limit_exceeded FailureReason = "PCA_LIMIT_EXCEEDED" - FailureReasonPca_invalid_arn FailureReason = "PCA_INVALID_ARN" - FailureReasonPca_invalid_state FailureReason = "PCA_INVALID_STATE" - FailureReasonPca_request_failed FailureReason = "PCA_REQUEST_FAILED" - FailureReasonPca_name_constraints_validation FailureReason = "PCA_NAME_CONSTRAINTS_VALIDATION" - FailureReasonPca_resource_not_found FailureReason = "PCA_RESOURCE_NOT_FOUND" - FailureReasonPca_invalid_args FailureReason = "PCA_INVALID_ARGS" - FailureReasonPca_invalid_duration FailureReason = "PCA_INVALID_DURATION" - FailureReasonPca_access_denied FailureReason = "PCA_ACCESS_DENIED" - FailureReasonSlr_not_found FailureReason = "SLR_NOT_FOUND" - FailureReasonOther FailureReason = "OTHER" + FailureReasonNoAvailableContacts FailureReason = "NO_AVAILABLE_CONTACTS" + FailureReasonAdditionalVerificationRequired FailureReason = "ADDITIONAL_VERIFICATION_REQUIRED" + FailureReasonDomainNotAllowed FailureReason = "DOMAIN_NOT_ALLOWED" + FailureReasonInvalidPublicDomain FailureReason = "INVALID_PUBLIC_DOMAIN" + FailureReasonDomainValidationDenied FailureReason = "DOMAIN_VALIDATION_DENIED" + FailureReasonCaaError FailureReason = "CAA_ERROR" + FailureReasonPcaLimitExceeded FailureReason = "PCA_LIMIT_EXCEEDED" + FailureReasonPcaInvalidArn FailureReason = "PCA_INVALID_ARN" + FailureReasonPcaInvalidState FailureReason = "PCA_INVALID_STATE" + FailureReasonPcaRequestFailed FailureReason = "PCA_REQUEST_FAILED" + FailureReasonPcaNameConstraintsValidation FailureReason = "PCA_NAME_CONSTRAINTS_VALIDATION" + FailureReasonPcaResourceNotFound FailureReason = "PCA_RESOURCE_NOT_FOUND" + FailureReasonPcaInvalidArgs FailureReason = "PCA_INVALID_ARGS" + FailureReasonPcaInvalidDuration FailureReason = "PCA_INVALID_DURATION" + FailureReasonPcaAccessDenied FailureReason = "PCA_ACCESS_DENIED" + FailureReasonSlrNotFound FailureReason = "SLR_NOT_FOUND" + FailureReasonOther FailureReason = "OTHER" ) // Values returns all known values for FailureReason. Note that this can be @@ -179,12 +179,12 @@ type KeyAlgorithm string // Enum values for KeyAlgorithm const ( - KeyAlgorithmRsa_2048 KeyAlgorithm = "RSA_2048" - KeyAlgorithmRsa_1024 KeyAlgorithm = "RSA_1024" - KeyAlgorithmRsa_4096 KeyAlgorithm = "RSA_4096" - KeyAlgorithmEc_prime256v1 KeyAlgorithm = "EC_prime256v1" - KeyAlgorithmEc_secp384r1 KeyAlgorithm = "EC_secp384r1" - KeyAlgorithmEc_secp521r1 KeyAlgorithm = "EC_secp521r1" + KeyAlgorithmRsa2048 KeyAlgorithm = "RSA_2048" + KeyAlgorithmRsa1024 KeyAlgorithm = "RSA_1024" + KeyAlgorithmRsa4096 KeyAlgorithm = "RSA_4096" + KeyAlgorithmEcPrime256v1 KeyAlgorithm = "EC_prime256v1" + KeyAlgorithmEcSecp384r1 KeyAlgorithm = "EC_secp384r1" + KeyAlgorithmEcSecp521r1 KeyAlgorithm = "EC_secp521r1" ) // Values returns all known values for KeyAlgorithm. 
Note that this can be expanded @@ -205,17 +205,17 @@ type KeyUsageName string // Enum values for KeyUsageName const ( - KeyUsageNameDigital_signature KeyUsageName = "DIGITAL_SIGNATURE" - KeyUsageNameNon_repudation KeyUsageName = "NON_REPUDIATION" - KeyUsageNameKey_encipherment KeyUsageName = "KEY_ENCIPHERMENT" - KeyUsageNameData_encipherment KeyUsageName = "DATA_ENCIPHERMENT" - KeyUsageNameKey_agreement KeyUsageName = "KEY_AGREEMENT" - KeyUsageNameCertificate_signing KeyUsageName = "CERTIFICATE_SIGNING" - KeyUsageNameCrl_signing KeyUsageName = "CRL_SIGNING" - KeyUsageNameEnchiper_only KeyUsageName = "ENCIPHER_ONLY" - KeyUsageNameDecipher_only KeyUsageName = "DECIPHER_ONLY" - KeyUsageNameAny KeyUsageName = "ANY" - KeyUsageNameCustom KeyUsageName = "CUSTOM" + KeyUsageNameDigitalSignature KeyUsageName = "DIGITAL_SIGNATURE" + KeyUsageNameNonRepudation KeyUsageName = "NON_REPUDIATION" + KeyUsageNameKeyEncipherment KeyUsageName = "KEY_ENCIPHERMENT" + KeyUsageNameDataEncipherment KeyUsageName = "DATA_ENCIPHERMENT" + KeyUsageNameKeyAgreement KeyUsageName = "KEY_AGREEMENT" + KeyUsageNameCertificateSigning KeyUsageName = "CERTIFICATE_SIGNING" + KeyUsageNameCrlSigning KeyUsageName = "CRL_SIGNING" + KeyUsageNameEnchiperOnly KeyUsageName = "ENCIPHER_ONLY" + KeyUsageNameDecipherOnly KeyUsageName = "DECIPHER_ONLY" + KeyUsageNameAny KeyUsageName = "ANY" + KeyUsageNameCustom KeyUsageName = "CUSTOM" ) // Values returns all known values for KeyUsageName. Note that this can be expanded @@ -275,10 +275,10 @@ type RenewalStatus string // Enum values for RenewalStatus const ( - RenewalStatusPending_auto_renewal RenewalStatus = "PENDING_AUTO_RENEWAL" - RenewalStatusPending_validation RenewalStatus = "PENDING_VALIDATION" - RenewalStatusSuccess RenewalStatus = "SUCCESS" - RenewalStatusFailed RenewalStatus = "FAILED" + RenewalStatusPendingAutoRenewal RenewalStatus = "PENDING_AUTO_RENEWAL" + RenewalStatusPendingValidation RenewalStatus = "PENDING_VALIDATION" + RenewalStatusSuccess RenewalStatus = "SUCCESS" + RenewalStatusFailed RenewalStatus = "FAILED" ) // Values returns all known values for RenewalStatus. 
Note that this can be @@ -297,16 +297,16 @@ type RevocationReason string // Enum values for RevocationReason const ( - RevocationReasonUnspecified RevocationReason = "UNSPECIFIED" - RevocationReasonKey_compromise RevocationReason = "KEY_COMPROMISE" - RevocationReasonCa_compromise RevocationReason = "CA_COMPROMISE" - RevocationReasonAffiliation_changed RevocationReason = "AFFILIATION_CHANGED" - RevocationReasonSuperceded RevocationReason = "SUPERCEDED" - RevocationReasonCessation_of_operation RevocationReason = "CESSATION_OF_OPERATION" - RevocationReasonCertificate_hold RevocationReason = "CERTIFICATE_HOLD" - RevocationReasonRemove_from_crl RevocationReason = "REMOVE_FROM_CRL" - RevocationReasonPrivilege_withdrawn RevocationReason = "PRIVILEGE_WITHDRAWN" - RevocationReasonA_a_compromise RevocationReason = "A_A_COMPROMISE" + RevocationReasonUnspecified RevocationReason = "UNSPECIFIED" + RevocationReasonKeyCompromise RevocationReason = "KEY_COMPROMISE" + RevocationReasonCaCompromise RevocationReason = "CA_COMPROMISE" + RevocationReasonAffiliationChanged RevocationReason = "AFFILIATION_CHANGED" + RevocationReasonSuperceded RevocationReason = "SUPERCEDED" + RevocationReasonCessationOfOperation RevocationReason = "CESSATION_OF_OPERATION" + RevocationReasonCertificateHold RevocationReason = "CERTIFICATE_HOLD" + RevocationReasonRemoveFromCrl RevocationReason = "REMOVE_FROM_CRL" + RevocationReasonPrivilegeWithdrawn RevocationReason = "PRIVILEGE_WITHDRAWN" + RevocationReasonAACompromise RevocationReason = "A_A_COMPROMISE" ) // Values returns all known values for RevocationReason. Note that this can be diff --git a/service/acm/types/types.go b/service/acm/types/types.go index 46d59d3fae2..4c57daccb30 100644 --- a/service/acm/types/types.go +++ b/service/acm/types/types.go @@ -193,11 +193,11 @@ type DomainValidation struct { // The validation status of the domain name. This can be one of the following // values: // - // * PENDING_VALIDATION + // * PENDING_VALIDATION // - // * SUCCESS + // * SUCCESS // - // * FAILED + // * FAILED ValidationStatus DomainStatus } @@ -217,17 +217,17 @@ type DomainValidationOption struct { // can specify example.com for this value. In that case, ACM sends domain // validation emails to the following five addresses: // - // * admin@example.com + // * admin@example.com // + // * + // administrator@example.com // - // * administrator@example.com + // * hostmaster@example.com // - // * hostmaster@example.com + // * postmaster@example.com // - // * - // postmaster@example.com - // - // * webmaster@example.com + // * + // webmaster@example.com // // This member is required. ValidationDomain *string @@ -242,30 +242,30 @@ type ExtendedKeyUsage struct { Name ExtendedKeyUsageName // An object identifier (OID) for the extension value. OIDs are strings of numbers - // separated by periods. The following OIDs are defined in RFC 3280 and RFC 5280. - // + // separated by periods. The following OIDs are defined in RFC 3280 and RFC + // 5280. 
// // * 1.3.6.1.5.5.7.3.1 (TLS_WEB_SERVER_AUTHENTICATION) // - // * 1.3.6.1.5.5.7.3.2 + // * 1.3.6.1.5.5.7.3.2 // (TLS_WEB_CLIENT_AUTHENTICATION) // - // * 1.3.6.1.5.5.7.3.3 (CODE_SIGNING) + // * 1.3.6.1.5.5.7.3.3 (CODE_SIGNING) // - // * + // * // 1.3.6.1.5.5.7.3.4 (EMAIL_PROTECTION) // - // * 1.3.6.1.5.5.7.3.8 (TIME_STAMPING) - // - // - // * 1.3.6.1.5.5.7.3.9 (OCSP_SIGNING) + // * 1.3.6.1.5.5.7.3.8 (TIME_STAMPING) // - // * 1.3.6.1.5.5.7.3.5 (IPSEC_END_SYSTEM) + // * + // 1.3.6.1.5.5.7.3.9 (OCSP_SIGNING) // + // * 1.3.6.1.5.5.7.3.5 (IPSEC_END_SYSTEM) // - // * 1.3.6.1.5.5.7.3.6 (IPSEC_TUNNEL) + // * + // 1.3.6.1.5.5.7.3.6 (IPSEC_TUNNEL) // - // * 1.3.6.1.5.5.7.3.7 (IPSEC_USER) + // * 1.3.6.1.5.5.7.3.7 (IPSEC_USER) OID *string } diff --git a/service/acmpca/api_op_CreatePermission.go b/service/acmpca/api_op_CreatePermission.go index f12258e1559..0e10b0a899f 100644 --- a/service/acmpca/api_op_CreatePermission.go +++ b/service/acmpca/api_op_CreatePermission.go @@ -20,19 +20,19 @@ import ( // (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_DeletePermission.html) // action. About Permissions // -// * If the private CA and the certificates it -// issues reside in the same account, you can use CreatePermission to grant -// permissions for ACM to carry out automatic certificate renewals. +// * If the private CA and the certificates it issues +// reside in the same account, you can use CreatePermission to grant permissions +// for ACM to carry out automatic certificate renewals. // -// * For -// automatic certificate renewal to succeed, the ACM service principal needs -// permissions to create, retrieve, and list certificates. +// * For automatic +// certificate renewal to succeed, the ACM service principal needs permissions to +// create, retrieve, and list certificates. // -// * If the private CA -// and the ACM certificates reside in different accounts, then permissions cannot -// be used to enable automatic renewals. Instead, the ACM certificate owner must -// set up a resource-based policy to enable cross-account issuance and renewals. -// For more information, see Using a Resource Based Policy with ACM Private CA. +// * If the private CA and the ACM +// certificates reside in different accounts, then permissions cannot be used to +// enable automatic renewals. Instead, the ACM certificate owner must set up a +// resource-based policy to enable cross-account issuance and renewals. For more +// information, see Using a Resource Based Policy with ACM Private CA. func (c *Client) CreatePermission(ctx context.Context, params *CreatePermissionInput, optFns ...func(*Options)) (*CreatePermissionOutput, error) { if params == nil { params = &CreatePermissionInput{} diff --git a/service/acmpca/api_op_DeletePermission.go b/service/acmpca/api_op_DeletePermission.go index bcfc6a90bcb..adbdc6c9083 100644 --- a/service/acmpca/api_op_DeletePermission.go +++ b/service/acmpca/api_op_DeletePermission.go @@ -20,19 +20,19 @@ import ( // (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_ListPermissions.html) // action. About Permissions // -// * If the private CA and the certificates it -// issues reside in the same account, you can use CreatePermission to grant -// permissions for ACM to carry out automatic certificate renewals. +// * If the private CA and the certificates it issues +// reside in the same account, you can use CreatePermission to grant permissions +// for ACM to carry out automatic certificate renewals. 
// -// * For -// automatic certificate renewal to succeed, the ACM service principal needs -// permissions to create, retrieve, and list certificates. +// * For automatic +// certificate renewal to succeed, the ACM service principal needs permissions to +// create, retrieve, and list certificates. // -// * If the private CA -// and the ACM certificates reside in different accounts, then permissions cannot -// be used to enable automatic renewals. Instead, the ACM certificate owner must -// set up a resource-based policy to enable cross-account issuance and renewals. -// For more information, see Using a Resource Based Policy with ACM Private CA. +// * If the private CA and the ACM +// certificates reside in different accounts, then permissions cannot be used to +// enable automatic renewals. Instead, the ACM certificate owner must set up a +// resource-based policy to enable cross-account issuance and renewals. For more +// information, see Using a Resource Based Policy with ACM Private CA. func (c *Client) DeletePermission(ctx context.Context, params *DeletePermissionInput, optFns ...func(*Options)) (*DeletePermissionOutput, error) { if params == nil { params = &DeletePermissionInput{} diff --git a/service/acmpca/api_op_DeletePolicy.go b/service/acmpca/api_op_DeletePolicy.go index 8c684108209..65ac3ae1c13 100644 --- a/service/acmpca/api_op_DeletePolicy.go +++ b/service/acmpca/api_op_DeletePolicy.go @@ -22,23 +22,23 @@ import ( // (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_PutPolicy.html). // About Policies // -// * A policy grants access on a private CA to an AWS customer +// * A policy grants access on a private CA to an AWS customer // account, to AWS Organizations, or to an AWS Organizations unit. Policies are // under the control of a CA administrator. For more information, see Using a // Resource Based Policy with ACM Private CA. // -// * A policy permits a user of AWS +// * A policy permits a user of AWS // Certificate Manager (ACM) to issue ACM certificates signed by a CA in another // account. // -// * For ACM to manage automatic renewal of these certificates, the -// ACM user must configure a Service Linked Role (SLR). The SLR allows the ACM -// service to assume the identity of the user, subject to confirmation against the -// ACM Private CA policy. For more information, see Using a Service Linked Role -// with ACM (https://docs.aws.amazon.com/acm/latest/userguide/acm-slr.html). +// * For ACM to manage automatic renewal of these certificates, the ACM +// user must configure a Service Linked Role (SLR). The SLR allows the ACM service +// to assume the identity of the user, subject to confirmation against the ACM +// Private CA policy. For more information, see Using a Service Linked Role with +// ACM (https://docs.aws.amazon.com/acm/latest/userguide/acm-slr.html). // -// * -// Updates made in AWS Resource Manager (RAM) are reflected in policies. For more +// * Updates +// made in AWS Resource Manager (RAM) are reflected in policies. For more // information, see Using AWS Resource Access Manager (RAM) with ACM Private CA. 
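Reviewer note: as a usage illustration of the DeletePolicy doc comment reflowed above, a caller removing a private CA's resource-based policy might look like the sketch below. The wrapper function, package name, and example ARN in the comment are assumptions, and error handling is left to the caller.

package capolicy // illustrative package name

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/acmpca"
)

// deleteCAPolicy removes the resource-based policy attached to the private CA
// identified by caArn, for example
// "arn:aws:acm-pca:us-east-1:111122223333:certificate-authority/EXAMPLE".
func deleteCAPolicy(ctx context.Context, client *acmpca.Client, caArn string) error {
	_, err := client.DeletePolicy(ctx, &acmpca.DeletePolicyInput{
		ResourceArn: aws.String(caArn),
	})
	return err
}
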
func (c *Client) DeletePolicy(ctx context.Context, params *DeletePolicyInput, optFns ...func(*Options)) (*DeletePolicyOutput, error) { if params == nil { diff --git a/service/acmpca/api_op_DescribeCertificateAuthority.go b/service/acmpca/api_op_DescribeCertificateAuthority.go index 5e80e16d7b0..d429e1c6f7c 100644 --- a/service/acmpca/api_op_DescribeCertificateAuthority.go +++ b/service/acmpca/api_op_DescribeCertificateAuthority.go @@ -16,30 +16,29 @@ import ( // Resource Name). The output contains the status of your CA. This can be any of // the following: // -// * CREATING - ACM Private CA is creating your private -// certificate authority. +// * CREATING - ACM Private CA is creating your private certificate +// authority. // -// * PENDING_CERTIFICATE - The certificate is pending. -// You must use your ACM Private CA-hosted or on-premises root or subordinate CA to -// sign your private CA CSR and then import it into PCA. +// * PENDING_CERTIFICATE - The certificate is pending. You must use +// your ACM Private CA-hosted or on-premises root or subordinate CA to sign your +// private CA CSR and then import it into PCA. // -// * ACTIVE - Your -// private CA is active. +// * ACTIVE - Your private CA is +// active. // -// * DISABLED - Your private CA has been disabled. +// * DISABLED - Your private CA has been disabled. // +// * EXPIRED - Your +// private CA certificate has expired. // -// * EXPIRED - Your private CA certificate has expired. +// * FAILED - Your private CA has failed. Your +// CA can fail because of problems such a network outage or backend AWS failure or +// other errors. A failed CA can never return to the pending state. You must create +// a new CA. // -// * FAILED - Your -// private CA has failed. Your CA can fail because of problems such a network -// outage or backend AWS failure or other errors. A failed CA can never return to -// the pending state. You must create a new CA. -// -// * DELETED - Your private CA is -// within the restoration period, after which it is permanently deleted. The length -// of time remaining in the CA's restoration period is also included in this -// action's output. +// * DELETED - Your private CA is within the restoration period, after +// which it is permanently deleted. The length of time remaining in the CA's +// restoration period is also included in this action's output. func (c *Client) DescribeCertificateAuthority(ctx context.Context, params *DescribeCertificateAuthorityInput, optFns ...func(*Options)) (*DescribeCertificateAuthorityOutput, error) { if params == nil { params = &DescribeCertificateAuthorityInput{} diff --git a/service/acmpca/api_op_GetPolicy.go b/service/acmpca/api_op_GetPolicy.go index 8ab9e5fde3d..369c2c3aa0d 100644 --- a/service/acmpca/api_op_GetPolicy.go +++ b/service/acmpca/api_op_GetPolicy.go @@ -16,25 +16,25 @@ import ( // (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_PutPolicy.html) and // removed with DeletePolicy. About Policies // -// * A policy grants access on a -// private CA to an AWS customer account, to AWS Organizations, or to an AWS -// Organizations unit. Policies are under the control of a CA administrator. For -// more information, see Using a Resource Based Policy with ACM Private CA. +// * A policy grants access on a private +// CA to an AWS customer account, to AWS Organizations, or to an AWS Organizations +// unit. Policies are under the control of a CA administrator. For more +// information, see Using a Resource Based Policy with ACM Private CA. 
// -// * -// A policy permits a user of AWS Certificate Manager (ACM) to issue ACM -// certificates signed by a CA in another account. +// * A policy +// permits a user of AWS Certificate Manager (ACM) to issue ACM certificates signed +// by a CA in another account. // -// * For ACM to manage -// automatic renewal of these certificates, the ACM user must configure a Service -// Linked Role (SLR). The SLR allows the ACM service to assume the identity of the -// user, subject to confirmation against the ACM Private CA policy. For more -// information, see Using a Service Linked Role with ACM +// * For ACM to manage automatic renewal of these +// certificates, the ACM user must configure a Service Linked Role (SLR). The SLR +// allows the ACM service to assume the identity of the user, subject to +// confirmation against the ACM Private CA policy. For more information, see Using +// a Service Linked Role with ACM // (https://docs.aws.amazon.com/acm/latest/userguide/acm-slr.html). // -// * Updates -// made in AWS Resource Manager (RAM) are reflected in policies. For more -// information, see Using AWS Resource Access Manager (RAM) with ACM Private CA. +// * Updates made +// in AWS Resource Manager (RAM) are reflected in policies. For more information, +// see Using AWS Resource Access Manager (RAM) with ACM Private CA. func (c *Client) GetPolicy(ctx context.Context, params *GetPolicyInput, optFns ...func(*Options)) (*GetPolicyOutput, error) { if params == nil { params = &GetPolicyInput{} diff --git a/service/acmpca/api_op_ImportCertificateAuthorityCertificate.go b/service/acmpca/api_op_ImportCertificateAuthorityCertificate.go index 0c8dd60a48a..6f70c294b3b 100644 --- a/service/acmpca/api_op_ImportCertificateAuthorityCertificate.go +++ b/service/acmpca/api_op_ImportCertificateAuthorityCertificate.go @@ -12,101 +12,99 @@ import ( // Imports a signed private CA certificate into ACM Private CA. This action is used // when you are using a chain of trust whose root is located outside ACM Private -// CA. Before you can call this action, the following preparations must in place: -// +// CA. Before you can call this action, the following preparations must in +// place: // // * In ACM Private CA, call the CreateCertificateAuthority // (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_CreateCertificateAuthority.html) // action to create the private CA that that you plan to back with the imported // certificate. // -// * Call the GetCertificateAuthorityCsr +// * Call the GetCertificateAuthorityCsr // (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_GetCertificateAuthorityCsr.html) // action to generate a certificate signing request (CSR). // -// * Sign the CSR -// using a root or intermediate CA hosted by either an on-premises PKI hierarchy or -// by a commercial CA. -// -// * Create a certificate chain and copy the signed -// certificate and the certificate chain to your working directory. +// * Sign the CSR using a +// root or intermediate CA hosted by either an on-premises PKI hierarchy or by a +// commercial CA. // -// The following -// requirements apply when you import a CA certificate. +// * Create a certificate chain and copy the signed certificate and +// the certificate chain to your working directory. // -// * You cannot import a -// non-self-signed certificate for use as a root CA. +// The following requirements +// apply when you import a CA certificate. // -// * You cannot import a -// self-signed certificate for use as a subordinate CA. 
+// * You cannot import a non-self-signed +// certificate for use as a root CA. // -// * Your certificate -// chain must not include the private CA certificate that you are importing. +// * You cannot import a self-signed certificate +// for use as a subordinate CA. // -// * -// Your ACM Private CA-hosted or on-premises CA certificate must be the last -// certificate in your chain. The subordinate certificate, if any, that your root -// CA signed must be next to last. The subordinate certificate signed by the -// preceding subordinate CA must come next, and so on until your chain is built. +// * Your certificate chain must not include the +// private CA certificate that you are importing. // +// * Your ACM Private CA-hosted or +// on-premises CA certificate must be the last certificate in your chain. The +// subordinate certificate, if any, that your root CA signed must be next to last. +// The subordinate certificate signed by the preceding subordinate CA must come +// next, and so on until your chain is built. // // * The chain must be PEM-encoded. // -// * The maximum allowed size of a -// certificate is 32 KB. -// -// * The maximum allowed size of a certificate chain is -// 2 MB. +// * +// The maximum allowed size of a certificate is 32 KB. // -// Enforcement of Critical Constraints ACM Private CA allows the following -// extensions to be marked critical in the imported CA certificate or chain. +// * The maximum allowed size +// of a certificate chain is 2 MB. // -// * -// Basic constraints (must be marked critical) +// Enforcement of Critical Constraints ACM Private +// CA allows the following extensions to be marked critical in the imported CA +// certificate or chain. // -// * Subject alternative names +// * Basic constraints (must be marked critical) // +// * Subject +// alternative names // // * Key usage // -// * Extended key usage +// * Extended key usage // -// * Authority key identifier +// * Authority key +// identifier // -// * -// Subject key identifier +// * Subject key identifier // -// * Issuer alternative name +// * Issuer alternative name // -// * Subject directory -// attributes +// * Subject +// directory attributes // -// * Subject information access +// * Subject information access // -// * Certificate policies +// * Certificate policies // -// * +// * // Policy mappings // -// * Inhibit anyPolicy +// * Inhibit anyPolicy // // ACM Private CA rejects the following // extensions when they are marked critical in an imported CA certificate or // chain. // -// * Name constraints +// * Name constraints // -// * Policy constraints +// * Policy constraints // -// * CRL distribution -// points +// * CRL distribution points // -// * Authority information access +// * +// Authority information access // -// * Freshest CRL +// * Freshest CRL // -// * Any other -// extension +// * Any other extension func (c *Client) ImportCertificateAuthorityCertificate(ctx context.Context, params *ImportCertificateAuthorityCertificateInput, optFns ...func(*Options)) (*ImportCertificateAuthorityCertificateOutput, error) { if params == nil { params = &ImportCertificateAuthorityCertificateInput{} diff --git a/service/acmpca/api_op_IssueCertificate.go b/service/acmpca/api_op_IssueCertificate.go index 23a0672185f..42b1105c4f1 100644 --- a/service/acmpca/api_op_IssueCertificate.go +++ b/service/acmpca/api_op_IssueCertificate.go @@ -94,49 +94,49 @@ type IssueCertificateInput struct { // the limit set by its parents in the CA hierarchy. 
The following service-owned // TemplateArn values are supported by ACM Private CA: // - // * + // * // arn:aws:acm-pca:::template/CodeSigningCertificate/V1 // - // * + // * // arn:aws:acm-pca:::template/CodeSigningCertificate_CSRPassthrough/V1 // - // * + // * // arn:aws:acm-pca:::template/EndEntityCertificate/V1 // - // * + // * // arn:aws:acm-pca:::template/EndEntityCertificate_CSRPassthrough/V1 // - // * + // * // arn:aws:acm-pca:::template/EndEntityClientAuthCertificate/V1 // - // * + // * // arn:aws:acm-pca:::template/EndEntityClientAuthCertificate_CSRPassthrough/V1 // + // * + // arn:aws:acm-pca:::template/EndEntityServerAuthCertificate/V1 // - // * arn:aws:acm-pca:::template/EndEntityServerAuthCertificate/V1 - // - // * + // * // arn:aws:acm-pca:::template/EndEntityServerAuthCertificate_CSRPassthrough/V1 // + // * + // arn:aws:acm-pca:::template/OCSPSigningCertificate/V1 // - // * arn:aws:acm-pca:::template/OCSPSigningCertificate/V1 - // - // * + // * // arn:aws:acm-pca:::template/OCSPSigningCertificate_CSRPassthrough/V1 // - // * + // * // arn:aws:acm-pca:::template/RootCACertificate/V1 // - // * + // * // arn:aws:acm-pca:::template/SubordinateCACertificate_PathLen0/V1 // - // * + // * // arn:aws:acm-pca:::template/SubordinateCACertificate_PathLen1/V1 // - // * + // * // arn:aws:acm-pca:::template/SubordinateCACertificate_PathLen2/V1 // - // * + // * // arn:aws:acm-pca:::template/SubordinateCACertificate_PathLen3/V1 // // For more diff --git a/service/acmpca/api_op_ListPermissions.go b/service/acmpca/api_op_ListPermissions.go index 666ce6f3579..9b2c3ebc705 100644 --- a/service/acmpca/api_op_ListPermissions.go +++ b/service/acmpca/api_op_ListPermissions.go @@ -20,19 +20,19 @@ import ( // (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_DeletePermission.html) // action. About Permissions // -// * If the private CA and the certificates it -// issues reside in the same account, you can use CreatePermission to grant -// permissions for ACM to carry out automatic certificate renewals. +// * If the private CA and the certificates it issues +// reside in the same account, you can use CreatePermission to grant permissions +// for ACM to carry out automatic certificate renewals. // -// * For -// automatic certificate renewal to succeed, the ACM service principal needs -// permissions to create, retrieve, and list certificates. +// * For automatic +// certificate renewal to succeed, the ACM service principal needs permissions to +// create, retrieve, and list certificates. // -// * If the private CA -// and the ACM certificates reside in different accounts, then permissions cannot -// be used to enable automatic renewals. Instead, the ACM certificate owner must -// set up a resource-based policy to enable cross-account issuance and renewals. -// For more information, see Using a Resource Based Policy with ACM Private CA. +// * If the private CA and the ACM +// certificates reside in different accounts, then permissions cannot be used to +// enable automatic renewals. Instead, the ACM certificate owner must set up a +// resource-based policy to enable cross-account issuance and renewals. For more +// information, see Using a Resource Based Policy with ACM Private CA. 
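Reviewer note: to make the ListPermissions doc comment above concrete, the sketch below lists the permissions currently granted on a private CA and returns the principals that hold them. The function and package names are illustrative, and pagination via NextToken is omitted for brevity.

package capermissions // illustrative package name

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/acmpca"
)

// listGrantedPrincipals returns the principals (normally acm.amazonaws.com)
// that currently hold permissions on the private CA identified by caArn.
func listGrantedPrincipals(ctx context.Context, client *acmpca.Client, caArn string) ([]string, error) {
	out, err := client.ListPermissions(ctx, &acmpca.ListPermissionsInput{
		CertificateAuthorityArn: aws.String(caArn),
	})
	if err != nil {
		return nil, err
	}
	principals := make([]string, 0, len(out.Permissions))
	for _, p := range out.Permissions {
		principals = append(principals, aws.ToString(p.Principal))
	}
	return principals, nil
}
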
func (c *Client) ListPermissions(ctx context.Context, params *ListPermissionsInput, optFns ...func(*Options)) (*ListPermissionsOutput, error) { if params == nil { params = &ListPermissionsInput{} diff --git a/service/acmpca/api_op_PutPolicy.go b/service/acmpca/api_op_PutPolicy.go index 5fe25c9120f..25140188257 100644 --- a/service/acmpca/api_op_PutPolicy.go +++ b/service/acmpca/api_op_PutPolicy.go @@ -19,23 +19,23 @@ import ( // (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_DeletePolicy.html). // About Policies // -// * A policy grants access on a private CA to an AWS customer +// * A policy grants access on a private CA to an AWS customer // account, to AWS Organizations, or to an AWS Organizations unit. Policies are // under the control of a CA administrator. For more information, see Using a // Resource Based Policy with ACM Private CA. // -// * A policy permits a user of AWS +// * A policy permits a user of AWS // Certificate Manager (ACM) to issue ACM certificates signed by a CA in another // account. // -// * For ACM to manage automatic renewal of these certificates, the -// ACM user must configure a Service Linked Role (SLR). The SLR allows the ACM -// service to assume the identity of the user, subject to confirmation against the -// ACM Private CA policy. For more information, see Using a Service Linked Role -// with ACM (https://docs.aws.amazon.com/acm/latest/userguide/acm-slr.html). +// * For ACM to manage automatic renewal of these certificates, the ACM +// user must configure a Service Linked Role (SLR). The SLR allows the ACM service +// to assume the identity of the user, subject to confirmation against the ACM +// Private CA policy. For more information, see Using a Service Linked Role with +// ACM (https://docs.aws.amazon.com/acm/latest/userguide/acm-slr.html). // -// * -// Updates made in AWS Resource Manager (RAM) are reflected in policies. For more +// * Updates +// made in AWS Resource Manager (RAM) are reflected in policies. For more // information, see Using AWS Resource Access Manager (RAM) with ACM Private CA. 
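Reviewer note: complementing the PutPolicy doc comment above, attaching a resource-based policy that lets another account issue certificates from the CA could look like the sketch below. The policy JSON, action list, account ID, and ARN are placeholders; the authoritative statement shape is in the linked ACM Private CA documentation.

package capolicy // illustrative package name

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/acmpca"
)

// attachCrossAccountPolicy attaches an illustrative resource-based policy to
// the private CA identified by caArn, granting account 444455556666 the
// ability to issue certificates from it.
func attachCrossAccountPolicy(ctx context.Context, client *acmpca.Client, caArn string) error {
	policy := `{
	  "Version": "2012-10-17",
	  "Statement": [{
	    "Sid": "AllowCrossAccountIssuance",
	    "Effect": "Allow",
	    "Principal": {"AWS": "444455556666"},
	    "Action": ["acm-pca:IssueCertificate", "acm-pca:GetCertificate", "acm-pca:ListPermissions"],
	    "Resource": "` + caArn + `"
	  }]
	}`
	_, err := client.PutPolicy(ctx, &acmpca.PutPolicyInput{
		ResourceArn: aws.String(caArn),
		Policy:      aws.String(policy),
	})
	return err
}
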
func (c *Client) PutPolicy(ctx context.Context, params *PutPolicyInput, optFns ...func(*Options)) (*PutPolicyOutput, error) { if params == nil { diff --git a/service/acmpca/types/enums.go b/service/acmpca/types/enums.go index 7730d39a091..c0250b2bf5c 100644 --- a/service/acmpca/types/enums.go +++ b/service/acmpca/types/enums.go @@ -64,13 +64,13 @@ type CertificateAuthorityStatus string // Enum values for CertificateAuthorityStatus const ( - CertificateAuthorityStatusCreating CertificateAuthorityStatus = "CREATING" - CertificateAuthorityStatusPending_certificate CertificateAuthorityStatus = "PENDING_CERTIFICATE" - CertificateAuthorityStatusActive CertificateAuthorityStatus = "ACTIVE" - CertificateAuthorityStatusDeleted CertificateAuthorityStatus = "DELETED" - CertificateAuthorityStatusDisabled CertificateAuthorityStatus = "DISABLED" - CertificateAuthorityStatusExpired CertificateAuthorityStatus = "EXPIRED" - CertificateAuthorityStatusFailed CertificateAuthorityStatus = "FAILED" + CertificateAuthorityStatusCreating CertificateAuthorityStatus = "CREATING" + CertificateAuthorityStatusPendingCertificate CertificateAuthorityStatus = "PENDING_CERTIFICATE" + CertificateAuthorityStatusActive CertificateAuthorityStatus = "ACTIVE" + CertificateAuthorityStatusDeleted CertificateAuthorityStatus = "DELETED" + CertificateAuthorityStatusDisabled CertificateAuthorityStatus = "DISABLED" + CertificateAuthorityStatusExpired CertificateAuthorityStatus = "EXPIRED" + CertificateAuthorityStatusFailed CertificateAuthorityStatus = "FAILED" ) // Values returns all known values for CertificateAuthorityStatus. Note that this @@ -110,9 +110,9 @@ type FailureReason string // Enum values for FailureReason const ( - FailureReasonRequest_timed_out FailureReason = "REQUEST_TIMED_OUT" - FailureReasonUnsupported_algorithm FailureReason = "UNSUPPORTED_ALGORITHM" - FailureReasonOther FailureReason = "OTHER" + FailureReasonRequestTimedOut FailureReason = "REQUEST_TIMED_OUT" + FailureReasonUnsupportedAlgorithm FailureReason = "UNSUPPORTED_ALGORITHM" + FailureReasonOther FailureReason = "OTHER" ) // Values returns all known values for FailureReason. Note that this can be @@ -130,10 +130,10 @@ type KeyAlgorithm string // Enum values for KeyAlgorithm const ( - KeyAlgorithmRsa_2048 KeyAlgorithm = "RSA_2048" - KeyAlgorithmRsa_4096 KeyAlgorithm = "RSA_4096" - KeyAlgorithmEc_prime256v1 KeyAlgorithm = "EC_prime256v1" - KeyAlgorithmEc_secp384r1 KeyAlgorithm = "EC_secp384r1" + KeyAlgorithmRsa2048 KeyAlgorithm = "RSA_2048" + KeyAlgorithmRsa4096 KeyAlgorithm = "RSA_4096" + KeyAlgorithmEcPrime256v1 KeyAlgorithm = "EC_prime256v1" + KeyAlgorithmEcSecp384r1 KeyAlgorithm = "EC_secp384r1" ) // Values returns all known values for KeyAlgorithm. Note that this can be expanded @@ -152,8 +152,8 @@ type ResourceOwner string // Enum values for ResourceOwner const ( - ResourceOwnerSelf ResourceOwner = "SELF" - ResourceOwnerOther_accounts ResourceOwner = "OTHER_ACCOUNTS" + ResourceOwnerSelf ResourceOwner = "SELF" + ResourceOwnerOtherAccounts ResourceOwner = "OTHER_ACCOUNTS" ) // Values returns all known values for ResourceOwner. 
Note that this can be @@ -170,14 +170,14 @@ type RevocationReason string // Enum values for RevocationReason const ( - RevocationReasonUnspecified RevocationReason = "UNSPECIFIED" - RevocationReasonKey_compromise RevocationReason = "KEY_COMPROMISE" - RevocationReasonCertificate_authority_compromise RevocationReason = "CERTIFICATE_AUTHORITY_COMPROMISE" - RevocationReasonAffiliation_changed RevocationReason = "AFFILIATION_CHANGED" - RevocationReasonSuperseded RevocationReason = "SUPERSEDED" - RevocationReasonCessation_of_operation RevocationReason = "CESSATION_OF_OPERATION" - RevocationReasonPrivilege_withdrawn RevocationReason = "PRIVILEGE_WITHDRAWN" - RevocationReasonA_a_compromise RevocationReason = "A_A_COMPROMISE" + RevocationReasonUnspecified RevocationReason = "UNSPECIFIED" + RevocationReasonKeyCompromise RevocationReason = "KEY_COMPROMISE" + RevocationReasonCertificateAuthorityCompromise RevocationReason = "CERTIFICATE_AUTHORITY_COMPROMISE" + RevocationReasonAffiliationChanged RevocationReason = "AFFILIATION_CHANGED" + RevocationReasonSuperseded RevocationReason = "SUPERSEDED" + RevocationReasonCessationOfOperation RevocationReason = "CESSATION_OF_OPERATION" + RevocationReasonPrivilegeWithdrawn RevocationReason = "PRIVILEGE_WITHDRAWN" + RevocationReasonAACompromise RevocationReason = "A_A_COMPROMISE" ) // Values returns all known values for RevocationReason. Note that this can be @@ -226,7 +226,7 @@ type ValidityPeriodType string // Enum values for ValidityPeriodType const ( - ValidityPeriodTypeEnd_date ValidityPeriodType = "END_DATE" + ValidityPeriodTypeEndDate ValidityPeriodType = "END_DATE" ValidityPeriodTypeAbsolute ValidityPeriodType = "ABSOLUTE" ValidityPeriodTypeDays ValidityPeriodType = "DAYS" ValidityPeriodTypeMonths ValidityPeriodType = "MONTHS" diff --git a/service/acmpca/types/types.go b/service/acmpca/types/types.go index 9235d472cf1..41fc7676c69 100644 --- a/service/acmpca/types/types.go +++ b/service/acmpca/types/types.go @@ -180,55 +180,54 @@ type CertificateAuthorityConfiguration struct { // time valid certificates are listed in the CRL. Expired certificates are not // included. CRLs contain the following fields: // -// * Version: The current version +// * Version: The current version // number defined in RFC 5280 is V2. The integer value is 0x1. // -// * Signature +// * Signature // Algorithm: The name of the algorithm used to sign the CRL. // -// * Issuer: The -// X.500 distinguished name of your private CA that issued the CRL. +// * Issuer: The X.500 +// distinguished name of your private CA that issued the CRL. // -// * Last -// Update: The issue date and time of this CRL. +// * Last Update: The +// issue date and time of this CRL. // -// * Next Update: The day and -// time by which the next CRL will be issued. +// * Next Update: The day and time by which the +// next CRL will be issued. // -// * Revoked Certificates: List of -// revoked certificates. Each list item contains the following information. +// * Revoked Certificates: List of revoked certificates. +// Each list item contains the following information. // +// * Serial Number: The serial +// number, in hexadecimal format, of the revoked certificate. // -// * Serial Number: The serial number, in hexadecimal format, of the revoked -// certificate. +// * Revocation Date: +// Date and time the certificate was revoked. // -// * Revocation Date: Date and time the certificate was -// revoked. +// * CRL Entry Extensions: Optional +// extensions for the CRL entry. 
// -// * CRL Entry Extensions: Optional extensions for the CRL -// entry. +// * X509v3 CRL Reason Code: Reason the certificate +// was revoked. // -// * X509v3 CRL Reason Code: Reason the certificate was -// revoked. +// * CRL Extensions: Optional extensions for the CRL. // -// * CRL Extensions: Optional extensions for the CRL. +// * X509v3 +// Authority Key Identifier: Identifies the public key associated with the private +// key used to sign the certificate. // -// * -// X509v3 Authority Key Identifier: Identifies the public key associated with the -// private key used to sign the certificate. +// * X509v3 CRL Number:: Decimal sequence number +// for the CRL. // -// * X509v3 CRL Number:: Decimal -// sequence number for the CRL. +// * Signature Algorithm: Algorithm used by your private CA to sign +// the CRL. // -// * Signature Algorithm: Algorithm used by your -// private CA to sign the CRL. +// * Signature Value: Signature computed over the CRL. // -// * Signature Value: Signature computed over the -// CRL. -// -// Certificate revocation lists created by ACM Private CA are DER-encoded. -// You can use the following OpenSSL command to list a CRL. openssl crl -inform DER -// -text -in crl_path -noout +// Certificate +// revocation lists created by ACM Private CA are DER-encoded. You can use the +// following OpenSSL command to list a CRL. openssl crl -inform DER -text -in +// crl_path -noout type CrlConfiguration struct { // Boolean value that specifies whether certificate revocation lists (CRLs) are @@ -346,29 +345,29 @@ type Validity struct { // if the year field (YY) is greater than or equal to 50, the year is interpreted // as 19YY. If the year field is less than 50, the year is interpreted as 20YY. // + // * + // Sample input value: 491231235959 (UTCTime format) // - // * Sample input value: 491231235959 (UTCTime format) - // - // * Output expiration + // * Output expiration // date/time: 12/31/2049 23:59:59 // // ABSOLUTE: The specific date and time when the // certificate will expire, expressed in seconds since the Unix Epoch. // - // * - // Sample input value: 2524608000 + // * Sample + // input value: 2524608000 // - // * Output expiration date/time: 01/01/2050 + // * Output expiration date/time: 01/01/2050 // 00:00:00 // // DAYS, MONTHS, YEARS: The relative time from the moment of issuance // until the certificate will expire, expressed in days, months, or years. Example // if DAYS, issued on 10/12/2020 at 12:34:54 UTC: // - // * Sample input value: 90 - // + // * Sample input value: 90 // - // * Output expiration date: 01/10/2020 12:34:54 UTC + // * + // Output expiration date: 01/10/2020 12:34:54 UTC // // This member is required. Type ValidityPeriodType diff --git a/service/alexaforbusiness/api_op_StartDeviceSync.go b/service/alexaforbusiness/api_op_StartDeviceSync.go index cb5b69970c6..7894309c965 100644 --- a/service/alexaforbusiness/api_op_StartDeviceSync.go +++ b/service/alexaforbusiness/api_op_StartDeviceSync.go @@ -14,22 +14,22 @@ import ( // Resets a device and its account to the known default settings. This clears all // information and settings set by previous users in the following ways: // -// * +// * // Bluetooth - This unpairs all bluetooth devices paired with your echo device. // +// * +// Volume - This resets the echo device's volume to the default value. // -// * Volume - This resets the echo device's volume to the default value. -// -// * +// * // Notifications - This clears all notifications from your echo device. 
// -// * -// Lists - This clears all to-do items from your echo device. +// * Lists - +// This clears all to-do items from your echo device. // -// * Settings - -// This internally syncs the room's profile (if the device is assigned to a room), -// contacts, address books, delegation access for account linking, and -// communications (if enabled on the room profile). +// * Settings - This internally +// syncs the room's profile (if the device is assigned to a room), contacts, +// address books, delegation access for account linking, and communications (if +// enabled on the room profile). func (c *Client) StartDeviceSync(ctx context.Context, params *StartDeviceSyncInput, optFns ...func(*Options)) (*StartDeviceSyncOutput, error) { if params == nil { params = &StartDeviceSyncInput{} diff --git a/service/alexaforbusiness/types/enums.go b/service/alexaforbusiness/types/enums.go index ff2af18f09f..eaaa4ec5818 100644 --- a/service/alexaforbusiness/types/enums.go +++ b/service/alexaforbusiness/types/enums.go @@ -6,9 +6,9 @@ type BusinessReportFailureCode string // Enum values for BusinessReportFailureCode const ( - BusinessReportFailureCodeAccess_denied BusinessReportFailureCode = "ACCESS_DENIED" - BusinessReportFailureCodeNo_such_bucket BusinessReportFailureCode = "NO_SUCH_BUCKET" - BusinessReportFailureCodeInternal_failure BusinessReportFailureCode = "INTERNAL_FAILURE" + BusinessReportFailureCodeAccessDenied BusinessReportFailureCode = "ACCESS_DENIED" + BusinessReportFailureCodeNoSuchBucket BusinessReportFailureCode = "NO_SUCH_BUCKET" + BusinessReportFailureCodeInternalFailure BusinessReportFailureCode = "INTERNAL_FAILURE" ) // Values returns all known values for BusinessReportFailureCode. Note that this @@ -26,8 +26,8 @@ type BusinessReportFormat string // Enum values for BusinessReportFormat const ( - BusinessReportFormatCsv BusinessReportFormat = "CSV" - BusinessReportFormatCsv_zip BusinessReportFormat = "CSV_ZIP" + BusinessReportFormatCsv BusinessReportFormat = "CSV" + BusinessReportFormatCsvZip BusinessReportFormat = "CSV_ZIP" ) // Values returns all known values for BusinessReportFormat. Note that this can be @@ -44,9 +44,9 @@ type BusinessReportInterval string // Enum values for BusinessReportInterval const ( - BusinessReportIntervalOne_day BusinessReportInterval = "ONE_DAY" - BusinessReportIntervalOne_week BusinessReportInterval = "ONE_WEEK" - BusinessReportIntervalThirty_days BusinessReportInterval = "THIRTY_DAYS" + BusinessReportIntervalOneDay BusinessReportInterval = "ONE_DAY" + BusinessReportIntervalOneWeek BusinessReportInterval = "ONE_WEEK" + BusinessReportIntervalThirtyDays BusinessReportInterval = "THIRTY_DAYS" ) // Values returns all known values for BusinessReportInterval. 
Note that this can @@ -104,16 +104,16 @@ type ConferenceProviderType string // Enum values for ConferenceProviderType const ( - ConferenceProviderTypeChime ConferenceProviderType = "CHIME" - ConferenceProviderTypeBluejeans ConferenceProviderType = "BLUEJEANS" - ConferenceProviderTypeFuze ConferenceProviderType = "FUZE" - ConferenceProviderTypeGoogle_hangouts ConferenceProviderType = "GOOGLE_HANGOUTS" - ConferenceProviderTypePolycom ConferenceProviderType = "POLYCOM" - ConferenceProviderTypeRingcentral ConferenceProviderType = "RINGCENTRAL" - ConferenceProviderTypeSkype_for_business ConferenceProviderType = "SKYPE_FOR_BUSINESS" - ConferenceProviderTypeWebex ConferenceProviderType = "WEBEX" - ConferenceProviderTypeZoom ConferenceProviderType = "ZOOM" - ConferenceProviderTypeCustom ConferenceProviderType = "CUSTOM" + ConferenceProviderTypeChime ConferenceProviderType = "CHIME" + ConferenceProviderTypeBluejeans ConferenceProviderType = "BLUEJEANS" + ConferenceProviderTypeFuze ConferenceProviderType = "FUZE" + ConferenceProviderTypeGoogleHangouts ConferenceProviderType = "GOOGLE_HANGOUTS" + ConferenceProviderTypePolycom ConferenceProviderType = "POLYCOM" + ConferenceProviderTypeRingcentral ConferenceProviderType = "RINGCENTRAL" + ConferenceProviderTypeSkypeForBusiness ConferenceProviderType = "SKYPE_FOR_BUSINESS" + ConferenceProviderTypeWebex ConferenceProviderType = "WEBEX" + ConferenceProviderTypeZoom ConferenceProviderType = "ZOOM" + ConferenceProviderTypeCustom ConferenceProviderType = "CUSTOM" ) // Values returns all known values for ConferenceProviderType. Note that this can @@ -156,8 +156,8 @@ type DeviceEventType string // Enum values for DeviceEventType const ( - DeviceEventTypeConnection_status DeviceEventType = "CONNECTION_STATUS" - DeviceEventTypeDevice_status DeviceEventType = "DEVICE_STATUS" + DeviceEventTypeConnectionStatus DeviceEventType = "CONNECTION_STATUS" + DeviceEventTypeDeviceStatus DeviceEventType = "DEVICE_STATUS" ) // Values returns all known values for DeviceEventType. 
Note that this can be @@ -176,7 +176,7 @@ type DeviceStatus string const ( DeviceStatusReady DeviceStatus = "READY" DeviceStatusPending DeviceStatus = "PENDING" - DeviceStatusWas_offline DeviceStatus = "WAS_OFFLINE" + DeviceStatusWasOffline DeviceStatus = "WAS_OFFLINE" DeviceStatusDeregistered DeviceStatus = "DEREGISTERED" DeviceStatusFailed DeviceStatus = "FAILED" ) @@ -198,23 +198,23 @@ type DeviceStatusDetailCode string // Enum values for DeviceStatusDetailCode const ( - DeviceStatusDetailCodeDevice_software_update_needed DeviceStatusDetailCode = "DEVICE_SOFTWARE_UPDATE_NEEDED" - DeviceStatusDetailCodeDevice_was_offline DeviceStatusDetailCode = "DEVICE_WAS_OFFLINE" - DeviceStatusDetailCodeCredentials_access_failure DeviceStatusDetailCode = "CREDENTIALS_ACCESS_FAILURE" - DeviceStatusDetailCodeTls_version_mismatch DeviceStatusDetailCode = "TLS_VERSION_MISMATCH" - DeviceStatusDetailCodeAssociation_rejection DeviceStatusDetailCode = "ASSOCIATION_REJECTION" - DeviceStatusDetailCodeAuthentication_failure DeviceStatusDetailCode = "AUTHENTICATION_FAILURE" - DeviceStatusDetailCodeDhcp_failure DeviceStatusDetailCode = "DHCP_FAILURE" - DeviceStatusDetailCodeInternet_unavailable DeviceStatusDetailCode = "INTERNET_UNAVAILABLE" - DeviceStatusDetailCodeDns_failure DeviceStatusDetailCode = "DNS_FAILURE" - DeviceStatusDetailCodeUnknown_failure DeviceStatusDetailCode = "UNKNOWN_FAILURE" - DeviceStatusDetailCodeCertificate_issuing_limit_exceeded DeviceStatusDetailCode = "CERTIFICATE_ISSUING_LIMIT_EXCEEDED" - DeviceStatusDetailCodeInvalid_certificate_authority DeviceStatusDetailCode = "INVALID_CERTIFICATE_AUTHORITY" - DeviceStatusDetailCodeNetwork_profile_not_found DeviceStatusDetailCode = "NETWORK_PROFILE_NOT_FOUND" - DeviceStatusDetailCodeInvalid_password_state DeviceStatusDetailCode = "INVALID_PASSWORD_STATE" - DeviceStatusDetailCodePassword_not_found DeviceStatusDetailCode = "PASSWORD_NOT_FOUND" - DeviceStatusDetailCodePassword_manager_access_denied DeviceStatusDetailCode = "PASSWORD_MANAGER_ACCESS_DENIED" - DeviceStatusDetailCodeCertificate_authority_access_denied DeviceStatusDetailCode = "CERTIFICATE_AUTHORITY_ACCESS_DENIED" + DeviceStatusDetailCodeDeviceSoftwareUpdateNeeded DeviceStatusDetailCode = "DEVICE_SOFTWARE_UPDATE_NEEDED" + DeviceStatusDetailCodeDeviceWasOffline DeviceStatusDetailCode = "DEVICE_WAS_OFFLINE" + DeviceStatusDetailCodeCredentialsAccessFailure DeviceStatusDetailCode = "CREDENTIALS_ACCESS_FAILURE" + DeviceStatusDetailCodeTlsVersionMismatch DeviceStatusDetailCode = "TLS_VERSION_MISMATCH" + DeviceStatusDetailCodeAssociationRejection DeviceStatusDetailCode = "ASSOCIATION_REJECTION" + DeviceStatusDetailCodeAuthenticationFailure DeviceStatusDetailCode = "AUTHENTICATION_FAILURE" + DeviceStatusDetailCodeDhcpFailure DeviceStatusDetailCode = "DHCP_FAILURE" + DeviceStatusDetailCodeInternetUnavailable DeviceStatusDetailCode = "INTERNET_UNAVAILABLE" + DeviceStatusDetailCodeDnsFailure DeviceStatusDetailCode = "DNS_FAILURE" + DeviceStatusDetailCodeUnknownFailure DeviceStatusDetailCode = "UNKNOWN_FAILURE" + DeviceStatusDetailCodeCertificateIssuingLimitExceeded DeviceStatusDetailCode = "CERTIFICATE_ISSUING_LIMIT_EXCEEDED" + DeviceStatusDetailCodeInvalidCertificateAuthority DeviceStatusDetailCode = "INVALID_CERTIFICATE_AUTHORITY" + DeviceStatusDetailCodeNetworkProfileNotFound DeviceStatusDetailCode = "NETWORK_PROFILE_NOT_FOUND" + DeviceStatusDetailCodeInvalidPasswordState DeviceStatusDetailCode = "INVALID_PASSWORD_STATE" + DeviceStatusDetailCodePasswordNotFound DeviceStatusDetailCode = 
"PASSWORD_NOT_FOUND" + DeviceStatusDetailCodePasswordManagerAccessDenied DeviceStatusDetailCode = "PASSWORD_MANAGER_ACCESS_DENIED" + DeviceStatusDetailCodeCertificateAuthorityAccessDenied DeviceStatusDetailCode = "CERTIFICATE_AUTHORITY_ACCESS_DENIED" ) // Values returns all known values for DeviceStatusDetailCode. Note that this can @@ -316,10 +316,10 @@ type EndOfMeetingReminderType string // Enum values for EndOfMeetingReminderType const ( - EndOfMeetingReminderTypeAnnouncement_time_check EndOfMeetingReminderType = "ANNOUNCEMENT_TIME_CHECK" - EndOfMeetingReminderTypeAnnouncement_variable_time_left EndOfMeetingReminderType = "ANNOUNCEMENT_VARIABLE_TIME_LEFT" - EndOfMeetingReminderTypeChime EndOfMeetingReminderType = "CHIME" - EndOfMeetingReminderTypeKnock EndOfMeetingReminderType = "KNOCK" + EndOfMeetingReminderTypeAnnouncementTimeCheck EndOfMeetingReminderType = "ANNOUNCEMENT_TIME_CHECK" + EndOfMeetingReminderTypeAnnouncementVariableTimeLeft EndOfMeetingReminderType = "ANNOUNCEMENT_VARIABLE_TIME_LEFT" + EndOfMeetingReminderTypeChime EndOfMeetingReminderType = "CHIME" + EndOfMeetingReminderTypeKnock EndOfMeetingReminderType = "KNOCK" ) // Values returns all known values for EndOfMeetingReminderType. Note that this can @@ -362,14 +362,14 @@ type Feature string // Enum values for Feature const ( - FeatureBluetooth Feature = "BLUETOOTH" - FeatureVolume Feature = "VOLUME" - FeatureNotifications Feature = "NOTIFICATIONS" - FeatureLists Feature = "LISTS" - FeatureSkills Feature = "SKILLS" - FeatureNetwork_profile Feature = "NETWORK_PROFILE" - FeatureSettings Feature = "SETTINGS" - FeatureAll Feature = "ALL" + FeatureBluetooth Feature = "BLUETOOTH" + FeatureVolume Feature = "VOLUME" + FeatureNotifications Feature = "NOTIFICATIONS" + FeatureLists Feature = "LISTS" + FeatureSkills Feature = "SKILLS" + FeatureNetworkProfile Feature = "NETWORK_PROFILE" + FeatureSettings Feature = "SETTINGS" + FeatureAll Feature = "ALL" ) // Values returns all known values for Feature. Note that this can be expanded in @@ -392,7 +392,7 @@ type Locale string // Enum values for Locale const ( - LocaleEn_us Locale = "en-US" + LocaleEnUs Locale = "en-US" ) // Values returns all known values for Locale. Note that this can be expanded in @@ -408,7 +408,7 @@ type NetworkEapMethod string // Enum values for NetworkEapMethod const ( - NetworkEapMethodEap_tls NetworkEapMethod = "EAP_TLS" + NetworkEapMethodEapTls NetworkEapMethod = "EAP_TLS" ) // Values returns all known values for NetworkEapMethod. Note that this can be @@ -424,11 +424,11 @@ type NetworkSecurityType string // Enum values for NetworkSecurityType const ( - NetworkSecurityTypeOpen NetworkSecurityType = "OPEN" - NetworkSecurityTypeWep NetworkSecurityType = "WEP" - NetworkSecurityTypeWpa_psk NetworkSecurityType = "WPA_PSK" - NetworkSecurityTypeWpa2_psk NetworkSecurityType = "WPA2_PSK" - NetworkSecurityTypeWpa2_enterprise NetworkSecurityType = "WPA2_ENTERPRISE" + NetworkSecurityTypeOpen NetworkSecurityType = "OPEN" + NetworkSecurityTypeWep NetworkSecurityType = "WEP" + NetworkSecurityTypeWpaPsk NetworkSecurityType = "WPA_PSK" + NetworkSecurityTypeWpa2Psk NetworkSecurityType = "WPA2_PSK" + NetworkSecurityTypeWpa2Enterprise NetworkSecurityType = "WPA2_ENTERPRISE" ) // Values returns all known values for NetworkSecurityType. 
Note that this can be diff --git a/service/alexaforbusiness/types/types.go b/service/alexaforbusiness/types/types.go index 18a2db7c799..d00f739455b 100644 --- a/service/alexaforbusiness/types/types.go +++ b/service/alexaforbusiness/types/types.go @@ -609,14 +609,14 @@ type MeetingRoomConfiguration struct { // The values that indicate whether a pin is always required (YES), never required // (NO), or OPTIONAL. // -// * If YES, Alexa will always ask for a meeting pin. +// * If YES, Alexa will always ask for a meeting pin. // +// * If NO, +// Alexa will never ask for a meeting pin. // -// * If NO, Alexa will never ask for a meeting pin. -// -// * If OPTIONAL, Alexa will -// ask if you have a meeting pin and if the customer responds with yes, it will ask -// for the meeting pin. +// * If OPTIONAL, Alexa will ask if you +// have a meeting pin and if the customer responds with yes, it will ask for the +// meeting pin. type MeetingSetting struct { // The values that indicate whether the pin is always required. diff --git a/service/amplify/types/enums.go b/service/amplify/types/enums.go index 31e5b37195a..4794603724a 100644 --- a/service/amplify/types/enums.go +++ b/service/amplify/types/enums.go @@ -6,14 +6,14 @@ type DomainStatus string // Enum values for DomainStatus const ( - DomainStatusPending_verification DomainStatus = "PENDING_VERIFICATION" - DomainStatusIn_progress DomainStatus = "IN_PROGRESS" - DomainStatusAvailable DomainStatus = "AVAILABLE" - DomainStatusPending_deployment DomainStatus = "PENDING_DEPLOYMENT" - DomainStatusFailed DomainStatus = "FAILED" - DomainStatusCreating DomainStatus = "CREATING" - DomainStatusRequesting_certificate DomainStatus = "REQUESTING_CERTIFICATE" - DomainStatusUpdating DomainStatus = "UPDATING" + DomainStatusPendingVerification DomainStatus = "PENDING_VERIFICATION" + DomainStatusInProgress DomainStatus = "IN_PROGRESS" + DomainStatusAvailable DomainStatus = "AVAILABLE" + DomainStatusPendingDeployment DomainStatus = "PENDING_DEPLOYMENT" + DomainStatusFailed DomainStatus = "FAILED" + DomainStatusCreating DomainStatus = "CREATING" + DomainStatusRequestingCertificate DomainStatus = "REQUESTING_CERTIFICATE" + DomainStatusUpdating DomainStatus = "UPDATING" ) // Values returns all known values for DomainStatus. Note that this can be expanded @@ -64,10 +64,10 @@ type JobType string // Enum values for JobType const ( - JobTypeRelease JobType = "RELEASE" - JobTypeRetry JobType = "RETRY" - JobTypeManual JobType = "MANUAL" - JobTypeWeb_hook JobType = "WEB_HOOK" + JobTypeRelease JobType = "RELEASE" + JobTypeRetry JobType = "RETRY" + JobTypeManual JobType = "MANUAL" + JobTypeWebHook JobType = "WEB_HOOK" ) // Values returns all known values for JobType. Note that this can be expanded in @@ -106,7 +106,7 @@ const ( StageBeta Stage = "BETA" StageDevelopment Stage = "DEVELOPMENT" StageExperimental Stage = "EXPERIMENTAL" - StagePull_request Stage = "PULL_REQUEST" + StagePullRequest Stage = "PULL_REQUEST" ) // Values returns all known values for Stage. Note that this can be expanded in the diff --git a/service/apigateway/api_op_CreateAuthorizer.go b/service/apigateway/api_op_CreateAuthorizer.go index 546436021b0..4e3168e11d0 100644 --- a/service/apigateway/api_op_CreateAuthorizer.go +++ b/service/apigateway/api_op_CreateAuthorizer.go @@ -79,13 +79,13 @@ type CreateAuthorizerInput struct { // The identity source for which authorization is requested. 
// - // * For a TOKEN or + // * For a TOKEN or // COGNITO_USER_POOLS authorizer, this is required and specifies the request header // mapping expression for the custom header holding the authorization token // submitted by the client. For example, if the token header name is Auth, the // header mapping expression is method.request.header.Auth. // - // * For the REQUEST + // * For the REQUEST // authorizer, this is required when authorization caching is enabled. The value is // a comma-separated string of one or more mapping expressions of the specified // request parameters. For example, if an Auth header, a Name query string @@ -164,13 +164,13 @@ type CreateAuthorizerOutput struct { // The identity source for which authorization is requested. // - // * For a TOKEN or + // * For a TOKEN or // COGNITO_USER_POOLS authorizer, this is required and specifies the request header // mapping expression for the custom header holding the authorization token // submitted by the client. For example, if the token header name is Auth, the // header mapping expression is method.request.header.Auth. // - // * For the REQUEST + // * For the REQUEST // authorizer, this is required when authorization caching is enabled. The value is // a comma-separated string of one or more mapping expressions of the specified // request parameters. For example, if an Auth header, a Name query string diff --git a/service/apigateway/api_op_CreateRestApi.go b/service/apigateway/api_op_CreateRestApi.go index f35d6f0c0c6..269c5ae333a 100644 --- a/service/apigateway/api_op_CreateRestApi.go +++ b/service/apigateway/api_op_CreateRestApi.go @@ -39,11 +39,11 @@ type CreateRestApiInput struct { // The source of the API key for metering requests according to a usage plan. Valid // values are: // - // * HEADER to read the API key from the X-API-Key header of a + // * HEADER to read the API key from the X-API-Key header of a // request. // - // * AUTHORIZER to read the API key from the UsageIdentifierKey from - // a custom authorizer. + // * AUTHORIZER to read the API key from the UsageIdentifierKey from a + // custom authorizer. ApiKeySource types.ApiKeySourceType // The list of binary media types supported by the RestApi. By default, the RestApi @@ -93,11 +93,11 @@ type CreateRestApiOutput struct { // The source of the API key for metering requests according to a usage plan. Valid // values are: // - // * HEADER to read the API key from the X-API-Key header of a + // * HEADER to read the API key from the X-API-Key header of a // request. // - // * AUTHORIZER to read the API key from the UsageIdentifierKey from - // a custom authorizer. + // * AUTHORIZER to read the API key from the UsageIdentifierKey from a + // custom authorizer. ApiKeySource types.ApiKeySourceType // The list of binary media types supported by the RestApi. By default, the RestApi diff --git a/service/apigateway/api_op_DeleteGatewayResponse.go b/service/apigateway/api_op_DeleteGatewayResponse.go index 9178abee782..8a18b303c57 100644 --- a/service/apigateway/api_op_DeleteGatewayResponse.go +++ b/service/apigateway/api_op_DeleteGatewayResponse.go @@ -35,52 +35,50 @@ type DeleteGatewayResponseInput struct { // [Required] The response type of the associated GatewayResponse. 
Valid values // are // - // * ACCESS_DENIED + // * ACCESS_DENIED // - // * API_CONFIGURATION_ERROR + // * API_CONFIGURATION_ERROR // - // * - // AUTHORIZER_FAILURE + // * AUTHORIZER_FAILURE // - // * AUTHORIZER_CONFIGURATION_ERROR + // * + // AUTHORIZER_CONFIGURATION_ERROR // - // * - // BAD_REQUEST_PARAMETERS + // * BAD_REQUEST_PARAMETERS // - // * BAD_REQUEST_BODY + // * BAD_REQUEST_BODY // - // * DEFAULT_4XX + // * + // DEFAULT_4XX // - // * - // DEFAULT_5XX + // * DEFAULT_5XX // - // * EXPIRED_TOKEN + // * EXPIRED_TOKEN // - // * INVALID_SIGNATURE + // * INVALID_SIGNATURE // - // * + // * // INTEGRATION_FAILURE // - // * INTEGRATION_TIMEOUT + // * INTEGRATION_TIMEOUT // - // * INVALID_API_KEY + // * INVALID_API_KEY // - // * + // * // MISSING_AUTHENTICATION_TOKEN // - // * QUOTA_EXCEEDED + // * QUOTA_EXCEEDED // - // * REQUEST_TOO_LARGE + // * REQUEST_TOO_LARGE // + // * + // RESOURCE_NOT_FOUND // - // * RESOURCE_NOT_FOUND + // * THROTTLED // - // * THROTTLED + // * UNAUTHORIZED // - // * UNAUTHORIZED - // - // * - // UNSUPPORTED_MEDIA_TYPE + // * UNSUPPORTED_MEDIA_TYPE // // This member is required. ResponseType types.GatewayResponseType diff --git a/service/apigateway/api_op_GetAccount.go b/service/apigateway/api_op_GetAccount.go index ab367cb8c1f..8718ab095c9 100644 --- a/service/apigateway/api_op_GetAccount.go +++ b/service/apigateway/api_op_GetAccount.go @@ -45,12 +45,12 @@ type GetAccountInput struct { // The following exception // may be thrown when the request fails. // -// * UnauthorizedException +// * UnauthorizedException // -// * +// * // NotFoundException // -// * TooManyRequestsException +// * TooManyRequestsException // // For detailed error code // information, including the corresponding HTTP Status Codes, see API Gateway diff --git a/service/apigateway/api_op_GetAuthorizer.go b/service/apigateway/api_op_GetAuthorizer.go index d4e4e7d5f42..3c17b01fa13 100644 --- a/service/apigateway/api_op_GetAuthorizer.go +++ b/service/apigateway/api_op_GetAuthorizer.go @@ -91,13 +91,13 @@ type GetAuthorizerOutput struct { // The identity source for which authorization is requested. // - // * For a TOKEN or + // * For a TOKEN or // COGNITO_USER_POOLS authorizer, this is required and specifies the request header // mapping expression for the custom header holding the authorization token // submitted by the client. For example, if the token header name is Auth, the // header mapping expression is method.request.header.Auth. // - // * For the REQUEST + // * For the REQUEST // authorizer, this is required when authorization caching is enabled. The value is // a comma-separated string of one or more mapping expressions of the specified // request parameters. For example, if an Auth header, a Name query string diff --git a/service/apigateway/api_op_GetGatewayResponse.go b/service/apigateway/api_op_GetGatewayResponse.go index 19f9d071623..a0df4624202 100644 --- a/service/apigateway/api_op_GetGatewayResponse.go +++ b/service/apigateway/api_op_GetGatewayResponse.go @@ -33,52 +33,50 @@ type GetGatewayResponseInput struct { // [Required] The response type of the associated GatewayResponse. 
Valid values // are // - // * ACCESS_DENIED + // * ACCESS_DENIED // - // * API_CONFIGURATION_ERROR + // * API_CONFIGURATION_ERROR // - // * - // AUTHORIZER_FAILURE + // * AUTHORIZER_FAILURE // - // * AUTHORIZER_CONFIGURATION_ERROR + // * + // AUTHORIZER_CONFIGURATION_ERROR // - // * - // BAD_REQUEST_PARAMETERS + // * BAD_REQUEST_PARAMETERS // - // * BAD_REQUEST_BODY + // * BAD_REQUEST_BODY // - // * DEFAULT_4XX + // * + // DEFAULT_4XX // - // * - // DEFAULT_5XX + // * DEFAULT_5XX // - // * EXPIRED_TOKEN + // * EXPIRED_TOKEN // - // * INVALID_SIGNATURE + // * INVALID_SIGNATURE // - // * + // * // INTEGRATION_FAILURE // - // * INTEGRATION_TIMEOUT + // * INTEGRATION_TIMEOUT // - // * INVALID_API_KEY + // * INVALID_API_KEY // - // * + // * // MISSING_AUTHENTICATION_TOKEN // - // * QUOTA_EXCEEDED - // - // * REQUEST_TOO_LARGE + // * QUOTA_EXCEEDED // + // * REQUEST_TOO_LARGE // - // * RESOURCE_NOT_FOUND + // * + // RESOURCE_NOT_FOUND // - // * THROTTLED + // * THROTTLED // - // * UNAUTHORIZED + // * UNAUTHORIZED // - // * - // UNSUPPORTED_MEDIA_TYPE + // * UNSUPPORTED_MEDIA_TYPE // // This member is required. ResponseType types.GatewayResponseType @@ -159,52 +157,51 @@ type GetGatewayResponseOutput struct { // The response type of the associated GatewayResponse. Valid values are // - // * + // * // ACCESS_DENIED // - // * API_CONFIGURATION_ERROR + // * API_CONFIGURATION_ERROR // - // * AUTHORIZER_FAILURE + // * AUTHORIZER_FAILURE // - // * + // * // AUTHORIZER_CONFIGURATION_ERROR // - // * BAD_REQUEST_PARAMETERS + // * BAD_REQUEST_PARAMETERS // - // * - // BAD_REQUEST_BODY + // * BAD_REQUEST_BODY // - // * DEFAULT_4XX + // * + // DEFAULT_4XX // - // * DEFAULT_5XX - // - // * EXPIRED_TOKEN + // * DEFAULT_5XX // + // * EXPIRED_TOKEN // // * INVALID_SIGNATURE // - // * INTEGRATION_FAILURE + // * + // INTEGRATION_FAILURE // - // * INTEGRATION_TIMEOUT + // * INTEGRATION_TIMEOUT // - // * - // INVALID_API_KEY + // * INVALID_API_KEY // - // * MISSING_AUTHENTICATION_TOKEN + // * + // MISSING_AUTHENTICATION_TOKEN // - // * QUOTA_EXCEEDED + // * QUOTA_EXCEEDED // - // * - // REQUEST_TOO_LARGE + // * REQUEST_TOO_LARGE // - // * RESOURCE_NOT_FOUND + // * + // RESOURCE_NOT_FOUND // - // * THROTTLED + // * THROTTLED // - // * - // UNAUTHORIZED + // * UNAUTHORIZED // - // * UNSUPPORTED_MEDIA_TYPE + // * UNSUPPORTED_MEDIA_TYPE ResponseType types.GatewayResponseType // The HTTP status code for this GatewayResponse. diff --git a/service/apigateway/api_op_GetIntegration.go b/service/apigateway/api_op_GetIntegration.go index db17d2cdeb0..3c37e80c7ea 100644 --- a/service/apigateway/api_op_GetIntegration.go +++ b/service/apigateway/api_op_GetIntegration.go @@ -87,10 +87,10 @@ type GetIntegrationOutput struct { // values are CONVERT_TO_BINARY and CONVERT_TO_TEXT, with the following // behaviors: // - // * CONVERT_TO_BINARY: Converts a request payload from a + // * CONVERT_TO_BINARY: Converts a request payload from a // Base64-encoded string to the corresponding binary blob. // - // * CONVERT_TO_TEXT: + // * CONVERT_TO_TEXT: // Converts a request payload from a binary blob to a Base64-encoded string. // // If @@ -149,23 +149,23 @@ type GetIntegrationOutput struct { // the content type does not match any of the mapped content types, as specified in // requestTemplates. 
The valid value is one of the following: // - // * WHEN_NO_MATCH: + // * WHEN_NO_MATCH: // passes the method request body through the integration request to the back end // without transformation when the method request content type does not match any // content type associated with the mapping templates defined in the integration // request. // - // * WHEN_NO_TEMPLATES: passes the method request body through the + // * WHEN_NO_TEMPLATES: passes the method request body through the // integration request to the back end without transformation when no mapping // template is defined in the integration request. If a template is defined when // this option is selected, the method request of an unmapped content-type will be // rejected with an HTTP 415 Unsupported Media Type response. // - // * NEVER: rejects - // the method request with an HTTP 415 Unsupported Media Type response when either - // the method request content type does not match any content type associated with - // the mapping templates defined in the integration request or no mapping template - // is defined in the integration request. + // * NEVER: rejects the + // method request with an HTTP 415 Unsupported Media Type response when either the + // method request content type does not match any content type associated with the + // mapping templates defined in the integration request or no mapping template is + // defined in the integration request. PassthroughBehavior *string // A key-value map specifying request parameters that are passed from the method @@ -192,48 +192,48 @@ type GetIntegrationOutput struct { // Specifies an API method integration type. The valid value is one of the // following: // - // * AWS: for integrating the API method request with an AWS - // service action, including the Lambda function-invoking action. With the Lambda + // * AWS: for integrating the API method request with an AWS service + // action, including the Lambda function-invoking action. With the Lambda // function-invoking action, this is referred to as the Lambda custom integration. // With any other AWS service action, this is known as AWS integration. // - // * + // * // AWS_PROXY: for integrating the API method request with the Lambda // function-invoking action with the client request passed through as-is. This // integration is also referred to as the Lambda proxy integration. // - // * HTTP: - // for integrating the API method request with an HTTP endpoint, including a - // private HTTP endpoint within a VPC. This integration is also referred to as the - // HTTP custom integration. + // * HTTP: for + // integrating the API method request with an HTTP endpoint, including a private + // HTTP endpoint within a VPC. This integration is also referred to as the HTTP + // custom integration. // - // * HTTP_PROXY: for integrating the API method - // request with an HTTP endpoint, including a private HTTP endpoint within a VPC, - // with the client request passed through as-is. This is also referred to as the - // HTTP proxy integration. + // * HTTP_PROXY: for integrating the API method request with + // an HTTP endpoint, including a private HTTP endpoint within a VPC, with the + // client request passed through as-is. This is also referred to as the HTTP proxy + // integration. // - // * MOCK: for integrating the API method request with - // API Gateway as a "loop-back" endpoint without invoking any backend. + // * MOCK: for integrating the API method request with API Gateway as + // a "loop-back" endpoint without invoking any backend. 
// - // For the - // HTTP and HTTP proxy integrations, each integration can specify a protocol - // (http/https), port and path. Standard 80 and 443 ports are supported as well as - // custom ports above 1024. An HTTP or HTTP proxy integration with a connectionType - // of VPC_LINK is referred to as a private integration and uses a VpcLink to - // connect API Gateway to a network load balancer of a VPC. + // For the HTTP and HTTP + // proxy integrations, each integration can specify a protocol (http/https), port + // and path. Standard 80 and 443 ports are supported as well as custom ports above + // 1024. An HTTP or HTTP proxy integration with a connectionType of VPC_LINK is + // referred to as a private integration and uses a VpcLink to connect API Gateway + // to a network load balancer of a VPC. Type types.IntegrationType // Specifies Uniform Resource Identifier (URI) of the integration endpoint. // - // * - // For HTTP or HTTP_PROXY integrations, the URI must be a fully formed, encoded - // HTTP(S) URL according to the RFC-3986 specification + // * For + // HTTP or HTTP_PROXY integrations, the URI must be a fully formed, encoded HTTP(S) + // URL according to the RFC-3986 specification // (https://en.wikipedia.org/wiki/Uniform_Resource_Identifier), for either standard // integration, where connectionType is not VPC_LINK, or private integration, where // connectionType is VPC_LINK. For a private HTTP integration, the URI is not used // for routing. // - // * For AWS or AWS_PROXY integrations, the URI is of the form + // * For AWS or AWS_PROXY integrations, the URI is of the form // arn:aws:apigateway:{region}:{subdomain.service|service}:path|action/{service_api}. // Here, {Region} is the API Gateway region (e.g., us-east-1); {service} is the // name of the integrated AWS service (e.g., s3); and {subdomain} is a designated diff --git a/service/apigateway/api_op_GetIntegrationResponse.go b/service/apigateway/api_op_GetIntegrationResponse.go index b56109c3c27..85e5bc992ce 100644 --- a/service/apigateway/api_op_GetIntegrationResponse.go +++ b/service/apigateway/api_op_GetIntegrationResponse.go @@ -69,10 +69,10 @@ type GetIntegrationResponseOutput struct { // values are CONVERT_TO_BINARY and CONVERT_TO_TEXT, with the following // behaviors: // - // * CONVERT_TO_BINARY: Converts a response payload from a + // * CONVERT_TO_BINARY: Converts a response payload from a // Base64-encoded string to the corresponding binary blob. // - // * CONVERT_TO_TEXT: + // * CONVERT_TO_TEXT: // Converts a response payload from a binary blob to a Base64-encoded string. // // If diff --git a/service/apigateway/api_op_GetRestApi.go b/service/apigateway/api_op_GetRestApi.go index a970cceea9e..7db6ba24b06 100644 --- a/service/apigateway/api_op_GetRestApi.go +++ b/service/apigateway/api_op_GetRestApi.go @@ -52,11 +52,11 @@ type GetRestApiOutput struct { // The source of the API key for metering requests according to a usage plan. Valid // values are: // - // * HEADER to read the API key from the X-API-Key header of a + // * HEADER to read the API key from the X-API-Key header of a // request. // - // * AUTHORIZER to read the API key from the UsageIdentifierKey from - // a custom authorizer. + // * AUTHORIZER to read the API key from the UsageIdentifierKey from a + // custom authorizer. ApiKeySource types.ApiKeySourceType // The list of binary media types supported by the RestApi. 
By default, the RestApi diff --git a/service/apigateway/api_op_ImportRestApi.go b/service/apigateway/api_op_ImportRestApi.go index cce80ca3667..cb8eb98679f 100644 --- a/service/apigateway/api_op_ImportRestApi.go +++ b/service/apigateway/api_op_ImportRestApi.go @@ -69,11 +69,11 @@ type ImportRestApiOutput struct { // The source of the API key for metering requests according to a usage plan. Valid // values are: // - // * HEADER to read the API key from the X-API-Key header of a + // * HEADER to read the API key from the X-API-Key header of a // request. // - // * AUTHORIZER to read the API key from the UsageIdentifierKey from - // a custom authorizer. + // * AUTHORIZER to read the API key from the UsageIdentifierKey from a + // custom authorizer. ApiKeySource types.ApiKeySourceType // The list of binary media types supported by the RestApi. By default, the RestApi diff --git a/service/apigateway/api_op_PutGatewayResponse.go b/service/apigateway/api_op_PutGatewayResponse.go index 97392ed440b..774b5c9ea8d 100644 --- a/service/apigateway/api_op_PutGatewayResponse.go +++ b/service/apigateway/api_op_PutGatewayResponse.go @@ -35,52 +35,50 @@ type PutGatewayResponseInput struct { // [Required] The response type of the associated GatewayResponse. Valid values // are // - // * ACCESS_DENIED + // * ACCESS_DENIED // - // * API_CONFIGURATION_ERROR + // * API_CONFIGURATION_ERROR // - // * - // AUTHORIZER_FAILURE + // * AUTHORIZER_FAILURE // - // * AUTHORIZER_CONFIGURATION_ERROR + // * + // AUTHORIZER_CONFIGURATION_ERROR // - // * - // BAD_REQUEST_PARAMETERS + // * BAD_REQUEST_PARAMETERS // - // * BAD_REQUEST_BODY + // * BAD_REQUEST_BODY // - // * DEFAULT_4XX + // * + // DEFAULT_4XX // - // * - // DEFAULT_5XX + // * DEFAULT_5XX // - // * EXPIRED_TOKEN + // * EXPIRED_TOKEN // - // * INVALID_SIGNATURE + // * INVALID_SIGNATURE // - // * + // * // INTEGRATION_FAILURE // - // * INTEGRATION_TIMEOUT + // * INTEGRATION_TIMEOUT // - // * INVALID_API_KEY + // * INVALID_API_KEY // - // * + // * // MISSING_AUTHENTICATION_TOKEN // - // * QUOTA_EXCEEDED - // - // * REQUEST_TOO_LARGE + // * QUOTA_EXCEEDED // + // * REQUEST_TOO_LARGE // - // * RESOURCE_NOT_FOUND + // * + // RESOURCE_NOT_FOUND // - // * THROTTLED + // * THROTTLED // - // * UNAUTHORIZED + // * UNAUTHORIZED // - // * - // UNSUPPORTED_MEDIA_TYPE + // * UNSUPPORTED_MEDIA_TYPE // // This member is required. ResponseType types.GatewayResponseType @@ -172,52 +170,51 @@ type PutGatewayResponseOutput struct { // The response type of the associated GatewayResponse. 
Valid values are // - // * + // * // ACCESS_DENIED // - // * API_CONFIGURATION_ERROR + // * API_CONFIGURATION_ERROR // - // * AUTHORIZER_FAILURE + // * AUTHORIZER_FAILURE // - // * + // * // AUTHORIZER_CONFIGURATION_ERROR // - // * BAD_REQUEST_PARAMETERS + // * BAD_REQUEST_PARAMETERS // - // * - // BAD_REQUEST_BODY + // * BAD_REQUEST_BODY // - // * DEFAULT_4XX + // * + // DEFAULT_4XX // - // * DEFAULT_5XX - // - // * EXPIRED_TOKEN + // * DEFAULT_5XX // + // * EXPIRED_TOKEN // // * INVALID_SIGNATURE // - // * INTEGRATION_FAILURE + // * + // INTEGRATION_FAILURE // - // * INTEGRATION_TIMEOUT + // * INTEGRATION_TIMEOUT // - // * - // INVALID_API_KEY + // * INVALID_API_KEY // - // * MISSING_AUTHENTICATION_TOKEN + // * + // MISSING_AUTHENTICATION_TOKEN // - // * QUOTA_EXCEEDED + // * QUOTA_EXCEEDED // - // * - // REQUEST_TOO_LARGE + // * REQUEST_TOO_LARGE // - // * RESOURCE_NOT_FOUND + // * + // RESOURCE_NOT_FOUND // - // * THROTTLED + // * THROTTLED // - // * - // UNAUTHORIZED + // * UNAUTHORIZED // - // * UNSUPPORTED_MEDIA_TYPE + // * UNSUPPORTED_MEDIA_TYPE ResponseType types.GatewayResponseType // The HTTP status code for this GatewayResponse. diff --git a/service/apigateway/api_op_PutIntegration.go b/service/apigateway/api_op_PutIntegration.go index 80dc48468f5..477ffc87d79 100644 --- a/service/apigateway/api_op_PutIntegration.go +++ b/service/apigateway/api_op_PutIntegration.go @@ -77,10 +77,10 @@ type PutIntegrationInput struct { // values are CONVERT_TO_BINARY and CONVERT_TO_TEXT, with the following // behaviors: // - // * CONVERT_TO_BINARY: Converts a request payload from a + // * CONVERT_TO_BINARY: Converts a request payload from a // Base64-encoded string to the corresponding binary blob. // - // * CONVERT_TO_TEXT: + // * CONVERT_TO_TEXT: // Converts a request payload from a binary blob to a Base64-encoded string. // // If @@ -103,17 +103,17 @@ type PutIntegrationInput struct { // specified as the requestTemplates property on the Integration resource. There // are three valid values: WHEN_NO_MATCH, WHEN_NO_TEMPLATES, and NEVER. // - // * + // * // WHEN_NO_MATCH passes the request body for unmapped content types through to the // integration back end without transformation. // - // * NEVER rejects unmapped - // content types with an HTTP 415 'Unsupported Media Type' response. + // * NEVER rejects unmapped content + // types with an HTTP 415 'Unsupported Media Type' response. // - // * - // WHEN_NO_TEMPLATES allows pass-through when the integration has NO content types - // mapped to templates. However if there is at least one content type defined, - // unmapped content types will be rejected with the same 415 response. + // * WHEN_NO_TEMPLATES + // allows pass-through when the integration has NO content types mapped to + // templates. However if there is at least one content type defined, unmapped + // content types will be rejected with the same 415 response. PassthroughBehavior *string // A key-value map specifying request parameters that are passed from the method @@ -144,15 +144,15 @@ type PutIntegrationInput struct { // Specifies Uniform Resource Identifier (URI) of the integration endpoint. 
// - // * - // For HTTP or HTTP_PROXY integrations, the URI must be a fully formed, encoded - // HTTP(S) URL according to the RFC-3986 specification + // * For + // HTTP or HTTP_PROXY integrations, the URI must be a fully formed, encoded HTTP(S) + // URL according to the RFC-3986 specification // (https://en.wikipedia.org/wiki/Uniform_Resource_Identifier), for either standard // integration, where connectionType is not VPC_LINK, or private integration, where // connectionType is VPC_LINK. For a private HTTP integration, the URI is not used // for routing. // - // * For AWS or AWS_PROXY integrations, the URI is of the form + // * For AWS or AWS_PROXY integrations, the URI is of the form // arn:aws:apigateway:{region}:{subdomain.service|service}:path|action/{service_api}. // Here, {Region} is the API Gateway region (e.g., us-east-1); {service} is the // name of the integrated AWS service (e.g., s3); and {subdomain} is a designated @@ -204,10 +204,10 @@ type PutIntegrationOutput struct { // values are CONVERT_TO_BINARY and CONVERT_TO_TEXT, with the following // behaviors: // - // * CONVERT_TO_BINARY: Converts a request payload from a + // * CONVERT_TO_BINARY: Converts a request payload from a // Base64-encoded string to the corresponding binary blob. // - // * CONVERT_TO_TEXT: + // * CONVERT_TO_TEXT: // Converts a request payload from a binary blob to a Base64-encoded string. // // If @@ -266,23 +266,23 @@ type PutIntegrationOutput struct { // the content type does not match any of the mapped content types, as specified in // requestTemplates. The valid value is one of the following: // - // * WHEN_NO_MATCH: + // * WHEN_NO_MATCH: // passes the method request body through the integration request to the back end // without transformation when the method request content type does not match any // content type associated with the mapping templates defined in the integration // request. // - // * WHEN_NO_TEMPLATES: passes the method request body through the + // * WHEN_NO_TEMPLATES: passes the method request body through the // integration request to the back end without transformation when no mapping // template is defined in the integration request. If a template is defined when // this option is selected, the method request of an unmapped content-type will be // rejected with an HTTP 415 Unsupported Media Type response. // - // * NEVER: rejects - // the method request with an HTTP 415 Unsupported Media Type response when either - // the method request content type does not match any content type associated with - // the mapping templates defined in the integration request or no mapping template - // is defined in the integration request. + // * NEVER: rejects the + // method request with an HTTP 415 Unsupported Media Type response when either the + // method request content type does not match any content type associated with the + // mapping templates defined in the integration request or no mapping template is + // defined in the integration request. PassthroughBehavior *string // A key-value map specifying request parameters that are passed from the method @@ -309,48 +309,48 @@ type PutIntegrationOutput struct { // Specifies an API method integration type. The valid value is one of the // following: // - // * AWS: for integrating the API method request with an AWS - // service action, including the Lambda function-invoking action. With the Lambda + // * AWS: for integrating the API method request with an AWS service + // action, including the Lambda function-invoking action. 
With the Lambda // function-invoking action, this is referred to as the Lambda custom integration. // With any other AWS service action, this is known as AWS integration. // - // * + // * // AWS_PROXY: for integrating the API method request with the Lambda // function-invoking action with the client request passed through as-is. This // integration is also referred to as the Lambda proxy integration. // - // * HTTP: - // for integrating the API method request with an HTTP endpoint, including a - // private HTTP endpoint within a VPC. This integration is also referred to as the - // HTTP custom integration. + // * HTTP: for + // integrating the API method request with an HTTP endpoint, including a private + // HTTP endpoint within a VPC. This integration is also referred to as the HTTP + // custom integration. // - // * HTTP_PROXY: for integrating the API method - // request with an HTTP endpoint, including a private HTTP endpoint within a VPC, - // with the client request passed through as-is. This is also referred to as the - // HTTP proxy integration. + // * HTTP_PROXY: for integrating the API method request with + // an HTTP endpoint, including a private HTTP endpoint within a VPC, with the + // client request passed through as-is. This is also referred to as the HTTP proxy + // integration. // - // * MOCK: for integrating the API method request with - // API Gateway as a "loop-back" endpoint without invoking any backend. + // * MOCK: for integrating the API method request with API Gateway as + // a "loop-back" endpoint without invoking any backend. // - // For the - // HTTP and HTTP proxy integrations, each integration can specify a protocol - // (http/https), port and path. Standard 80 and 443 ports are supported as well as - // custom ports above 1024. An HTTP or HTTP proxy integration with a connectionType - // of VPC_LINK is referred to as a private integration and uses a VpcLink to - // connect API Gateway to a network load balancer of a VPC. + // For the HTTP and HTTP + // proxy integrations, each integration can specify a protocol (http/https), port + // and path. Standard 80 and 443 ports are supported as well as custom ports above + // 1024. An HTTP or HTTP proxy integration with a connectionType of VPC_LINK is + // referred to as a private integration and uses a VpcLink to connect API Gateway + // to a network load balancer of a VPC. Type types.IntegrationType // Specifies Uniform Resource Identifier (URI) of the integration endpoint. // - // * - // For HTTP or HTTP_PROXY integrations, the URI must be a fully formed, encoded - // HTTP(S) URL according to the RFC-3986 specification + // * For + // HTTP or HTTP_PROXY integrations, the URI must be a fully formed, encoded HTTP(S) + // URL according to the RFC-3986 specification // (https://en.wikipedia.org/wiki/Uniform_Resource_Identifier), for either standard // integration, where connectionType is not VPC_LINK, or private integration, where // connectionType is VPC_LINK. For a private HTTP integration, the URI is not used // for routing. // - // * For AWS or AWS_PROXY integrations, the URI is of the form + // * For AWS or AWS_PROXY integrations, the URI is of the form // arn:aws:apigateway:{region}:{subdomain.service|service}:path|action/{service_api}. 
// Here, {Region} is the API Gateway region (e.g., us-east-1); {service} is the // name of the integrated AWS service (e.g., s3); and {subdomain} is a designated diff --git a/service/apigateway/api_op_PutIntegrationResponse.go b/service/apigateway/api_op_PutIntegrationResponse.go index bade812656d..216c1df063e 100644 --- a/service/apigateway/api_op_PutIntegrationResponse.go +++ b/service/apigateway/api_op_PutIntegrationResponse.go @@ -55,10 +55,10 @@ type PutIntegrationResponseInput struct { // values are CONVERT_TO_BINARY and CONVERT_TO_TEXT, with the following // behaviors: // - // * CONVERT_TO_BINARY: Converts a response payload from a + // * CONVERT_TO_BINARY: Converts a response payload from a // Base64-encoded string to the corresponding binary blob. // - // * CONVERT_TO_TEXT: + // * CONVERT_TO_TEXT: // Converts a response payload from a binary blob to a Base64-encoded string. // // If @@ -104,10 +104,10 @@ type PutIntegrationResponseOutput struct { // values are CONVERT_TO_BINARY and CONVERT_TO_TEXT, with the following // behaviors: // - // * CONVERT_TO_BINARY: Converts a response payload from a + // * CONVERT_TO_BINARY: Converts a response payload from a // Base64-encoded string to the corresponding binary blob. // - // * CONVERT_TO_TEXT: + // * CONVERT_TO_TEXT: // Converts a response payload from a binary blob to a Base64-encoded string. // // If diff --git a/service/apigateway/api_op_PutRestApi.go b/service/apigateway/api_op_PutRestApi.go index 03c513502fe..6dcc1772eeb 100644 --- a/service/apigateway/api_op_PutRestApi.go +++ b/service/apigateway/api_op_PutRestApi.go @@ -70,11 +70,11 @@ type PutRestApiOutput struct { // The source of the API key for metering requests according to a usage plan. Valid // values are: // - // * HEADER to read the API key from the X-API-Key header of a + // * HEADER to read the API key from the X-API-Key header of a // request. // - // * AUTHORIZER to read the API key from the UsageIdentifierKey from - // a custom authorizer. + // * AUTHORIZER to read the API key from the UsageIdentifierKey from a + // custom authorizer. ApiKeySource types.ApiKeySourceType // The list of binary media types supported by the RestApi. By default, the RestApi diff --git a/service/apigateway/api_op_UpdateAccount.go b/service/apigateway/api_op_UpdateAccount.go index 64a31107afe..5aac1492c4e 100644 --- a/service/apigateway/api_op_UpdateAccount.go +++ b/service/apigateway/api_op_UpdateAccount.go @@ -49,12 +49,12 @@ type UpdateAccountInput struct { // The following exception // may be thrown when the request fails. // -// * UnauthorizedException +// * UnauthorizedException // -// * +// * // NotFoundException // -// * TooManyRequestsException +// * TooManyRequestsException // // For detailed error code // information, including the corresponding HTTP Status Codes, see API Gateway diff --git a/service/apigateway/api_op_UpdateAuthorizer.go b/service/apigateway/api_op_UpdateAuthorizer.go index 50233936846..32647db4c34 100644 --- a/service/apigateway/api_op_UpdateAuthorizer.go +++ b/service/apigateway/api_op_UpdateAuthorizer.go @@ -95,13 +95,13 @@ type UpdateAuthorizerOutput struct { // The identity source for which authorization is requested. // - // * For a TOKEN or + // * For a TOKEN or // COGNITO_USER_POOLS authorizer, this is required and specifies the request header // mapping expression for the custom header holding the authorization token // submitted by the client. For example, if the token header name is Auth, the // header mapping expression is method.request.header.Auth. 
// - // * For the REQUEST + // * For the REQUEST // authorizer, this is required when authorization caching is enabled. The value is // a comma-separated string of one or more mapping expressions of the specified // request parameters. For example, if an Auth header, a Name query string diff --git a/service/apigateway/api_op_UpdateGatewayResponse.go b/service/apigateway/api_op_UpdateGatewayResponse.go index 641b094df66..34655c6c5f4 100644 --- a/service/apigateway/api_op_UpdateGatewayResponse.go +++ b/service/apigateway/api_op_UpdateGatewayResponse.go @@ -33,52 +33,50 @@ type UpdateGatewayResponseInput struct { // [Required] The response type of the associated GatewayResponse. Valid values // are // - // * ACCESS_DENIED + // * ACCESS_DENIED // - // * API_CONFIGURATION_ERROR + // * API_CONFIGURATION_ERROR // - // * - // AUTHORIZER_FAILURE + // * AUTHORIZER_FAILURE // - // * AUTHORIZER_CONFIGURATION_ERROR + // * + // AUTHORIZER_CONFIGURATION_ERROR // - // * - // BAD_REQUEST_PARAMETERS + // * BAD_REQUEST_PARAMETERS // - // * BAD_REQUEST_BODY + // * BAD_REQUEST_BODY // - // * DEFAULT_4XX + // * + // DEFAULT_4XX // - // * - // DEFAULT_5XX + // * DEFAULT_5XX // - // * EXPIRED_TOKEN + // * EXPIRED_TOKEN // - // * INVALID_SIGNATURE + // * INVALID_SIGNATURE // - // * + // * // INTEGRATION_FAILURE // - // * INTEGRATION_TIMEOUT + // * INTEGRATION_TIMEOUT // - // * INVALID_API_KEY + // * INVALID_API_KEY // - // * + // * // MISSING_AUTHENTICATION_TOKEN // - // * QUOTA_EXCEEDED - // - // * REQUEST_TOO_LARGE + // * QUOTA_EXCEEDED // + // * REQUEST_TOO_LARGE // - // * RESOURCE_NOT_FOUND + // * + // RESOURCE_NOT_FOUND // - // * THROTTLED + // * THROTTLED // - // * UNAUTHORIZED + // * UNAUTHORIZED // - // * - // UNSUPPORTED_MEDIA_TYPE + // * UNSUPPORTED_MEDIA_TYPE // // This member is required. ResponseType types.GatewayResponseType @@ -163,52 +161,51 @@ type UpdateGatewayResponseOutput struct { // The response type of the associated GatewayResponse. Valid values are // - // * + // * // ACCESS_DENIED // - // * API_CONFIGURATION_ERROR + // * API_CONFIGURATION_ERROR // - // * AUTHORIZER_FAILURE + // * AUTHORIZER_FAILURE // - // * + // * // AUTHORIZER_CONFIGURATION_ERROR // - // * BAD_REQUEST_PARAMETERS + // * BAD_REQUEST_PARAMETERS // - // * - // BAD_REQUEST_BODY + // * BAD_REQUEST_BODY // - // * DEFAULT_4XX + // * + // DEFAULT_4XX // - // * DEFAULT_5XX - // - // * EXPIRED_TOKEN + // * DEFAULT_5XX // + // * EXPIRED_TOKEN // // * INVALID_SIGNATURE // - // * INTEGRATION_FAILURE + // * + // INTEGRATION_FAILURE // - // * INTEGRATION_TIMEOUT + // * INTEGRATION_TIMEOUT // - // * - // INVALID_API_KEY + // * INVALID_API_KEY // - // * MISSING_AUTHENTICATION_TOKEN + // * + // MISSING_AUTHENTICATION_TOKEN // - // * QUOTA_EXCEEDED + // * QUOTA_EXCEEDED // - // * - // REQUEST_TOO_LARGE + // * REQUEST_TOO_LARGE // - // * RESOURCE_NOT_FOUND + // * + // RESOURCE_NOT_FOUND // - // * THROTTLED + // * THROTTLED // - // * - // UNAUTHORIZED + // * UNAUTHORIZED // - // * UNSUPPORTED_MEDIA_TYPE + // * UNSUPPORTED_MEDIA_TYPE ResponseType types.GatewayResponseType // The HTTP status code for this GatewayResponse. 
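The Get/Put/Update/DeleteGatewayResponse operations above all take the ResponseType field shown in their inputs. With the constant rename applied later in this patch (service/apigateway/types/enums.go), a caller would now write the CamelCase identifier; a minimal sketch, with the other required request fields (such as the REST API identifier, not shown in this excerpt) omitted for brevity:

    package example

    import (
        "github.com/aws/aws-sdk-go-v2/service/apigateway"
        "github.com/aws/aws-sdk-go-v2/service/apigateway/types"
    )

    // newGetDefault4xxInput builds the input for fetching the DEFAULT_4XX gateway
    // response using the renamed constant.
    func newGetDefault4xxInput() *apigateway.GetGatewayResponseInput {
        return &apigateway.GetGatewayResponseInput{
            ResponseType: types.GatewayResponseTypeDefault4xx, // formerly GatewayResponseTypeDefault_4xx
        }
    }

The string sent on the wire is still "DEFAULT_4XX", so the rename is source-breaking for callers but not protocol-breaking.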
diff --git a/service/apigateway/api_op_UpdateIntegration.go b/service/apigateway/api_op_UpdateIntegration.go index 68b684538ab..c0f0b3ea031 100644 --- a/service/apigateway/api_op_UpdateIntegration.go +++ b/service/apigateway/api_op_UpdateIntegration.go @@ -91,10 +91,10 @@ type UpdateIntegrationOutput struct { // values are CONVERT_TO_BINARY and CONVERT_TO_TEXT, with the following // behaviors: // - // * CONVERT_TO_BINARY: Converts a request payload from a + // * CONVERT_TO_BINARY: Converts a request payload from a // Base64-encoded string to the corresponding binary blob. // - // * CONVERT_TO_TEXT: + // * CONVERT_TO_TEXT: // Converts a request payload from a binary blob to a Base64-encoded string. // // If @@ -153,23 +153,23 @@ type UpdateIntegrationOutput struct { // the content type does not match any of the mapped content types, as specified in // requestTemplates. The valid value is one of the following: // - // * WHEN_NO_MATCH: + // * WHEN_NO_MATCH: // passes the method request body through the integration request to the back end // without transformation when the method request content type does not match any // content type associated with the mapping templates defined in the integration // request. // - // * WHEN_NO_TEMPLATES: passes the method request body through the + // * WHEN_NO_TEMPLATES: passes the method request body through the // integration request to the back end without transformation when no mapping // template is defined in the integration request. If a template is defined when // this option is selected, the method request of an unmapped content-type will be // rejected with an HTTP 415 Unsupported Media Type response. // - // * NEVER: rejects - // the method request with an HTTP 415 Unsupported Media Type response when either - // the method request content type does not match any content type associated with - // the mapping templates defined in the integration request or no mapping template - // is defined in the integration request. + // * NEVER: rejects the + // method request with an HTTP 415 Unsupported Media Type response when either the + // method request content type does not match any content type associated with the + // mapping templates defined in the integration request or no mapping template is + // defined in the integration request. PassthroughBehavior *string // A key-value map specifying request parameters that are passed from the method @@ -196,48 +196,48 @@ type UpdateIntegrationOutput struct { // Specifies an API method integration type. The valid value is one of the // following: // - // * AWS: for integrating the API method request with an AWS - // service action, including the Lambda function-invoking action. With the Lambda + // * AWS: for integrating the API method request with an AWS service + // action, including the Lambda function-invoking action. With the Lambda // function-invoking action, this is referred to as the Lambda custom integration. // With any other AWS service action, this is known as AWS integration. // - // * + // * // AWS_PROXY: for integrating the API method request with the Lambda // function-invoking action with the client request passed through as-is. This // integration is also referred to as the Lambda proxy integration. // - // * HTTP: - // for integrating the API method request with an HTTP endpoint, including a - // private HTTP endpoint within a VPC. This integration is also referred to as the - // HTTP custom integration. 
+ // * HTTP: for + // integrating the API method request with an HTTP endpoint, including a private + // HTTP endpoint within a VPC. This integration is also referred to as the HTTP + // custom integration. // - // * HTTP_PROXY: for integrating the API method - // request with an HTTP endpoint, including a private HTTP endpoint within a VPC, - // with the client request passed through as-is. This is also referred to as the - // HTTP proxy integration. + // * HTTP_PROXY: for integrating the API method request with + // an HTTP endpoint, including a private HTTP endpoint within a VPC, with the + // client request passed through as-is. This is also referred to as the HTTP proxy + // integration. // - // * MOCK: for integrating the API method request with - // API Gateway as a "loop-back" endpoint without invoking any backend. + // * MOCK: for integrating the API method request with API Gateway as + // a "loop-back" endpoint without invoking any backend. // - // For the - // HTTP and HTTP proxy integrations, each integration can specify a protocol - // (http/https), port and path. Standard 80 and 443 ports are supported as well as - // custom ports above 1024. An HTTP or HTTP proxy integration with a connectionType - // of VPC_LINK is referred to as a private integration and uses a VpcLink to - // connect API Gateway to a network load balancer of a VPC. + // For the HTTP and HTTP + // proxy integrations, each integration can specify a protocol (http/https), port + // and path. Standard 80 and 443 ports are supported as well as custom ports above + // 1024. An HTTP or HTTP proxy integration with a connectionType of VPC_LINK is + // referred to as a private integration and uses a VpcLink to connect API Gateway + // to a network load balancer of a VPC. Type types.IntegrationType // Specifies Uniform Resource Identifier (URI) of the integration endpoint. // - // * - // For HTTP or HTTP_PROXY integrations, the URI must be a fully formed, encoded - // HTTP(S) URL according to the RFC-3986 specification + // * For + // HTTP or HTTP_PROXY integrations, the URI must be a fully formed, encoded HTTP(S) + // URL according to the RFC-3986 specification // (https://en.wikipedia.org/wiki/Uniform_Resource_Identifier), for either standard // integration, where connectionType is not VPC_LINK, or private integration, where // connectionType is VPC_LINK. For a private HTTP integration, the URI is not used // for routing. // - // * For AWS or AWS_PROXY integrations, the URI is of the form + // * For AWS or AWS_PROXY integrations, the URI is of the form // arn:aws:apigateway:{region}:{subdomain.service|service}:path|action/{service_api}. // Here, {Region} is the API Gateway region (e.g., us-east-1); {service} is the // name of the integrated AWS service (e.g., s3); and {subdomain} is a designated diff --git a/service/apigateway/api_op_UpdateIntegrationResponse.go b/service/apigateway/api_op_UpdateIntegrationResponse.go index 5fc2f2dc923..cf3abf2a8b7 100644 --- a/service/apigateway/api_op_UpdateIntegrationResponse.go +++ b/service/apigateway/api_op_UpdateIntegrationResponse.go @@ -74,10 +74,10 @@ type UpdateIntegrationResponseOutput struct { // values are CONVERT_TO_BINARY and CONVERT_TO_TEXT, with the following // behaviors: // - // * CONVERT_TO_BINARY: Converts a response payload from a + // * CONVERT_TO_BINARY: Converts a response payload from a // Base64-encoded string to the corresponding binary blob. 
// - // * CONVERT_TO_TEXT: + // * CONVERT_TO_TEXT: // Converts a response payload from a binary blob to a Base64-encoded string. // // If diff --git a/service/apigateway/api_op_UpdateRestApi.go b/service/apigateway/api_op_UpdateRestApi.go index 83fc5653c99..429a6fb0eb2 100644 --- a/service/apigateway/api_op_UpdateRestApi.go +++ b/service/apigateway/api_op_UpdateRestApi.go @@ -56,11 +56,11 @@ type UpdateRestApiOutput struct { // The source of the API key for metering requests according to a usage plan. Valid // values are: // - // * HEADER to read the API key from the X-API-Key header of a + // * HEADER to read the API key from the X-API-Key header of a // request. // - // * AUTHORIZER to read the API key from the UsageIdentifierKey from - // a custom authorizer. + // * AUTHORIZER to read the API key from the UsageIdentifierKey from a + // custom authorizer. ApiKeySource types.ApiKeySourceType // The list of binary media types supported by the RestApi. By default, the RestApi diff --git a/service/apigateway/types/enums.go b/service/apigateway/types/enums.go index 76e9d06e07d..784f47d3fa0 100644 --- a/service/apigateway/types/enums.go +++ b/service/apigateway/types/enums.go @@ -40,9 +40,9 @@ type AuthorizerType string // Enum values for AuthorizerType const ( - AuthorizerTypeToken AuthorizerType = "TOKEN" - AuthorizerTypeRequest AuthorizerType = "REQUEST" - AuthorizerTypeCognito_user_pools AuthorizerType = "COGNITO_USER_POOLS" + AuthorizerTypeToken AuthorizerType = "TOKEN" + AuthorizerTypeRequest AuthorizerType = "REQUEST" + AuthorizerTypeCognitoUserPools AuthorizerType = "COGNITO_USER_POOLS" ) // Values returns all known values for AuthorizerType. Note that this can be @@ -60,14 +60,14 @@ type CacheClusterSize string // Enum values for CacheClusterSize const ( - CacheClusterSizeSize_0_point_5_gb CacheClusterSize = "0.5" - CacheClusterSizeSize_1_point_6_gb CacheClusterSize = "1.6" - CacheClusterSizeSize_6_point_1_gb CacheClusterSize = "6.1" - CacheClusterSizeSize_13_point_5_gb CacheClusterSize = "13.5" - CacheClusterSizeSize_28_point_4_gb CacheClusterSize = "28.4" - CacheClusterSizeSize_58_point_2_gb CacheClusterSize = "58.2" - CacheClusterSizeSize_118_gb CacheClusterSize = "118" - CacheClusterSizeSize_237_gb CacheClusterSize = "237" + CacheClusterSizeSize0Point5Gb CacheClusterSize = "0.5" + CacheClusterSizeSize1Point6Gb CacheClusterSize = "1.6" + CacheClusterSizeSize6Point1Gb CacheClusterSize = "6.1" + CacheClusterSizeSize13Point5Gb CacheClusterSize = "13.5" + CacheClusterSizeSize28Point4Gb CacheClusterSize = "28.4" + CacheClusterSizeSize58Point2Gb CacheClusterSize = "58.2" + CacheClusterSizeSize118Gb CacheClusterSize = "118" + CacheClusterSizeSize237Gb CacheClusterSize = "237" ) // Values returns all known values for CacheClusterSize. 
Note that this can be @@ -90,11 +90,11 @@ type CacheClusterStatus string // Enum values for CacheClusterStatus const ( - CacheClusterStatusCreate_in_progress CacheClusterStatus = "CREATE_IN_PROGRESS" - CacheClusterStatusAvailable CacheClusterStatus = "AVAILABLE" - CacheClusterStatusDelete_in_progress CacheClusterStatus = "DELETE_IN_PROGRESS" - CacheClusterStatusNot_available CacheClusterStatus = "NOT_AVAILABLE" - CacheClusterStatusFlush_in_progress CacheClusterStatus = "FLUSH_IN_PROGRESS" + CacheClusterStatusCreateInProgress CacheClusterStatus = "CREATE_IN_PROGRESS" + CacheClusterStatusAvailable CacheClusterStatus = "AVAILABLE" + CacheClusterStatusDeleteInProgress CacheClusterStatus = "DELETE_IN_PROGRESS" + CacheClusterStatusNotAvailable CacheClusterStatus = "NOT_AVAILABLE" + CacheClusterStatusFlushInProgress CacheClusterStatus = "FLUSH_IN_PROGRESS" ) // Values returns all known values for CacheClusterStatus. Note that this can be @@ -115,7 +115,7 @@ type ConnectionType string // Enum values for ConnectionType const ( ConnectionTypeInternet ConnectionType = "INTERNET" - ConnectionTypeVpc_link ConnectionType = "VPC_LINK" + ConnectionTypeVpcLink ConnectionType = "VPC_LINK" ) // Values returns all known values for ConnectionType. Note that this can be @@ -132,8 +132,8 @@ type ContentHandlingStrategy string // Enum values for ContentHandlingStrategy const ( - ContentHandlingStrategyConvert_to_binary ContentHandlingStrategy = "CONVERT_TO_BINARY" - ContentHandlingStrategyConvert_to_text ContentHandlingStrategy = "CONVERT_TO_TEXT" + ContentHandlingStrategyConvertToBinary ContentHandlingStrategy = "CONVERT_TO_BINARY" + ContentHandlingStrategyConvertToText ContentHandlingStrategy = "CONVERT_TO_TEXT" ) // Values returns all known values for ContentHandlingStrategy. 
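The ContentHandlingStrategy constants renamed above pair with the contentHandling behavior described in the integration comments earlier in this patch: CONVERT_TO_BINARY decodes a Base64 payload to a binary blob, CONVERT_TO_TEXT encodes a binary blob as Base64, and an unset value passes the payload through. A small sketch of how those renamed constants read in calling code:

    package example

    import "github.com/aws/aws-sdk-go-v2/service/apigateway/types"

    // describeContentHandling restates the documented behaviors using the
    // renamed constants.
    func describeContentHandling(ch types.ContentHandlingStrategy) string {
        switch ch {
        case types.ContentHandlingStrategyConvertToBinary: // was ContentHandlingStrategyConvert_to_binary
            return "decode a Base64-encoded payload into the corresponding binary blob"
        case types.ContentHandlingStrategyConvertToText: // was ContentHandlingStrategyConvert_to_text
            return "encode a binary payload as a Base64-encoded string"
        default:
            return "pass the payload through unchanged"
        }
    }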
Note that this can @@ -150,18 +150,18 @@ type DocumentationPartType string // Enum values for DocumentationPartType const ( - DocumentationPartTypeApi DocumentationPartType = "API" - DocumentationPartTypeAuthorizer DocumentationPartType = "AUTHORIZER" - DocumentationPartTypeModel DocumentationPartType = "MODEL" - DocumentationPartTypeResource DocumentationPartType = "RESOURCE" - DocumentationPartTypeMethod DocumentationPartType = "METHOD" - DocumentationPartTypePath_parameter DocumentationPartType = "PATH_PARAMETER" - DocumentationPartTypeQuery_parameter DocumentationPartType = "QUERY_PARAMETER" - DocumentationPartTypeRequest_header DocumentationPartType = "REQUEST_HEADER" - DocumentationPartTypeRequest_body DocumentationPartType = "REQUEST_BODY" - DocumentationPartTypeResponse DocumentationPartType = "RESPONSE" - DocumentationPartTypeResponse_header DocumentationPartType = "RESPONSE_HEADER" - DocumentationPartTypeResponse_body DocumentationPartType = "RESPONSE_BODY" + DocumentationPartTypeApi DocumentationPartType = "API" + DocumentationPartTypeAuthorizer DocumentationPartType = "AUTHORIZER" + DocumentationPartTypeModel DocumentationPartType = "MODEL" + DocumentationPartTypeResource DocumentationPartType = "RESOURCE" + DocumentationPartTypeMethod DocumentationPartType = "METHOD" + DocumentationPartTypePathParameter DocumentationPartType = "PATH_PARAMETER" + DocumentationPartTypeQueryParameter DocumentationPartType = "QUERY_PARAMETER" + DocumentationPartTypeRequestHeader DocumentationPartType = "REQUEST_HEADER" + DocumentationPartTypeRequestBody DocumentationPartType = "REQUEST_BODY" + DocumentationPartTypeResponse DocumentationPartType = "RESPONSE" + DocumentationPartTypeResponseHeader DocumentationPartType = "RESPONSE_HEADER" + DocumentationPartTypeResponseBody DocumentationPartType = "RESPONSE_BODY" ) // Values returns all known values for DocumentationPartType. 
Note that this can be @@ -228,26 +228,26 @@ type GatewayResponseType string // Enum values for GatewayResponseType const ( - GatewayResponseTypeDefault_4xx GatewayResponseType = "DEFAULT_4XX" - GatewayResponseTypeDefault_5xx GatewayResponseType = "DEFAULT_5XX" - GatewayResponseTypeResource_not_found GatewayResponseType = "RESOURCE_NOT_FOUND" - GatewayResponseTypeUnauthorized GatewayResponseType = "UNAUTHORIZED" - GatewayResponseTypeInvalid_api_key GatewayResponseType = "INVALID_API_KEY" - GatewayResponseTypeAccess_denied GatewayResponseType = "ACCESS_DENIED" - GatewayResponseTypeAuthorizer_failure GatewayResponseType = "AUTHORIZER_FAILURE" - GatewayResponseTypeAuthorizer_configuration_error GatewayResponseType = "AUTHORIZER_CONFIGURATION_ERROR" - GatewayResponseTypeInvalid_signature GatewayResponseType = "INVALID_SIGNATURE" - GatewayResponseTypeExpired_token GatewayResponseType = "EXPIRED_TOKEN" - GatewayResponseTypeMissing_authentication_token GatewayResponseType = "MISSING_AUTHENTICATION_TOKEN" - GatewayResponseTypeIntegration_failure GatewayResponseType = "INTEGRATION_FAILURE" - GatewayResponseTypeIntegration_timeout GatewayResponseType = "INTEGRATION_TIMEOUT" - GatewayResponseTypeApi_configuration_error GatewayResponseType = "API_CONFIGURATION_ERROR" - GatewayResponseTypeUnsupported_media_type GatewayResponseType = "UNSUPPORTED_MEDIA_TYPE" - GatewayResponseTypeBad_request_parameters GatewayResponseType = "BAD_REQUEST_PARAMETERS" - GatewayResponseTypeBad_request_body GatewayResponseType = "BAD_REQUEST_BODY" - GatewayResponseTypeRequest_too_large GatewayResponseType = "REQUEST_TOO_LARGE" - GatewayResponseTypeThrottled GatewayResponseType = "THROTTLED" - GatewayResponseTypeQuota_exceeded GatewayResponseType = "QUOTA_EXCEEDED" + GatewayResponseTypeDefault4xx GatewayResponseType = "DEFAULT_4XX" + GatewayResponseTypeDefault5xx GatewayResponseType = "DEFAULT_5XX" + GatewayResponseTypeResourceNotFound GatewayResponseType = "RESOURCE_NOT_FOUND" + GatewayResponseTypeUnauthorized GatewayResponseType = "UNAUTHORIZED" + GatewayResponseTypeInvalidApiKey GatewayResponseType = "INVALID_API_KEY" + GatewayResponseTypeAccessDenied GatewayResponseType = "ACCESS_DENIED" + GatewayResponseTypeAuthorizerFailure GatewayResponseType = "AUTHORIZER_FAILURE" + GatewayResponseTypeAuthorizerConfigurationError GatewayResponseType = "AUTHORIZER_CONFIGURATION_ERROR" + GatewayResponseTypeInvalidSignature GatewayResponseType = "INVALID_SIGNATURE" + GatewayResponseTypeExpiredToken GatewayResponseType = "EXPIRED_TOKEN" + GatewayResponseTypeMissingAuthenticationToken GatewayResponseType = "MISSING_AUTHENTICATION_TOKEN" + GatewayResponseTypeIntegrationFailure GatewayResponseType = "INTEGRATION_FAILURE" + GatewayResponseTypeIntegrationTimeout GatewayResponseType = "INTEGRATION_TIMEOUT" + GatewayResponseTypeApiConfigurationError GatewayResponseType = "API_CONFIGURATION_ERROR" + GatewayResponseTypeUnsupportedMediaType GatewayResponseType = "UNSUPPORTED_MEDIA_TYPE" + GatewayResponseTypeBadRequestParameters GatewayResponseType = "BAD_REQUEST_PARAMETERS" + GatewayResponseTypeBadRequestBody GatewayResponseType = "BAD_REQUEST_BODY" + GatewayResponseTypeRequestTooLarge GatewayResponseType = "REQUEST_TOO_LARGE" + GatewayResponseTypeThrottled GatewayResponseType = "THROTTLED" + GatewayResponseTypeQuotaExceeded GatewayResponseType = "QUOTA_EXCEEDED" ) // Values returns all known values for GatewayResponseType. 
Note that this can be @@ -282,11 +282,11 @@ type IntegrationType string // Enum values for IntegrationType const ( - IntegrationTypeHttp IntegrationType = "HTTP" - IntegrationTypeAws IntegrationType = "AWS" - IntegrationTypeMock IntegrationType = "MOCK" - IntegrationTypeHttp_proxy IntegrationType = "HTTP_PROXY" - IntegrationTypeAws_proxy IntegrationType = "AWS_PROXY" + IntegrationTypeHttp IntegrationType = "HTTP" + IntegrationTypeAws IntegrationType = "AWS" + IntegrationTypeMock IntegrationType = "MOCK" + IntegrationTypeHttpProxy IntegrationType = "HTTP_PROXY" + IntegrationTypeAwsProxy IntegrationType = "AWS_PROXY" ) // Values returns all known values for IntegrationType. Note that this can be @@ -388,8 +388,8 @@ type SecurityPolicy string // Enum values for SecurityPolicy const ( - SecurityPolicyTls_1_0 SecurityPolicy = "TLS_1_0" - SecurityPolicyTls_1_2 SecurityPolicy = "TLS_1_2" + SecurityPolicyTls10 SecurityPolicy = "TLS_1_0" + SecurityPolicyTls12 SecurityPolicy = "TLS_1_2" ) // Values returns all known values for SecurityPolicy. Note that this can be @@ -406,9 +406,9 @@ type UnauthorizedCacheControlHeaderStrategy string // Enum values for UnauthorizedCacheControlHeaderStrategy const ( - UnauthorizedCacheControlHeaderStrategyFail_with_403 UnauthorizedCacheControlHeaderStrategy = "FAIL_WITH_403" - UnauthorizedCacheControlHeaderStrategySucceed_with_response_header UnauthorizedCacheControlHeaderStrategy = "SUCCEED_WITH_RESPONSE_HEADER" - UnauthorizedCacheControlHeaderStrategySucceed_without_response_header UnauthorizedCacheControlHeaderStrategy = "SUCCEED_WITHOUT_RESPONSE_HEADER" + UnauthorizedCacheControlHeaderStrategyFailWith403 UnauthorizedCacheControlHeaderStrategy = "FAIL_WITH_403" + UnauthorizedCacheControlHeaderStrategySucceedWithResponseHeader UnauthorizedCacheControlHeaderStrategy = "SUCCEED_WITH_RESPONSE_HEADER" + UnauthorizedCacheControlHeaderStrategySucceedWithoutResponseHeader UnauthorizedCacheControlHeaderStrategy = "SUCCEED_WITHOUT_RESPONSE_HEADER" ) // Values returns all known values for UnauthorizedCacheControlHeaderStrategy. Note diff --git a/service/apigateway/types/types.go b/service/apigateway/types/types.go index 91fc1fabb28..5a6e4bc50b2 100644 --- a/service/apigateway/types/types.go +++ b/service/apigateway/types/types.go @@ -116,13 +116,13 @@ type Authorizer struct { // The identity source for which authorization is requested. // - // * For a TOKEN or + // * For a TOKEN or // COGNITO_USER_POOLS authorizer, this is required and specifies the request header // mapping expression for the custom header holding the authorization token // submitted by the client. For example, if the token header name is Auth, the // header mapping expression is method.request.header.Auth. // - // * For the REQUEST + // * For the REQUEST // authorizer, this is required when authorization caching is enabled. The value is // a comma-separated string of one or more mapping expressions of the specified // request parameters. For example, if an Auth header, a Name query string @@ -534,52 +534,51 @@ type GatewayResponse struct { // The response type of the associated GatewayResponse. 
Valid values are // - // * + // * // ACCESS_DENIED // - // * API_CONFIGURATION_ERROR + // * API_CONFIGURATION_ERROR // - // * AUTHORIZER_FAILURE + // * AUTHORIZER_FAILURE // - // * + // * // AUTHORIZER_CONFIGURATION_ERROR // - // * BAD_REQUEST_PARAMETERS + // * BAD_REQUEST_PARAMETERS // - // * - // BAD_REQUEST_BODY + // * BAD_REQUEST_BODY // - // * DEFAULT_4XX + // * + // DEFAULT_4XX // - // * DEFAULT_5XX - // - // * EXPIRED_TOKEN + // * DEFAULT_5XX // + // * EXPIRED_TOKEN // // * INVALID_SIGNATURE // - // * INTEGRATION_FAILURE + // * + // INTEGRATION_FAILURE // - // * INTEGRATION_TIMEOUT + // * INTEGRATION_TIMEOUT // - // * - // INVALID_API_KEY + // * INVALID_API_KEY // - // * MISSING_AUTHENTICATION_TOKEN + // * + // MISSING_AUTHENTICATION_TOKEN // - // * QUOTA_EXCEEDED + // * QUOTA_EXCEEDED // - // * - // REQUEST_TOO_LARGE + // * REQUEST_TOO_LARGE // - // * RESOURCE_NOT_FOUND + // * + // RESOURCE_NOT_FOUND // - // * THROTTLED + // * THROTTLED // - // * - // UNAUTHORIZED + // * UNAUTHORIZED // - // * UNSUPPORTED_MEDIA_TYPE + // * UNSUPPORTED_MEDIA_TYPE ResponseType GatewayResponseType // The HTTP status code for this GatewayResponse. @@ -619,10 +618,10 @@ type Integration struct { // values are CONVERT_TO_BINARY and CONVERT_TO_TEXT, with the following // behaviors: // - // * CONVERT_TO_BINARY: Converts a request payload from a + // * CONVERT_TO_BINARY: Converts a request payload from a // Base64-encoded string to the corresponding binary blob. // - // * CONVERT_TO_TEXT: + // * CONVERT_TO_TEXT: // Converts a request payload from a binary blob to a Base64-encoded string. // // If @@ -681,23 +680,23 @@ type Integration struct { // the content type does not match any of the mapped content types, as specified in // requestTemplates. The valid value is one of the following: // - // * WHEN_NO_MATCH: + // * WHEN_NO_MATCH: // passes the method request body through the integration request to the back end // without transformation when the method request content type does not match any // content type associated with the mapping templates defined in the integration // request. // - // * WHEN_NO_TEMPLATES: passes the method request body through the + // * WHEN_NO_TEMPLATES: passes the method request body through the // integration request to the back end without transformation when no mapping // template is defined in the integration request. If a template is defined when // this option is selected, the method request of an unmapped content-type will be // rejected with an HTTP 415 Unsupported Media Type response. // - // * NEVER: rejects - // the method request with an HTTP 415 Unsupported Media Type response when either - // the method request content type does not match any content type associated with - // the mapping templates defined in the integration request or no mapping template - // is defined in the integration request. + // * NEVER: rejects the + // method request with an HTTP 415 Unsupported Media Type response when either the + // method request content type does not match any content type associated with the + // mapping templates defined in the integration request or no mapping template is + // defined in the integration request. PassthroughBehavior *string // A key-value map specifying request parameters that are passed from the method @@ -724,48 +723,48 @@ type Integration struct { // Specifies an API method integration type. 
The valid value is one of the // following: // - // * AWS: for integrating the API method request with an AWS - // service action, including the Lambda function-invoking action. With the Lambda + // * AWS: for integrating the API method request with an AWS service + // action, including the Lambda function-invoking action. With the Lambda // function-invoking action, this is referred to as the Lambda custom integration. // With any other AWS service action, this is known as AWS integration. // - // * + // * // AWS_PROXY: for integrating the API method request with the Lambda // function-invoking action with the client request passed through as-is. This // integration is also referred to as the Lambda proxy integration. // - // * HTTP: - // for integrating the API method request with an HTTP endpoint, including a - // private HTTP endpoint within a VPC. This integration is also referred to as the - // HTTP custom integration. + // * HTTP: for + // integrating the API method request with an HTTP endpoint, including a private + // HTTP endpoint within a VPC. This integration is also referred to as the HTTP + // custom integration. // - // * HTTP_PROXY: for integrating the API method - // request with an HTTP endpoint, including a private HTTP endpoint within a VPC, - // with the client request passed through as-is. This is also referred to as the - // HTTP proxy integration. + // * HTTP_PROXY: for integrating the API method request with + // an HTTP endpoint, including a private HTTP endpoint within a VPC, with the + // client request passed through as-is. This is also referred to as the HTTP proxy + // integration. // - // * MOCK: for integrating the API method request with - // API Gateway as a "loop-back" endpoint without invoking any backend. + // * MOCK: for integrating the API method request with API Gateway as + // a "loop-back" endpoint without invoking any backend. // - // For the - // HTTP and HTTP proxy integrations, each integration can specify a protocol - // (http/https), port and path. Standard 80 and 443 ports are supported as well as - // custom ports above 1024. An HTTP or HTTP proxy integration with a connectionType - // of VPC_LINK is referred to as a private integration and uses a VpcLink to - // connect API Gateway to a network load balancer of a VPC. + // For the HTTP and HTTP + // proxy integrations, each integration can specify a protocol (http/https), port + // and path. Standard 80 and 443 ports are supported as well as custom ports above + // 1024. An HTTP or HTTP proxy integration with a connectionType of VPC_LINK is + // referred to as a private integration and uses a VpcLink to connect API Gateway + // to a network load balancer of a VPC. Type IntegrationType // Specifies Uniform Resource Identifier (URI) of the integration endpoint. // - // * - // For HTTP or HTTP_PROXY integrations, the URI must be a fully formed, encoded - // HTTP(S) URL according to the RFC-3986 specification + // * For + // HTTP or HTTP_PROXY integrations, the URI must be a fully formed, encoded HTTP(S) + // URL according to the RFC-3986 specification // (https://en.wikipedia.org/wiki/Uniform_Resource_Identifier), for either standard // integration, where connectionType is not VPC_LINK, or private integration, where // connectionType is VPC_LINK. For a private HTTP integration, the URI is not used // for routing. 
// - // * For AWS or AWS_PROXY integrations, the URI is of the form + // * For AWS or AWS_PROXY integrations, the URI is of the form // arn:aws:apigateway:{region}:{subdomain.service|service}:path|action/{service_api}. // Here, {Region} is the API Gateway region (e.g., us-east-1); {service} is the // name of the integrated AWS service (e.g., s3); and {subdomain} is a designated @@ -794,10 +793,10 @@ type IntegrationResponse struct { // values are CONVERT_TO_BINARY and CONVERT_TO_TEXT, with the following // behaviors: // - // * CONVERT_TO_BINARY: Converts a response payload from a + // * CONVERT_TO_BINARY: Converts a response payload from a // Base64-encoded string to the corresponding binary blob. // - // * CONVERT_TO_TEXT: + // * CONVERT_TO_TEXT: // Converts a response payload from a binary blob to a Base64-encoded string. // // If @@ -1453,11 +1452,11 @@ type RestApi struct { // The source of the API key for metering requests according to a usage plan. Valid // values are: // - // * HEADER to read the API key from the X-API-Key header of a + // * HEADER to read the API key from the X-API-Key header of a // request. // - // * AUTHORIZER to read the API key from the UsageIdentifierKey from - // a custom authorizer. + // * AUTHORIZER to read the API key from the UsageIdentifierKey from a + // custom authorizer. ApiKeySource ApiKeySourceType // The list of binary media types supported by the RestApi. By default, the RestApi diff --git a/service/apigatewayv2/types/enums.go b/service/apigatewayv2/types/enums.go index c33405c36c2..46fce137e1d 100644 --- a/service/apigatewayv2/types/enums.go +++ b/service/apigatewayv2/types/enums.go @@ -6,10 +6,10 @@ type AuthorizationType string // Enum values for AuthorizationType const ( - AuthorizationTypeNone AuthorizationType = "NONE" - AuthorizationTypeAws_iam AuthorizationType = "AWS_IAM" - AuthorizationTypeCustom AuthorizationType = "CUSTOM" - AuthorizationTypeJwt AuthorizationType = "JWT" + AuthorizationTypeNone AuthorizationType = "NONE" + AuthorizationTypeAwsIam AuthorizationType = "AWS_IAM" + AuthorizationTypeCustom AuthorizationType = "CUSTOM" + AuthorizationTypeJwt AuthorizationType = "JWT" ) // Values returns all known values for AuthorizationType. Note that this can be @@ -47,7 +47,7 @@ type ConnectionType string // Enum values for ConnectionType const ( ConnectionTypeInternet ConnectionType = "INTERNET" - ConnectionTypeVpc_link ConnectionType = "VPC_LINK" + ConnectionTypeVpcLink ConnectionType = "VPC_LINK" ) // Values returns all known values for ConnectionType. Note that this can be @@ -64,8 +64,8 @@ type ContentHandlingStrategy string // Enum values for ContentHandlingStrategy const ( - ContentHandlingStrategyConvert_to_binary ContentHandlingStrategy = "CONVERT_TO_BINARY" - ContentHandlingStrategyConvert_to_text ContentHandlingStrategy = "CONVERT_TO_TEXT" + ContentHandlingStrategyConvertToBinary ContentHandlingStrategy = "CONVERT_TO_BINARY" + ContentHandlingStrategyConvertToText ContentHandlingStrategy = "CONVERT_TO_TEXT" ) // Values returns all known values for ContentHandlingStrategy. 
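The contentHandling and passthroughBehavior paragraphs re-wrapped in the apigateway hunks above describe a small decision table for unmapped request content types. A sketch of that logic, not part of the patch and using only the documented string values rather than any SDK types:

package main

import "fmt"

// passBodyThrough applies the documented passthroughBehavior rules:
//   - WHEN_NO_MATCH:     pass the body through untransformed when no mapping
//                        template matches the request content type.
//   - WHEN_NO_TEMPLATES: pass through only if the integration defines no
//                        templates at all; otherwise reject with HTTP 415.
//   - NEVER:             reject unmapped content types with HTTP 415.
// templateMatched reports whether a mapping template matched the content type;
// templatesDefined reports whether the integration defines any templates.
func passBodyThrough(behavior string, templateMatched, templatesDefined bool) (pass bool, reject415 bool) {
	if templateMatched {
		return false, false // the matching template transforms the body instead
	}
	switch behavior {
	case "WHEN_NO_MATCH":
		return true, false
	case "WHEN_NO_TEMPLATES":
		if !templatesDefined {
			return true, false
		}
		return false, true
	case "NEVER":
		return false, true
	}
	return false, true
}

func main() {
	pass, reject := passBodyThrough("WHEN_NO_TEMPLATES", false, true)
	fmt.Println(pass, reject) // false true: unmapped content type rejected with 415
}
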
Note that this can @@ -138,11 +138,11 @@ type IntegrationType string // Enum values for IntegrationType const ( - IntegrationTypeAws IntegrationType = "AWS" - IntegrationTypeHttp IntegrationType = "HTTP" - IntegrationTypeMock IntegrationType = "MOCK" - IntegrationTypeHttp_proxy IntegrationType = "HTTP_PROXY" - IntegrationTypeAws_proxy IntegrationType = "AWS_PROXY" + IntegrationTypeAws IntegrationType = "AWS" + IntegrationTypeHttp IntegrationType = "HTTP" + IntegrationTypeMock IntegrationType = "MOCK" + IntegrationTypeHttpProxy IntegrationType = "HTTP_PROXY" + IntegrationTypeAwsProxy IntegrationType = "AWS_PROXY" ) // Values returns all known values for IntegrationType. Note that this can be @@ -182,9 +182,9 @@ type PassthroughBehavior string // Enum values for PassthroughBehavior const ( - PassthroughBehaviorWhen_no_match PassthroughBehavior = "WHEN_NO_MATCH" - PassthroughBehaviorNever PassthroughBehavior = "NEVER" - PassthroughBehaviorWhen_no_templates PassthroughBehavior = "WHEN_NO_TEMPLATES" + PassthroughBehaviorWhenNoMatch PassthroughBehavior = "WHEN_NO_MATCH" + PassthroughBehaviorNever PassthroughBehavior = "NEVER" + PassthroughBehaviorWhenNoTemplates PassthroughBehavior = "WHEN_NO_TEMPLATES" ) // Values returns all known values for PassthroughBehavior. Note that this can be @@ -220,8 +220,8 @@ type SecurityPolicy string // Enum values for SecurityPolicy const ( - SecurityPolicyTls_1_0 SecurityPolicy = "TLS_1_0" - SecurityPolicyTls_1_2 SecurityPolicy = "TLS_1_2" + SecurityPolicyTls10 SecurityPolicy = "TLS_1_0" + SecurityPolicyTls12 SecurityPolicy = "TLS_1_2" ) // Values returns all known values for SecurityPolicy. Note that this can be diff --git a/service/appconfig/api_op_CreateConfigurationProfile.go b/service/appconfig/api_op_CreateConfigurationProfile.go index 43759aa6243..8a116c4189f 100644 --- a/service/appconfig/api_op_CreateConfigurationProfile.go +++ b/service/appconfig/api_op_CreateConfigurationProfile.go @@ -16,13 +16,13 @@ import ( // Store parameters, and Amazon S3 objects. A configuration profile includes the // following information. // -// * The Uri location of the configuration data. +// * The Uri location of the configuration data. // -// * -// The AWS Identity and Access Management (IAM) role that provides access to the +// * The AWS +// Identity and Access Management (IAM) role that provides access to the // configuration data. // -// * A validator for the configuration data. Available +// * A validator for the configuration data. Available // validators include either a JSON Schema or an AWS Lambda function. // // For more diff --git a/service/appconfig/doc.go b/service/appconfig/doc.go index e800857327d..34f01ec1297 100644 --- a/service/appconfig/doc.go +++ b/service/appconfig/doc.go @@ -24,18 +24,18 @@ // automatically rolls back to the previous version. AppConfig supports multiple // use cases. Here are some examples. // -// * Application tuning: Use AppConfig to +// * Application tuning: Use AppConfig to // carefully introduce changes to your application that can only be tested with // production traffic. // -// * Feature toggle: Use AppConfig to turn on new features +// * Feature toggle: Use AppConfig to turn on new features // that require a timely deployment, such as a product launch or announcement. // -// -// * Allow list: Use AppConfig to allow premium subscribers to access paid +// * +// Allow list: Use AppConfig to allow premium subscribers to access paid // content. 
// -// * Operational issues: Use AppConfig to reduce stress on your +// * Operational issues: Use AppConfig to reduce stress on your // application when a dependency or other external factor impacts the system. // // This diff --git a/service/appconfig/types/enums.go b/service/appconfig/types/enums.go index ceb0dcb5ad4..90b11611baf 100644 --- a/service/appconfig/types/enums.go +++ b/service/appconfig/types/enums.go @@ -22,12 +22,12 @@ type DeploymentEventType string // Enum values for DeploymentEventType const ( - DeploymentEventTypePercentage_updated DeploymentEventType = "PERCENTAGE_UPDATED" - DeploymentEventTypeRollback_started DeploymentEventType = "ROLLBACK_STARTED" - DeploymentEventTypeRollback_completed DeploymentEventType = "ROLLBACK_COMPLETED" - DeploymentEventTypeBake_time_started DeploymentEventType = "BAKE_TIME_STARTED" - DeploymentEventTypeDeployment_started DeploymentEventType = "DEPLOYMENT_STARTED" - DeploymentEventTypeDeployment_completed DeploymentEventType = "DEPLOYMENT_COMPLETED" + DeploymentEventTypePercentageUpdated DeploymentEventType = "PERCENTAGE_UPDATED" + DeploymentEventTypeRollbackStarted DeploymentEventType = "ROLLBACK_STARTED" + DeploymentEventTypeRollbackCompleted DeploymentEventType = "ROLLBACK_COMPLETED" + DeploymentEventTypeBakeTimeStarted DeploymentEventType = "BAKE_TIME_STARTED" + DeploymentEventTypeDeploymentStarted DeploymentEventType = "DEPLOYMENT_STARTED" + DeploymentEventTypeDeploymentCompleted DeploymentEventType = "DEPLOYMENT_COMPLETED" ) // Values returns all known values for DeploymentEventType. Note that this can be @@ -48,12 +48,12 @@ type DeploymentState string // Enum values for DeploymentState const ( - DeploymentStateBaking DeploymentState = "BAKING" - DeploymentStateValidating DeploymentState = "VALIDATING" - DeploymentStateDeploying DeploymentState = "DEPLOYING" - DeploymentStateComplete DeploymentState = "COMPLETE" - DeploymentStateRolling_back DeploymentState = "ROLLING_BACK" - DeploymentStateRolled_back DeploymentState = "ROLLED_BACK" + DeploymentStateBaking DeploymentState = "BAKING" + DeploymentStateValidating DeploymentState = "VALIDATING" + DeploymentStateDeploying DeploymentState = "DEPLOYING" + DeploymentStateComplete DeploymentState = "COMPLETE" + DeploymentStateRollingBack DeploymentState = "ROLLING_BACK" + DeploymentStateRolledBack DeploymentState = "ROLLED_BACK" ) // Values returns all known values for DeploymentState. Note that this can be @@ -74,10 +74,10 @@ type EnvironmentState string // Enum values for EnvironmentState const ( - EnvironmentStateReady_for_deployment EnvironmentState = "READY_FOR_DEPLOYMENT" - EnvironmentStateDeploying EnvironmentState = "DEPLOYING" - EnvironmentStateRolling_back EnvironmentState = "ROLLING_BACK" - EnvironmentStateRolled_back EnvironmentState = "ROLLED_BACK" + EnvironmentStateReadyForDeployment EnvironmentState = "READY_FOR_DEPLOYMENT" + EnvironmentStateDeploying EnvironmentState = "DEPLOYING" + EnvironmentStateRollingBack EnvironmentState = "ROLLING_BACK" + EnvironmentStateRolledBack EnvironmentState = "ROLLED_BACK" ) // Values returns all known values for EnvironmentState. Note that this can be @@ -114,8 +114,8 @@ type ReplicateTo string // Enum values for ReplicateTo const ( - ReplicateToNone ReplicateTo = "NONE" - ReplicateToSsm_document ReplicateTo = "SSM_DOCUMENT" + ReplicateToNone ReplicateTo = "NONE" + ReplicateToSsmDocument ReplicateTo = "SSM_DOCUMENT" ) // Values returns all known values for ReplicateTo. 
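The DeploymentState constants renamed above are what a caller inspects while waiting for the rollback behavior described in the AppConfig doc comment. A hedged sketch, not part of the patch: getDeploymentState is a hypothetical stand-in for a real GetDeployment call, and the import assumes the standard aws-sdk-go-v2 layout.

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go-v2/service/appconfig/types"
)

// getDeploymentState is a hypothetical placeholder for fetching the current
// state of an AppConfig deployment (for example via the GetDeployment API).
func getDeploymentState() types.DeploymentState {
	return types.DeploymentStateComplete
}

func main() {
	// Poll until a terminal state: COMPLETE, or ROLLED_BACK when AppConfig has
	// reverted to the previous configuration version.
	for {
		switch state := getDeploymentState(); state {
		case types.DeploymentStateComplete:
			fmt.Println("deployment complete")
			return
		case types.DeploymentStateRolledBack:
			fmt.Println("deployment rolled back")
			return
		default:
			fmt.Println("still in progress:", state)
			time.Sleep(5 * time.Second)
		}
	}
}
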
Note that this can be expanded @@ -132,10 +132,10 @@ type TriggeredBy string // Enum values for TriggeredBy const ( - TriggeredByUser TriggeredBy = "USER" - TriggeredByAppconfig TriggeredBy = "APPCONFIG" - TriggeredByCloudwatch_alarm TriggeredBy = "CLOUDWATCH_ALARM" - TriggeredByInternal_error TriggeredBy = "INTERNAL_ERROR" + TriggeredByUser TriggeredBy = "USER" + TriggeredByAppconfig TriggeredBy = "APPCONFIG" + TriggeredByCloudwatchAlarm TriggeredBy = "CLOUDWATCH_ALARM" + TriggeredByInternalError TriggeredBy = "INTERNAL_ERROR" ) // Values returns all known values for TriggeredBy. Note that this can be expanded @@ -154,8 +154,8 @@ type ValidatorType string // Enum values for ValidatorType const ( - ValidatorTypeJson_schema ValidatorType = "JSON_SCHEMA" - ValidatorTypeLambda ValidatorType = "LAMBDA" + ValidatorTypeJsonSchema ValidatorType = "JSON_SCHEMA" + ValidatorTypeLambda ValidatorType = "LAMBDA" ) // Values returns all known values for ValidatorType. Note that this can be diff --git a/service/appflow/doc.go b/service/appflow/doc.go index c91f99446aa..6bcec1e06b2 100644 --- a/service/appflow/doc.go +++ b/service/appflow/doc.go @@ -11,19 +11,19 @@ // and Amazon Redshift. Use the following links to get started on the Amazon // AppFlow API: // -// * Actions +// * Actions // (https://docs.aws.amazon.com/appflow/1.0/APIReference/API_Operations.html): An // alphabetical list of all Amazon AppFlow API operations. // -// * Data types +// * Data types // (https://docs.aws.amazon.com/appflow/1.0/APIReference/API_Types.html): An // alphabetical list of all Amazon AppFlow data types. // -// * Common parameters +// * Common parameters // (https://docs.aws.amazon.com/appflow/1.0/APIReference/CommonParameters.html): // Parameters that all Query operations can use. // -// * Common errors +// * Common errors // (https://docs.aws.amazon.com/appflow/1.0/APIReference/CommonErrors.html): Client // and server errors that all operations can return. // diff --git a/service/appflow/types/enums.go b/service/appflow/types/enums.go index 54cb43ad963..924283b6d1e 100644 --- a/service/appflow/types/enums.go +++ b/service/appflow/types/enums.go @@ -6,8 +6,8 @@ type AggregationType string // Enum values for AggregationType const ( - AggregationTypeNone AggregationType = "None" - AggregationTypeSingle_file AggregationType = "SingleFile" + AggregationTypeNone AggregationType = "None" + AggregationTypeSingleFile AggregationType = "SingleFile" ) // Values returns all known values for AggregationType. 
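Each enum in this patch keeps a generated Values method, referenced by the "Values returns all known values" comments throughout. A small sketch of how that helper is typically called, assuming the conventional zero-value receiver form the SDK generates; shown here with the appconfig ValidatorType renamed above.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/appconfig/types"
)

func main() {
	// Values is defined on the enum type itself, so a zero value is enough to
	// call it; for ValidatorType it yields the two documented validators,
	// JSON_SCHEMA and LAMBDA.
	for _, v := range types.ValidatorType("").Values() {
		fmt.Println(v)
	}
}
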
Note that this can be @@ -106,21 +106,21 @@ type DatadogConnectorOperator string // Enum values for DatadogConnectorOperator const ( - DatadogConnectorOperatorProjection DatadogConnectorOperator = "PROJECTION" - DatadogConnectorOperatorBetween DatadogConnectorOperator = "BETWEEN" - DatadogConnectorOperatorEqual_to DatadogConnectorOperator = "EQUAL_TO" - DatadogConnectorOperatorAddition DatadogConnectorOperator = "ADDITION" - DatadogConnectorOperatorMultiplication DatadogConnectorOperator = "MULTIPLICATION" - DatadogConnectorOperatorDivision DatadogConnectorOperator = "DIVISION" - DatadogConnectorOperatorSubtraction DatadogConnectorOperator = "SUBTRACTION" - DatadogConnectorOperatorMask_all DatadogConnectorOperator = "MASK_ALL" - DatadogConnectorOperatorMask_first_n DatadogConnectorOperator = "MASK_FIRST_N" - DatadogConnectorOperatorMask_last_n DatadogConnectorOperator = "MASK_LAST_N" - DatadogConnectorOperatorValidate_non_null DatadogConnectorOperator = "VALIDATE_NON_NULL" - DatadogConnectorOperatorValidate_non_zero DatadogConnectorOperator = "VALIDATE_NON_ZERO" - DatadogConnectorOperatorValidate_non_negative DatadogConnectorOperator = "VALIDATE_NON_NEGATIVE" - DatadogConnectorOperatorValidate_numeric DatadogConnectorOperator = "VALIDATE_NUMERIC" - DatadogConnectorOperatorNo_op DatadogConnectorOperator = "NO_OP" + DatadogConnectorOperatorProjection DatadogConnectorOperator = "PROJECTION" + DatadogConnectorOperatorBetween DatadogConnectorOperator = "BETWEEN" + DatadogConnectorOperatorEqualTo DatadogConnectorOperator = "EQUAL_TO" + DatadogConnectorOperatorAddition DatadogConnectorOperator = "ADDITION" + DatadogConnectorOperatorMultiplication DatadogConnectorOperator = "MULTIPLICATION" + DatadogConnectorOperatorDivision DatadogConnectorOperator = "DIVISION" + DatadogConnectorOperatorSubtraction DatadogConnectorOperator = "SUBTRACTION" + DatadogConnectorOperatorMaskAll DatadogConnectorOperator = "MASK_ALL" + DatadogConnectorOperatorMaskFirstN DatadogConnectorOperator = "MASK_FIRST_N" + DatadogConnectorOperatorMaskLastN DatadogConnectorOperator = "MASK_LAST_N" + DatadogConnectorOperatorValidateNonNull DatadogConnectorOperator = "VALIDATE_NON_NULL" + DatadogConnectorOperatorValidateNonZero DatadogConnectorOperator = "VALIDATE_NON_ZERO" + DatadogConnectorOperatorValidateNonNegative DatadogConnectorOperator = "VALIDATE_NON_NEGATIVE" + DatadogConnectorOperatorValidateNumeric DatadogConnectorOperator = "VALIDATE_NUMERIC" + DatadogConnectorOperatorNoOp DatadogConnectorOperator = "NO_OP" ) // Values returns all known values for DatadogConnectorOperator. 
Note that this can @@ -168,21 +168,21 @@ type DynatraceConnectorOperator string // Enum values for DynatraceConnectorOperator const ( - DynatraceConnectorOperatorProjection DynatraceConnectorOperator = "PROJECTION" - DynatraceConnectorOperatorBetween DynatraceConnectorOperator = "BETWEEN" - DynatraceConnectorOperatorEqual_to DynatraceConnectorOperator = "EQUAL_TO" - DynatraceConnectorOperatorAddition DynatraceConnectorOperator = "ADDITION" - DynatraceConnectorOperatorMultiplication DynatraceConnectorOperator = "MULTIPLICATION" - DynatraceConnectorOperatorDivision DynatraceConnectorOperator = "DIVISION" - DynatraceConnectorOperatorSubtraction DynatraceConnectorOperator = "SUBTRACTION" - DynatraceConnectorOperatorMask_all DynatraceConnectorOperator = "MASK_ALL" - DynatraceConnectorOperatorMask_first_n DynatraceConnectorOperator = "MASK_FIRST_N" - DynatraceConnectorOperatorMask_last_n DynatraceConnectorOperator = "MASK_LAST_N" - DynatraceConnectorOperatorValidate_non_null DynatraceConnectorOperator = "VALIDATE_NON_NULL" - DynatraceConnectorOperatorValidate_non_zero DynatraceConnectorOperator = "VALIDATE_NON_ZERO" - DynatraceConnectorOperatorValidate_non_negative DynatraceConnectorOperator = "VALIDATE_NON_NEGATIVE" - DynatraceConnectorOperatorValidate_numeric DynatraceConnectorOperator = "VALIDATE_NUMERIC" - DynatraceConnectorOperatorNo_op DynatraceConnectorOperator = "NO_OP" + DynatraceConnectorOperatorProjection DynatraceConnectorOperator = "PROJECTION" + DynatraceConnectorOperatorBetween DynatraceConnectorOperator = "BETWEEN" + DynatraceConnectorOperatorEqualTo DynatraceConnectorOperator = "EQUAL_TO" + DynatraceConnectorOperatorAddition DynatraceConnectorOperator = "ADDITION" + DynatraceConnectorOperatorMultiplication DynatraceConnectorOperator = "MULTIPLICATION" + DynatraceConnectorOperatorDivision DynatraceConnectorOperator = "DIVISION" + DynatraceConnectorOperatorSubtraction DynatraceConnectorOperator = "SUBTRACTION" + DynatraceConnectorOperatorMaskAll DynatraceConnectorOperator = "MASK_ALL" + DynatraceConnectorOperatorMaskFirstN DynatraceConnectorOperator = "MASK_FIRST_N" + DynatraceConnectorOperatorMaskLastN DynatraceConnectorOperator = "MASK_LAST_N" + DynatraceConnectorOperatorValidateNonNull DynatraceConnectorOperator = "VALIDATE_NON_NULL" + DynatraceConnectorOperatorValidateNonZero DynatraceConnectorOperator = "VALIDATE_NON_ZERO" + DynatraceConnectorOperatorValidateNonNegative DynatraceConnectorOperator = "VALIDATE_NON_NEGATIVE" + DynatraceConnectorOperatorValidateNumeric DynatraceConnectorOperator = "VALIDATE_NUMERIC" + DynatraceConnectorOperatorNoOp DynatraceConnectorOperator = "NO_OP" ) // Values returns all known values for DynatraceConnectorOperator. 
Note that this @@ -297,21 +297,21 @@ type InforNexusConnectorOperator string // Enum values for InforNexusConnectorOperator const ( - InforNexusConnectorOperatorProjection InforNexusConnectorOperator = "PROJECTION" - InforNexusConnectorOperatorBetween InforNexusConnectorOperator = "BETWEEN" - InforNexusConnectorOperatorEqual_to InforNexusConnectorOperator = "EQUAL_TO" - InforNexusConnectorOperatorAddition InforNexusConnectorOperator = "ADDITION" - InforNexusConnectorOperatorMultiplication InforNexusConnectorOperator = "MULTIPLICATION" - InforNexusConnectorOperatorDivision InforNexusConnectorOperator = "DIVISION" - InforNexusConnectorOperatorSubtraction InforNexusConnectorOperator = "SUBTRACTION" - InforNexusConnectorOperatorMask_all InforNexusConnectorOperator = "MASK_ALL" - InforNexusConnectorOperatorMask_first_n InforNexusConnectorOperator = "MASK_FIRST_N" - InforNexusConnectorOperatorMask_last_n InforNexusConnectorOperator = "MASK_LAST_N" - InforNexusConnectorOperatorValidate_non_null InforNexusConnectorOperator = "VALIDATE_NON_NULL" - InforNexusConnectorOperatorValidate_non_zero InforNexusConnectorOperator = "VALIDATE_NON_ZERO" - InforNexusConnectorOperatorValidate_non_negative InforNexusConnectorOperator = "VALIDATE_NON_NEGATIVE" - InforNexusConnectorOperatorValidate_numeric InforNexusConnectorOperator = "VALIDATE_NUMERIC" - InforNexusConnectorOperatorNo_op InforNexusConnectorOperator = "NO_OP" + InforNexusConnectorOperatorProjection InforNexusConnectorOperator = "PROJECTION" + InforNexusConnectorOperatorBetween InforNexusConnectorOperator = "BETWEEN" + InforNexusConnectorOperatorEqualTo InforNexusConnectorOperator = "EQUAL_TO" + InforNexusConnectorOperatorAddition InforNexusConnectorOperator = "ADDITION" + InforNexusConnectorOperatorMultiplication InforNexusConnectorOperator = "MULTIPLICATION" + InforNexusConnectorOperatorDivision InforNexusConnectorOperator = "DIVISION" + InforNexusConnectorOperatorSubtraction InforNexusConnectorOperator = "SUBTRACTION" + InforNexusConnectorOperatorMaskAll InforNexusConnectorOperator = "MASK_ALL" + InforNexusConnectorOperatorMaskFirstN InforNexusConnectorOperator = "MASK_FIRST_N" + InforNexusConnectorOperatorMaskLastN InforNexusConnectorOperator = "MASK_LAST_N" + InforNexusConnectorOperatorValidateNonNull InforNexusConnectorOperator = "VALIDATE_NON_NULL" + InforNexusConnectorOperatorValidateNonZero InforNexusConnectorOperator = "VALIDATE_NON_ZERO" + InforNexusConnectorOperatorValidateNonNegative InforNexusConnectorOperator = "VALIDATE_NON_NEGATIVE" + InforNexusConnectorOperatorValidateNumeric InforNexusConnectorOperator = "VALIDATE_NUMERIC" + InforNexusConnectorOperatorNoOp InforNexusConnectorOperator = "NO_OP" ) // Values returns all known values for InforNexusConnectorOperator. 
Note that this @@ -341,22 +341,22 @@ type MarketoConnectorOperator string // Enum values for MarketoConnectorOperator const ( - MarketoConnectorOperatorProjection MarketoConnectorOperator = "PROJECTION" - MarketoConnectorOperatorLess_than MarketoConnectorOperator = "LESS_THAN" - MarketoConnectorOperatorGreater_than MarketoConnectorOperator = "GREATER_THAN" - MarketoConnectorOperatorBetween MarketoConnectorOperator = "BETWEEN" - MarketoConnectorOperatorAddition MarketoConnectorOperator = "ADDITION" - MarketoConnectorOperatorMultiplication MarketoConnectorOperator = "MULTIPLICATION" - MarketoConnectorOperatorDivision MarketoConnectorOperator = "DIVISION" - MarketoConnectorOperatorSubtraction MarketoConnectorOperator = "SUBTRACTION" - MarketoConnectorOperatorMask_all MarketoConnectorOperator = "MASK_ALL" - MarketoConnectorOperatorMask_first_n MarketoConnectorOperator = "MASK_FIRST_N" - MarketoConnectorOperatorMask_last_n MarketoConnectorOperator = "MASK_LAST_N" - MarketoConnectorOperatorValidate_non_null MarketoConnectorOperator = "VALIDATE_NON_NULL" - MarketoConnectorOperatorValidate_non_zero MarketoConnectorOperator = "VALIDATE_NON_ZERO" - MarketoConnectorOperatorValidate_non_negative MarketoConnectorOperator = "VALIDATE_NON_NEGATIVE" - MarketoConnectorOperatorValidate_numeric MarketoConnectorOperator = "VALIDATE_NUMERIC" - MarketoConnectorOperatorNo_op MarketoConnectorOperator = "NO_OP" + MarketoConnectorOperatorProjection MarketoConnectorOperator = "PROJECTION" + MarketoConnectorOperatorLessThan MarketoConnectorOperator = "LESS_THAN" + MarketoConnectorOperatorGreaterThan MarketoConnectorOperator = "GREATER_THAN" + MarketoConnectorOperatorBetween MarketoConnectorOperator = "BETWEEN" + MarketoConnectorOperatorAddition MarketoConnectorOperator = "ADDITION" + MarketoConnectorOperatorMultiplication MarketoConnectorOperator = "MULTIPLICATION" + MarketoConnectorOperatorDivision MarketoConnectorOperator = "DIVISION" + MarketoConnectorOperatorSubtraction MarketoConnectorOperator = "SUBTRACTION" + MarketoConnectorOperatorMaskAll MarketoConnectorOperator = "MASK_ALL" + MarketoConnectorOperatorMaskFirstN MarketoConnectorOperator = "MASK_FIRST_N" + MarketoConnectorOperatorMaskLastN MarketoConnectorOperator = "MASK_LAST_N" + MarketoConnectorOperatorValidateNonNull MarketoConnectorOperator = "VALIDATE_NON_NULL" + MarketoConnectorOperatorValidateNonZero MarketoConnectorOperator = "VALIDATE_NON_ZERO" + MarketoConnectorOperatorValidateNonNegative MarketoConnectorOperator = "VALIDATE_NON_NEGATIVE" + MarketoConnectorOperatorValidateNumeric MarketoConnectorOperator = "VALIDATE_NUMERIC" + MarketoConnectorOperatorNoOp MarketoConnectorOperator = "NO_OP" ) // Values returns all known values for MarketoConnectorOperator. 
Note that this can @@ -387,27 +387,27 @@ type Operator string // Enum values for Operator const ( - OperatorProjection Operator = "PROJECTION" - OperatorLess_than Operator = "LESS_THAN" - OperatorGreater_than Operator = "GREATER_THAN" - OperatorContains Operator = "CONTAINS" - OperatorBetween Operator = "BETWEEN" - OperatorLess_than_or_equal_to Operator = "LESS_THAN_OR_EQUAL_TO" - OperatorGreater_than_or_equal_to Operator = "GREATER_THAN_OR_EQUAL_TO" - OperatorEqual_to Operator = "EQUAL_TO" - OperatorNot_equal_to Operator = "NOT_EQUAL_TO" - OperatorAddition Operator = "ADDITION" - OperatorMultiplication Operator = "MULTIPLICATION" - OperatorDivision Operator = "DIVISION" - OperatorSubtraction Operator = "SUBTRACTION" - OperatorMask_all Operator = "MASK_ALL" - OperatorMask_first_n Operator = "MASK_FIRST_N" - OperatorMask_last_n Operator = "MASK_LAST_N" - OperatorValidate_non_null Operator = "VALIDATE_NON_NULL" - OperatorValidate_non_zero Operator = "VALIDATE_NON_ZERO" - OperatorValidate_non_negative Operator = "VALIDATE_NON_NEGATIVE" - OperatorValidate_numeric Operator = "VALIDATE_NUMERIC" - OperatorNo_op Operator = "NO_OP" + OperatorProjection Operator = "PROJECTION" + OperatorLessThan Operator = "LESS_THAN" + OperatorGreaterThan Operator = "GREATER_THAN" + OperatorContains Operator = "CONTAINS" + OperatorBetween Operator = "BETWEEN" + OperatorLessThanOrEqualTo Operator = "LESS_THAN_OR_EQUAL_TO" + OperatorGreaterThanOrEqualTo Operator = "GREATER_THAN_OR_EQUAL_TO" + OperatorEqualTo Operator = "EQUAL_TO" + OperatorNotEqualTo Operator = "NOT_EQUAL_TO" + OperatorAddition Operator = "ADDITION" + OperatorMultiplication Operator = "MULTIPLICATION" + OperatorDivision Operator = "DIVISION" + OperatorSubtraction Operator = "SUBTRACTION" + OperatorMaskAll Operator = "MASK_ALL" + OperatorMaskFirstN Operator = "MASK_FIRST_N" + OperatorMaskLastN Operator = "MASK_LAST_N" + OperatorValidateNonNull Operator = "VALIDATE_NON_NULL" + OperatorValidateNonZero Operator = "VALIDATE_NON_ZERO" + OperatorValidateNonNegative Operator = "VALIDATE_NON_NEGATIVE" + OperatorValidateNumeric Operator = "VALIDATE_NUMERIC" + OperatorNoOp Operator = "NO_OP" ) // Values returns all known values for Operator. 
Note that this can be expanded in @@ -443,20 +443,20 @@ type OperatorPropertiesKeys string // Enum values for OperatorPropertiesKeys const ( - OperatorPropertiesKeysValue OperatorPropertiesKeys = "VALUE" - OperatorPropertiesKeysValues OperatorPropertiesKeys = "VALUES" - OperatorPropertiesKeysData_type OperatorPropertiesKeys = "DATA_TYPE" - OperatorPropertiesKeysUpper_bound OperatorPropertiesKeys = "UPPER_BOUND" - OperatorPropertiesKeysLower_bound OperatorPropertiesKeys = "LOWER_BOUND" - OperatorPropertiesKeysSource_data_type OperatorPropertiesKeys = "SOURCE_DATA_TYPE" - OperatorPropertiesKeysDestination_data_type OperatorPropertiesKeys = "DESTINATION_DATA_TYPE" - OperatorPropertiesKeysValidation_action OperatorPropertiesKeys = "VALIDATION_ACTION" - OperatorPropertiesKeysMask_value OperatorPropertiesKeys = "MASK_VALUE" - OperatorPropertiesKeysMask_length OperatorPropertiesKeys = "MASK_LENGTH" - OperatorPropertiesKeysTruncate_length OperatorPropertiesKeys = "TRUNCATE_LENGTH" - OperatorPropertiesKeysMath_operation_fields_order OperatorPropertiesKeys = "MATH_OPERATION_FIELDS_ORDER" - OperatorPropertiesKeysConcat_format OperatorPropertiesKeys = "CONCAT_FORMAT" - OperatorPropertiesKeysSubfield_category_map OperatorPropertiesKeys = "SUBFIELD_CATEGORY_MAP" + OperatorPropertiesKeysValue OperatorPropertiesKeys = "VALUE" + OperatorPropertiesKeysValues OperatorPropertiesKeys = "VALUES" + OperatorPropertiesKeysDataType OperatorPropertiesKeys = "DATA_TYPE" + OperatorPropertiesKeysUpperBound OperatorPropertiesKeys = "UPPER_BOUND" + OperatorPropertiesKeysLowerBound OperatorPropertiesKeys = "LOWER_BOUND" + OperatorPropertiesKeysSourceDataType OperatorPropertiesKeys = "SOURCE_DATA_TYPE" + OperatorPropertiesKeysDestinationDataType OperatorPropertiesKeys = "DESTINATION_DATA_TYPE" + OperatorPropertiesKeysValidationAction OperatorPropertiesKeys = "VALIDATION_ACTION" + OperatorPropertiesKeysMaskValue OperatorPropertiesKeys = "MASK_VALUE" + OperatorPropertiesKeysMaskLength OperatorPropertiesKeys = "MASK_LENGTH" + OperatorPropertiesKeysTruncateLength OperatorPropertiesKeys = "TRUNCATE_LENGTH" + OperatorPropertiesKeysMathOperationFieldsOrder OperatorPropertiesKeys = "MATH_OPERATION_FIELDS_ORDER" + OperatorPropertiesKeysConcatFormat OperatorPropertiesKeys = "CONCAT_FORMAT" + OperatorPropertiesKeysSubfieldCategoryMap OperatorPropertiesKeys = "SUBFIELD_CATEGORY_MAP" ) // Values returns all known values for OperatorPropertiesKeys. Note that this can @@ -509,9 +509,9 @@ type PrefixType string // Enum values for PrefixType const ( - PrefixTypeFilename PrefixType = "FILENAME" - PrefixTypePath PrefixType = "PATH" - PrefixTypePath_and_filename PrefixType = "PATH_AND_FILENAME" + PrefixTypeFilename PrefixType = "FILENAME" + PrefixTypePath PrefixType = "PATH" + PrefixTypePathAndFilename PrefixType = "PATH_AND_FILENAME" ) // Values returns all known values for PrefixType. 
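The generic Operator values and the OperatorPropertiesKeys renamed above are meant to be combined: a comparison such as BETWEEN is parameterized through property keys like LOWER_BOUND and UPPER_BOUND. A hedged sketch of that pairing, not the exact flow/Task API, using only the renamed constants and their unchanged string values:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/appflow/types"
)

func main() {
	// A BETWEEN filter carries its bounds as operator properties. The Go
	// identifiers changed in this patch (OperatorBetween,
	// OperatorPropertiesKeysLowerBound, ...), but the serialized keys and values
	// are the same strings as before.
	op := types.OperatorBetween
	props := map[types.OperatorPropertiesKeys]string{
		types.OperatorPropertiesKeysLowerBound: "10",
		types.OperatorPropertiesKeysUpperBound: "100",
	}
	fmt.Println(op, props[types.OperatorPropertiesKeysLowerBound], props[types.OperatorPropertiesKeysUpperBound])
}
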
Note that this can be expanded @@ -529,26 +529,26 @@ type S3ConnectorOperator string // Enum values for S3ConnectorOperator const ( - S3ConnectorOperatorProjection S3ConnectorOperator = "PROJECTION" - S3ConnectorOperatorLess_than S3ConnectorOperator = "LESS_THAN" - S3ConnectorOperatorGreater_than S3ConnectorOperator = "GREATER_THAN" - S3ConnectorOperatorBetween S3ConnectorOperator = "BETWEEN" - S3ConnectorOperatorLess_than_or_equal_to S3ConnectorOperator = "LESS_THAN_OR_EQUAL_TO" - S3ConnectorOperatorGreater_than_or_equal_to S3ConnectorOperator = "GREATER_THAN_OR_EQUAL_TO" - S3ConnectorOperatorEqual_to S3ConnectorOperator = "EQUAL_TO" - S3ConnectorOperatorNot_equal_to S3ConnectorOperator = "NOT_EQUAL_TO" - S3ConnectorOperatorAddition S3ConnectorOperator = "ADDITION" - S3ConnectorOperatorMultiplication S3ConnectorOperator = "MULTIPLICATION" - S3ConnectorOperatorDivision S3ConnectorOperator = "DIVISION" - S3ConnectorOperatorSubtraction S3ConnectorOperator = "SUBTRACTION" - S3ConnectorOperatorMask_all S3ConnectorOperator = "MASK_ALL" - S3ConnectorOperatorMask_first_n S3ConnectorOperator = "MASK_FIRST_N" - S3ConnectorOperatorMask_last_n S3ConnectorOperator = "MASK_LAST_N" - S3ConnectorOperatorValidate_non_null S3ConnectorOperator = "VALIDATE_NON_NULL" - S3ConnectorOperatorValidate_non_zero S3ConnectorOperator = "VALIDATE_NON_ZERO" - S3ConnectorOperatorValidate_non_negative S3ConnectorOperator = "VALIDATE_NON_NEGATIVE" - S3ConnectorOperatorValidate_numeric S3ConnectorOperator = "VALIDATE_NUMERIC" - S3ConnectorOperatorNo_op S3ConnectorOperator = "NO_OP" + S3ConnectorOperatorProjection S3ConnectorOperator = "PROJECTION" + S3ConnectorOperatorLessThan S3ConnectorOperator = "LESS_THAN" + S3ConnectorOperatorGreaterThan S3ConnectorOperator = "GREATER_THAN" + S3ConnectorOperatorBetween S3ConnectorOperator = "BETWEEN" + S3ConnectorOperatorLessThanOrEqualTo S3ConnectorOperator = "LESS_THAN_OR_EQUAL_TO" + S3ConnectorOperatorGreaterThanOrEqualTo S3ConnectorOperator = "GREATER_THAN_OR_EQUAL_TO" + S3ConnectorOperatorEqualTo S3ConnectorOperator = "EQUAL_TO" + S3ConnectorOperatorNotEqualTo S3ConnectorOperator = "NOT_EQUAL_TO" + S3ConnectorOperatorAddition S3ConnectorOperator = "ADDITION" + S3ConnectorOperatorMultiplication S3ConnectorOperator = "MULTIPLICATION" + S3ConnectorOperatorDivision S3ConnectorOperator = "DIVISION" + S3ConnectorOperatorSubtraction S3ConnectorOperator = "SUBTRACTION" + S3ConnectorOperatorMaskAll S3ConnectorOperator = "MASK_ALL" + S3ConnectorOperatorMaskFirstN S3ConnectorOperator = "MASK_FIRST_N" + S3ConnectorOperatorMaskLastN S3ConnectorOperator = "MASK_LAST_N" + S3ConnectorOperatorValidateNonNull S3ConnectorOperator = "VALIDATE_NON_NULL" + S3ConnectorOperatorValidateNonZero S3ConnectorOperator = "VALIDATE_NON_ZERO" + S3ConnectorOperatorValidateNonNegative S3ConnectorOperator = "VALIDATE_NON_NEGATIVE" + S3ConnectorOperatorValidateNumeric S3ConnectorOperator = "VALIDATE_NUMERIC" + S3ConnectorOperatorNoOp S3ConnectorOperator = "NO_OP" ) // Values returns all known values for S3ConnectorOperator. 
Note that this can be @@ -583,27 +583,27 @@ type SalesforceConnectorOperator string // Enum values for SalesforceConnectorOperator const ( - SalesforceConnectorOperatorProjection SalesforceConnectorOperator = "PROJECTION" - SalesforceConnectorOperatorLess_than SalesforceConnectorOperator = "LESS_THAN" - SalesforceConnectorOperatorContains SalesforceConnectorOperator = "CONTAINS" - SalesforceConnectorOperatorGreater_than SalesforceConnectorOperator = "GREATER_THAN" - SalesforceConnectorOperatorBetween SalesforceConnectorOperator = "BETWEEN" - SalesforceConnectorOperatorLess_than_or_equal_to SalesforceConnectorOperator = "LESS_THAN_OR_EQUAL_TO" - SalesforceConnectorOperatorGreater_than_or_equal_to SalesforceConnectorOperator = "GREATER_THAN_OR_EQUAL_TO" - SalesforceConnectorOperatorEqual_to SalesforceConnectorOperator = "EQUAL_TO" - SalesforceConnectorOperatorNot_equal_to SalesforceConnectorOperator = "NOT_EQUAL_TO" - SalesforceConnectorOperatorAddition SalesforceConnectorOperator = "ADDITION" - SalesforceConnectorOperatorMultiplication SalesforceConnectorOperator = "MULTIPLICATION" - SalesforceConnectorOperatorDivision SalesforceConnectorOperator = "DIVISION" - SalesforceConnectorOperatorSubtraction SalesforceConnectorOperator = "SUBTRACTION" - SalesforceConnectorOperatorMask_all SalesforceConnectorOperator = "MASK_ALL" - SalesforceConnectorOperatorMask_first_n SalesforceConnectorOperator = "MASK_FIRST_N" - SalesforceConnectorOperatorMask_last_n SalesforceConnectorOperator = "MASK_LAST_N" - SalesforceConnectorOperatorValidate_non_null SalesforceConnectorOperator = "VALIDATE_NON_NULL" - SalesforceConnectorOperatorValidate_non_zero SalesforceConnectorOperator = "VALIDATE_NON_ZERO" - SalesforceConnectorOperatorValidate_non_negative SalesforceConnectorOperator = "VALIDATE_NON_NEGATIVE" - SalesforceConnectorOperatorValidate_numeric SalesforceConnectorOperator = "VALIDATE_NUMERIC" - SalesforceConnectorOperatorNo_op SalesforceConnectorOperator = "NO_OP" + SalesforceConnectorOperatorProjection SalesforceConnectorOperator = "PROJECTION" + SalesforceConnectorOperatorLessThan SalesforceConnectorOperator = "LESS_THAN" + SalesforceConnectorOperatorContains SalesforceConnectorOperator = "CONTAINS" + SalesforceConnectorOperatorGreaterThan SalesforceConnectorOperator = "GREATER_THAN" + SalesforceConnectorOperatorBetween SalesforceConnectorOperator = "BETWEEN" + SalesforceConnectorOperatorLessThanOrEqualTo SalesforceConnectorOperator = "LESS_THAN_OR_EQUAL_TO" + SalesforceConnectorOperatorGreaterThanOrEqualTo SalesforceConnectorOperator = "GREATER_THAN_OR_EQUAL_TO" + SalesforceConnectorOperatorEqualTo SalesforceConnectorOperator = "EQUAL_TO" + SalesforceConnectorOperatorNotEqualTo SalesforceConnectorOperator = "NOT_EQUAL_TO" + SalesforceConnectorOperatorAddition SalesforceConnectorOperator = "ADDITION" + SalesforceConnectorOperatorMultiplication SalesforceConnectorOperator = "MULTIPLICATION" + SalesforceConnectorOperatorDivision SalesforceConnectorOperator = "DIVISION" + SalesforceConnectorOperatorSubtraction SalesforceConnectorOperator = "SUBTRACTION" + SalesforceConnectorOperatorMaskAll SalesforceConnectorOperator = "MASK_ALL" + SalesforceConnectorOperatorMaskFirstN SalesforceConnectorOperator = "MASK_FIRST_N" + SalesforceConnectorOperatorMaskLastN SalesforceConnectorOperator = "MASK_LAST_N" + SalesforceConnectorOperatorValidateNonNull SalesforceConnectorOperator = "VALIDATE_NON_NULL" + SalesforceConnectorOperatorValidateNonZero SalesforceConnectorOperator = "VALIDATE_NON_ZERO" + 
SalesforceConnectorOperatorValidateNonNegative SalesforceConnectorOperator = "VALIDATE_NON_NEGATIVE" + SalesforceConnectorOperatorValidateNumeric SalesforceConnectorOperator = "VALIDATE_NUMERIC" + SalesforceConnectorOperatorNoOp SalesforceConnectorOperator = "NO_OP" ) // Values returns all known values for SalesforceConnectorOperator. Note that this @@ -665,27 +665,27 @@ type ServiceNowConnectorOperator string // Enum values for ServiceNowConnectorOperator const ( - ServiceNowConnectorOperatorProjection ServiceNowConnectorOperator = "PROJECTION" - ServiceNowConnectorOperatorContains ServiceNowConnectorOperator = "CONTAINS" - ServiceNowConnectorOperatorLess_than ServiceNowConnectorOperator = "LESS_THAN" - ServiceNowConnectorOperatorGreater_than ServiceNowConnectorOperator = "GREATER_THAN" - ServiceNowConnectorOperatorBetween ServiceNowConnectorOperator = "BETWEEN" - ServiceNowConnectorOperatorLess_than_or_equal_to ServiceNowConnectorOperator = "LESS_THAN_OR_EQUAL_TO" - ServiceNowConnectorOperatorGreater_than_or_equal_to ServiceNowConnectorOperator = "GREATER_THAN_OR_EQUAL_TO" - ServiceNowConnectorOperatorEqual_to ServiceNowConnectorOperator = "EQUAL_TO" - ServiceNowConnectorOperatorNot_equal_to ServiceNowConnectorOperator = "NOT_EQUAL_TO" - ServiceNowConnectorOperatorAddition ServiceNowConnectorOperator = "ADDITION" - ServiceNowConnectorOperatorMultiplication ServiceNowConnectorOperator = "MULTIPLICATION" - ServiceNowConnectorOperatorDivision ServiceNowConnectorOperator = "DIVISION" - ServiceNowConnectorOperatorSubtraction ServiceNowConnectorOperator = "SUBTRACTION" - ServiceNowConnectorOperatorMask_all ServiceNowConnectorOperator = "MASK_ALL" - ServiceNowConnectorOperatorMask_first_n ServiceNowConnectorOperator = "MASK_FIRST_N" - ServiceNowConnectorOperatorMask_last_n ServiceNowConnectorOperator = "MASK_LAST_N" - ServiceNowConnectorOperatorValidate_non_null ServiceNowConnectorOperator = "VALIDATE_NON_NULL" - ServiceNowConnectorOperatorValidate_non_zero ServiceNowConnectorOperator = "VALIDATE_NON_ZERO" - ServiceNowConnectorOperatorValidate_non_negative ServiceNowConnectorOperator = "VALIDATE_NON_NEGATIVE" - ServiceNowConnectorOperatorValidate_numeric ServiceNowConnectorOperator = "VALIDATE_NUMERIC" - ServiceNowConnectorOperatorNo_op ServiceNowConnectorOperator = "NO_OP" + ServiceNowConnectorOperatorProjection ServiceNowConnectorOperator = "PROJECTION" + ServiceNowConnectorOperatorContains ServiceNowConnectorOperator = "CONTAINS" + ServiceNowConnectorOperatorLessThan ServiceNowConnectorOperator = "LESS_THAN" + ServiceNowConnectorOperatorGreaterThan ServiceNowConnectorOperator = "GREATER_THAN" + ServiceNowConnectorOperatorBetween ServiceNowConnectorOperator = "BETWEEN" + ServiceNowConnectorOperatorLessThanOrEqualTo ServiceNowConnectorOperator = "LESS_THAN_OR_EQUAL_TO" + ServiceNowConnectorOperatorGreaterThanOrEqualTo ServiceNowConnectorOperator = "GREATER_THAN_OR_EQUAL_TO" + ServiceNowConnectorOperatorEqualTo ServiceNowConnectorOperator = "EQUAL_TO" + ServiceNowConnectorOperatorNotEqualTo ServiceNowConnectorOperator = "NOT_EQUAL_TO" + ServiceNowConnectorOperatorAddition ServiceNowConnectorOperator = "ADDITION" + ServiceNowConnectorOperatorMultiplication ServiceNowConnectorOperator = "MULTIPLICATION" + ServiceNowConnectorOperatorDivision ServiceNowConnectorOperator = "DIVISION" + ServiceNowConnectorOperatorSubtraction ServiceNowConnectorOperator = "SUBTRACTION" + ServiceNowConnectorOperatorMaskAll ServiceNowConnectorOperator = "MASK_ALL" + ServiceNowConnectorOperatorMaskFirstN 
ServiceNowConnectorOperator = "MASK_FIRST_N" + ServiceNowConnectorOperatorMaskLastN ServiceNowConnectorOperator = "MASK_LAST_N" + ServiceNowConnectorOperatorValidateNonNull ServiceNowConnectorOperator = "VALIDATE_NON_NULL" + ServiceNowConnectorOperatorValidateNonZero ServiceNowConnectorOperator = "VALIDATE_NON_ZERO" + ServiceNowConnectorOperatorValidateNonNegative ServiceNowConnectorOperator = "VALIDATE_NON_NEGATIVE" + ServiceNowConnectorOperatorValidateNumeric ServiceNowConnectorOperator = "VALIDATE_NUMERIC" + ServiceNowConnectorOperatorNoOp ServiceNowConnectorOperator = "NO_OP" ) // Values returns all known values for ServiceNowConnectorOperator. Note that this @@ -721,20 +721,20 @@ type SingularConnectorOperator string // Enum values for SingularConnectorOperator const ( - SingularConnectorOperatorProjection SingularConnectorOperator = "PROJECTION" - SingularConnectorOperatorEqual_to SingularConnectorOperator = "EQUAL_TO" - SingularConnectorOperatorAddition SingularConnectorOperator = "ADDITION" - SingularConnectorOperatorMultiplication SingularConnectorOperator = "MULTIPLICATION" - SingularConnectorOperatorDivision SingularConnectorOperator = "DIVISION" - SingularConnectorOperatorSubtraction SingularConnectorOperator = "SUBTRACTION" - SingularConnectorOperatorMask_all SingularConnectorOperator = "MASK_ALL" - SingularConnectorOperatorMask_first_n SingularConnectorOperator = "MASK_FIRST_N" - SingularConnectorOperatorMask_last_n SingularConnectorOperator = "MASK_LAST_N" - SingularConnectorOperatorValidate_non_null SingularConnectorOperator = "VALIDATE_NON_NULL" - SingularConnectorOperatorValidate_non_zero SingularConnectorOperator = "VALIDATE_NON_ZERO" - SingularConnectorOperatorValidate_non_negative SingularConnectorOperator = "VALIDATE_NON_NEGATIVE" - SingularConnectorOperatorValidate_numeric SingularConnectorOperator = "VALIDATE_NUMERIC" - SingularConnectorOperatorNo_op SingularConnectorOperator = "NO_OP" + SingularConnectorOperatorProjection SingularConnectorOperator = "PROJECTION" + SingularConnectorOperatorEqualTo SingularConnectorOperator = "EQUAL_TO" + SingularConnectorOperatorAddition SingularConnectorOperator = "ADDITION" + SingularConnectorOperatorMultiplication SingularConnectorOperator = "MULTIPLICATION" + SingularConnectorOperatorDivision SingularConnectorOperator = "DIVISION" + SingularConnectorOperatorSubtraction SingularConnectorOperator = "SUBTRACTION" + SingularConnectorOperatorMaskAll SingularConnectorOperator = "MASK_ALL" + SingularConnectorOperatorMaskFirstN SingularConnectorOperator = "MASK_FIRST_N" + SingularConnectorOperatorMaskLastN SingularConnectorOperator = "MASK_LAST_N" + SingularConnectorOperatorValidateNonNull SingularConnectorOperator = "VALIDATE_NON_NULL" + SingularConnectorOperatorValidateNonZero SingularConnectorOperator = "VALIDATE_NON_ZERO" + SingularConnectorOperatorValidateNonNegative SingularConnectorOperator = "VALIDATE_NON_NEGATIVE" + SingularConnectorOperatorValidateNumeric SingularConnectorOperator = "VALIDATE_NUMERIC" + SingularConnectorOperatorNoOp SingularConnectorOperator = "NO_OP" ) // Values returns all known values for SingularConnectorOperator. 
Note that this @@ -763,25 +763,25 @@ type SlackConnectorOperator string // Enum values for SlackConnectorOperator const ( - SlackConnectorOperatorProjection SlackConnectorOperator = "PROJECTION" - SlackConnectorOperatorLess_than SlackConnectorOperator = "LESS_THAN" - SlackConnectorOperatorGreater_than SlackConnectorOperator = "GREATER_THAN" - SlackConnectorOperatorBetween SlackConnectorOperator = "BETWEEN" - SlackConnectorOperatorLess_than_or_equal_to SlackConnectorOperator = "LESS_THAN_OR_EQUAL_TO" - SlackConnectorOperatorGreater_than_or_equal_to SlackConnectorOperator = "GREATER_THAN_OR_EQUAL_TO" - SlackConnectorOperatorEqual_to SlackConnectorOperator = "EQUAL_TO" - SlackConnectorOperatorAddition SlackConnectorOperator = "ADDITION" - SlackConnectorOperatorMultiplication SlackConnectorOperator = "MULTIPLICATION" - SlackConnectorOperatorDivision SlackConnectorOperator = "DIVISION" - SlackConnectorOperatorSubtraction SlackConnectorOperator = "SUBTRACTION" - SlackConnectorOperatorMask_all SlackConnectorOperator = "MASK_ALL" - SlackConnectorOperatorMask_first_n SlackConnectorOperator = "MASK_FIRST_N" - SlackConnectorOperatorMask_last_n SlackConnectorOperator = "MASK_LAST_N" - SlackConnectorOperatorValidate_non_null SlackConnectorOperator = "VALIDATE_NON_NULL" - SlackConnectorOperatorValidate_non_zero SlackConnectorOperator = "VALIDATE_NON_ZERO" - SlackConnectorOperatorValidate_non_negative SlackConnectorOperator = "VALIDATE_NON_NEGATIVE" - SlackConnectorOperatorValidate_numeric SlackConnectorOperator = "VALIDATE_NUMERIC" - SlackConnectorOperatorNo_op SlackConnectorOperator = "NO_OP" + SlackConnectorOperatorProjection SlackConnectorOperator = "PROJECTION" + SlackConnectorOperatorLessThan SlackConnectorOperator = "LESS_THAN" + SlackConnectorOperatorGreaterThan SlackConnectorOperator = "GREATER_THAN" + SlackConnectorOperatorBetween SlackConnectorOperator = "BETWEEN" + SlackConnectorOperatorLessThanOrEqualTo SlackConnectorOperator = "LESS_THAN_OR_EQUAL_TO" + SlackConnectorOperatorGreaterThanOrEqualTo SlackConnectorOperator = "GREATER_THAN_OR_EQUAL_TO" + SlackConnectorOperatorEqualTo SlackConnectorOperator = "EQUAL_TO" + SlackConnectorOperatorAddition SlackConnectorOperator = "ADDITION" + SlackConnectorOperatorMultiplication SlackConnectorOperator = "MULTIPLICATION" + SlackConnectorOperatorDivision SlackConnectorOperator = "DIVISION" + SlackConnectorOperatorSubtraction SlackConnectorOperator = "SUBTRACTION" + SlackConnectorOperatorMaskAll SlackConnectorOperator = "MASK_ALL" + SlackConnectorOperatorMaskFirstN SlackConnectorOperator = "MASK_FIRST_N" + SlackConnectorOperatorMaskLastN SlackConnectorOperator = "MASK_LAST_N" + SlackConnectorOperatorValidateNonNull SlackConnectorOperator = "VALIDATE_NON_NULL" + SlackConnectorOperatorValidateNonZero SlackConnectorOperator = "VALIDATE_NON_ZERO" + SlackConnectorOperatorValidateNonNegative SlackConnectorOperator = "VALIDATE_NON_NEGATIVE" + SlackConnectorOperatorValidateNumeric SlackConnectorOperator = "VALIDATE_NUMERIC" + SlackConnectorOperatorNoOp SlackConnectorOperator = "NO_OP" ) // Values returns all known values for SlackConnectorOperator. 
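The connector-specific operator enums in this package (Datadog, Salesforce, ServiceNow, Slack, and so on) draw their values from the same set of strings as the generic Operator type, and this patch applies the same CamelCase rename to all of them. A small sketch, not part of the patch, showing that moving between the generic and a connector-specific operator is a plain string conversion:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/appflow/types"
)

func main() {
	// Both types share the wire value "EQUAL_TO", so an explicit conversion is
	// all that is needed to reuse a generic operator as a Slack operator.
	generic := types.OperatorEqualTo
	slack := types.SlackConnectorOperator(generic)
	fmt.Println(slack == types.SlackConnectorOperatorEqualTo) // true
}
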
Note that this can @@ -843,20 +843,20 @@ type TrendmicroConnectorOperator string // Enum values for TrendmicroConnectorOperator const ( - TrendmicroConnectorOperatorProjection TrendmicroConnectorOperator = "PROJECTION" - TrendmicroConnectorOperatorEqual_to TrendmicroConnectorOperator = "EQUAL_TO" - TrendmicroConnectorOperatorAddition TrendmicroConnectorOperator = "ADDITION" - TrendmicroConnectorOperatorMultiplication TrendmicroConnectorOperator = "MULTIPLICATION" - TrendmicroConnectorOperatorDivision TrendmicroConnectorOperator = "DIVISION" - TrendmicroConnectorOperatorSubtraction TrendmicroConnectorOperator = "SUBTRACTION" - TrendmicroConnectorOperatorMask_all TrendmicroConnectorOperator = "MASK_ALL" - TrendmicroConnectorOperatorMask_first_n TrendmicroConnectorOperator = "MASK_FIRST_N" - TrendmicroConnectorOperatorMask_last_n TrendmicroConnectorOperator = "MASK_LAST_N" - TrendmicroConnectorOperatorValidate_non_null TrendmicroConnectorOperator = "VALIDATE_NON_NULL" - TrendmicroConnectorOperatorValidate_non_zero TrendmicroConnectorOperator = "VALIDATE_NON_ZERO" - TrendmicroConnectorOperatorValidate_non_negative TrendmicroConnectorOperator = "VALIDATE_NON_NEGATIVE" - TrendmicroConnectorOperatorValidate_numeric TrendmicroConnectorOperator = "VALIDATE_NUMERIC" - TrendmicroConnectorOperatorNo_op TrendmicroConnectorOperator = "NO_OP" + TrendmicroConnectorOperatorProjection TrendmicroConnectorOperator = "PROJECTION" + TrendmicroConnectorOperatorEqualTo TrendmicroConnectorOperator = "EQUAL_TO" + TrendmicroConnectorOperatorAddition TrendmicroConnectorOperator = "ADDITION" + TrendmicroConnectorOperatorMultiplication TrendmicroConnectorOperator = "MULTIPLICATION" + TrendmicroConnectorOperatorDivision TrendmicroConnectorOperator = "DIVISION" + TrendmicroConnectorOperatorSubtraction TrendmicroConnectorOperator = "SUBTRACTION" + TrendmicroConnectorOperatorMaskAll TrendmicroConnectorOperator = "MASK_ALL" + TrendmicroConnectorOperatorMaskFirstN TrendmicroConnectorOperator = "MASK_FIRST_N" + TrendmicroConnectorOperatorMaskLastN TrendmicroConnectorOperator = "MASK_LAST_N" + TrendmicroConnectorOperatorValidateNonNull TrendmicroConnectorOperator = "VALIDATE_NON_NULL" + TrendmicroConnectorOperatorValidateNonZero TrendmicroConnectorOperator = "VALIDATE_NON_ZERO" + TrendmicroConnectorOperatorValidateNonNegative TrendmicroConnectorOperator = "VALIDATE_NON_NEGATIVE" + TrendmicroConnectorOperatorValidateNumeric TrendmicroConnectorOperator = "VALIDATE_NUMERIC" + TrendmicroConnectorOperatorNoOp TrendmicroConnectorOperator = "NO_OP" ) // Values returns all known values for TrendmicroConnectorOperator. 
Note that this @@ -905,27 +905,27 @@ type VeevaConnectorOperator string // Enum values for VeevaConnectorOperator const ( - VeevaConnectorOperatorProjection VeevaConnectorOperator = "PROJECTION" - VeevaConnectorOperatorLess_than VeevaConnectorOperator = "LESS_THAN" - VeevaConnectorOperatorGreater_than VeevaConnectorOperator = "GREATER_THAN" - VeevaConnectorOperatorContains VeevaConnectorOperator = "CONTAINS" - VeevaConnectorOperatorBetween VeevaConnectorOperator = "BETWEEN" - VeevaConnectorOperatorLess_than_or_equal_to VeevaConnectorOperator = "LESS_THAN_OR_EQUAL_TO" - VeevaConnectorOperatorGreater_than_or_equal_to VeevaConnectorOperator = "GREATER_THAN_OR_EQUAL_TO" - VeevaConnectorOperatorEqual_to VeevaConnectorOperator = "EQUAL_TO" - VeevaConnectorOperatorNot_equal_to VeevaConnectorOperator = "NOT_EQUAL_TO" - VeevaConnectorOperatorAddition VeevaConnectorOperator = "ADDITION" - VeevaConnectorOperatorMultiplication VeevaConnectorOperator = "MULTIPLICATION" - VeevaConnectorOperatorDivision VeevaConnectorOperator = "DIVISION" - VeevaConnectorOperatorSubtraction VeevaConnectorOperator = "SUBTRACTION" - VeevaConnectorOperatorMask_all VeevaConnectorOperator = "MASK_ALL" - VeevaConnectorOperatorMask_first_n VeevaConnectorOperator = "MASK_FIRST_N" - VeevaConnectorOperatorMask_last_n VeevaConnectorOperator = "MASK_LAST_N" - VeevaConnectorOperatorValidate_non_null VeevaConnectorOperator = "VALIDATE_NON_NULL" - VeevaConnectorOperatorValidate_non_zero VeevaConnectorOperator = "VALIDATE_NON_ZERO" - VeevaConnectorOperatorValidate_non_negative VeevaConnectorOperator = "VALIDATE_NON_NEGATIVE" - VeevaConnectorOperatorValidate_numeric VeevaConnectorOperator = "VALIDATE_NUMERIC" - VeevaConnectorOperatorNo_op VeevaConnectorOperator = "NO_OP" + VeevaConnectorOperatorProjection VeevaConnectorOperator = "PROJECTION" + VeevaConnectorOperatorLessThan VeevaConnectorOperator = "LESS_THAN" + VeevaConnectorOperatorGreaterThan VeevaConnectorOperator = "GREATER_THAN" + VeevaConnectorOperatorContains VeevaConnectorOperator = "CONTAINS" + VeevaConnectorOperatorBetween VeevaConnectorOperator = "BETWEEN" + VeevaConnectorOperatorLessThanOrEqualTo VeevaConnectorOperator = "LESS_THAN_OR_EQUAL_TO" + VeevaConnectorOperatorGreaterThanOrEqualTo VeevaConnectorOperator = "GREATER_THAN_OR_EQUAL_TO" + VeevaConnectorOperatorEqualTo VeevaConnectorOperator = "EQUAL_TO" + VeevaConnectorOperatorNotEqualTo VeevaConnectorOperator = "NOT_EQUAL_TO" + VeevaConnectorOperatorAddition VeevaConnectorOperator = "ADDITION" + VeevaConnectorOperatorMultiplication VeevaConnectorOperator = "MULTIPLICATION" + VeevaConnectorOperatorDivision VeevaConnectorOperator = "DIVISION" + VeevaConnectorOperatorSubtraction VeevaConnectorOperator = "SUBTRACTION" + VeevaConnectorOperatorMaskAll VeevaConnectorOperator = "MASK_ALL" + VeevaConnectorOperatorMaskFirstN VeevaConnectorOperator = "MASK_FIRST_N" + VeevaConnectorOperatorMaskLastN VeevaConnectorOperator = "MASK_LAST_N" + VeevaConnectorOperatorValidateNonNull VeevaConnectorOperator = "VALIDATE_NON_NULL" + VeevaConnectorOperatorValidateNonZero VeevaConnectorOperator = "VALIDATE_NON_ZERO" + VeevaConnectorOperatorValidateNonNegative VeevaConnectorOperator = "VALIDATE_NON_NEGATIVE" + VeevaConnectorOperatorValidateNumeric VeevaConnectorOperator = "VALIDATE_NUMERIC" + VeevaConnectorOperatorNoOp VeevaConnectorOperator = "NO_OP" ) // Values returns all known values for VeevaConnectorOperator. 
Note that this can @@ -981,20 +981,20 @@ type ZendeskConnectorOperator string // Enum values for ZendeskConnectorOperator const ( - ZendeskConnectorOperatorProjection ZendeskConnectorOperator = "PROJECTION" - ZendeskConnectorOperatorGreater_than ZendeskConnectorOperator = "GREATER_THAN" - ZendeskConnectorOperatorAddition ZendeskConnectorOperator = "ADDITION" - ZendeskConnectorOperatorMultiplication ZendeskConnectorOperator = "MULTIPLICATION" - ZendeskConnectorOperatorDivision ZendeskConnectorOperator = "DIVISION" - ZendeskConnectorOperatorSubtraction ZendeskConnectorOperator = "SUBTRACTION" - ZendeskConnectorOperatorMask_all ZendeskConnectorOperator = "MASK_ALL" - ZendeskConnectorOperatorMask_first_n ZendeskConnectorOperator = "MASK_FIRST_N" - ZendeskConnectorOperatorMask_last_n ZendeskConnectorOperator = "MASK_LAST_N" - ZendeskConnectorOperatorValidate_non_null ZendeskConnectorOperator = "VALIDATE_NON_NULL" - ZendeskConnectorOperatorValidate_non_zero ZendeskConnectorOperator = "VALIDATE_NON_ZERO" - ZendeskConnectorOperatorValidate_non_negative ZendeskConnectorOperator = "VALIDATE_NON_NEGATIVE" - ZendeskConnectorOperatorValidate_numeric ZendeskConnectorOperator = "VALIDATE_NUMERIC" - ZendeskConnectorOperatorNo_op ZendeskConnectorOperator = "NO_OP" + ZendeskConnectorOperatorProjection ZendeskConnectorOperator = "PROJECTION" + ZendeskConnectorOperatorGreaterThan ZendeskConnectorOperator = "GREATER_THAN" + ZendeskConnectorOperatorAddition ZendeskConnectorOperator = "ADDITION" + ZendeskConnectorOperatorMultiplication ZendeskConnectorOperator = "MULTIPLICATION" + ZendeskConnectorOperatorDivision ZendeskConnectorOperator = "DIVISION" + ZendeskConnectorOperatorSubtraction ZendeskConnectorOperator = "SUBTRACTION" + ZendeskConnectorOperatorMaskAll ZendeskConnectorOperator = "MASK_ALL" + ZendeskConnectorOperatorMaskFirstN ZendeskConnectorOperator = "MASK_FIRST_N" + ZendeskConnectorOperatorMaskLastN ZendeskConnectorOperator = "MASK_LAST_N" + ZendeskConnectorOperatorValidateNonNull ZendeskConnectorOperator = "VALIDATE_NON_NULL" + ZendeskConnectorOperatorValidateNonZero ZendeskConnectorOperator = "VALIDATE_NON_ZERO" + ZendeskConnectorOperatorValidateNonNegative ZendeskConnectorOperator = "VALIDATE_NON_NEGATIVE" + ZendeskConnectorOperatorValidateNumeric ZendeskConnectorOperator = "VALIDATE_NUMERIC" + ZendeskConnectorOperatorNoOp ZendeskConnectorOperator = "NO_OP" ) // Values returns all known values for ZendeskConnectorOperator. Note that this can diff --git a/service/applicationautoscaling/api_op_DeleteScalingPolicy.go b/service/applicationautoscaling/api_op_DeleteScalingPolicy.go index ec2f212e6d1..8adf67cdfe8 100644 --- a/service/applicationautoscaling/api_op_DeleteScalingPolicy.go +++ b/service/applicationautoscaling/api_op_DeleteScalingPolicy.go @@ -45,68 +45,68 @@ type DeleteScalingPolicyInput struct { // The identifier of the resource associated with the scalable target. This string // consists of the resource type and unique identifier. // - // * ECS service - The + // * ECS service - The // resource type is service and the unique identifier is the cluster name and // service name. Example: service/default/sample-webapp. // - // * Spot Fleet request - // - The resource type is spot-fleet-request and the unique identifier is the Spot + // * Spot Fleet request - + // The resource type is spot-fleet-request and the unique identifier is the Spot // Fleet request ID. Example: // spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. 
// - // * EMR cluster - // - The resource type is instancegroup and the unique identifier is the cluster ID + // * EMR cluster - + // The resource type is instancegroup and the unique identifier is the cluster ID // and instance group ID. Example: // instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0. // - // * AppStream 2.0 fleet - The + // * AppStream 2.0 fleet - The // resource type is fleet and the unique identifier is the fleet name. Example: // fleet/sample-fleet. // - // * DynamoDB table - The resource type is table and the + // * DynamoDB table - The resource type is table and the // unique identifier is the table name. Example: table/my-table. // - // * DynamoDB - // global secondary index - The resource type is index and the unique identifier is - // the index name. Example: table/my-table/index/my-table-index. + // * DynamoDB global + // secondary index - The resource type is index and the unique identifier is the + // index name. Example: table/my-table/index/my-table-index. // - // * Aurora DB - // cluster - The resource type is cluster and the unique identifier is the cluster - // name. Example: cluster:my-db-cluster. + // * Aurora DB cluster - + // The resource type is cluster and the unique identifier is the cluster name. + // Example: cluster:my-db-cluster. // - // * Amazon SageMaker endpoint variant - - // The resource type is variant and the unique identifier is the resource ID. - // Example: endpoint/my-end-point/variant/KMeansClustering. + // * Amazon SageMaker endpoint variant - The + // resource type is variant and the unique identifier is the resource ID. Example: + // endpoint/my-end-point/variant/KMeansClustering. // - // * Custom resources - // are not supported with a resource type. This parameter must specify the - // OutputValue from the CloudFormation template stack used to access the resources. - // The unique identifier is defined by the service provider. More information is - // available in our GitHub repository + // * Custom resources are not + // supported with a resource type. This parameter must specify the OutputValue from + // the CloudFormation template stack used to access the resources. The unique + // identifier is defined by the service provider. More information is available in + // our GitHub repository // (https://github.com/aws/aws-auto-scaling-custom-resource). // - // * Amazon - // Comprehend document classification endpoint - The resource type and unique - // identifier are specified using the endpoint ARN. Example: + // * Amazon Comprehend + // document classification endpoint - The resource type and unique identifier are + // specified using the endpoint ARN. Example: // arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE. // - // - // * Amazon Comprehend entity recognizer endpoint - The resource type and unique + // * + // Amazon Comprehend entity recognizer endpoint - The resource type and unique // identifier are specified using the endpoint ARN. Example: // arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE. // - // - // * Lambda provisioned concurrency - The resource type is function and the unique + // * + // Lambda provisioned concurrency - The resource type is function and the unique // identifier is the function name with a function version or alias name suffix // that is not $LATEST. Example: function:my-function:prod or // function:my-function:1. // - // * Amazon Keyspaces table - The resource type is - // table and the unique identifier is the table name. 
Example: + // * Amazon Keyspaces table - The resource type is table + // and the unique identifier is the table name. Example: // keyspace/mykeyspace/table/mytable. // - // * Amazon MSK cluster - The resource type - // and unique identifier are specified using the cluster ARN. Example: + // * Amazon MSK cluster - The resource type and + // unique identifier are specified using the cluster ARN. Example: // arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5. // // This member is required. @@ -115,66 +115,65 @@ type DeleteScalingPolicyInput struct { // The scalable dimension. This string consists of the service namespace, resource // type, and scaling property. // - // * ecs:service:DesiredCount - The desired task - // count of an ECS service. + // * ecs:service:DesiredCount - The desired task count + // of an ECS service. // - // * ec2:spot-fleet-request:TargetCapacity - The - // target capacity of a Spot Fleet request. + // * ec2:spot-fleet-request:TargetCapacity - The target + // capacity of a Spot Fleet request. // - // * + // * // elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR // Instance Group. // - // * appstream:fleet:DesiredCapacity - The desired capacity of - // an AppStream 2.0 fleet. + // * appstream:fleet:DesiredCapacity - The desired capacity of an + // AppStream 2.0 fleet. // - // * dynamodb:table:ReadCapacityUnits - The - // provisioned read capacity for a DynamoDB table. + // * dynamodb:table:ReadCapacityUnits - The provisioned read + // capacity for a DynamoDB table. // - // * - // dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a - // DynamoDB table. + // * dynamodb:table:WriteCapacityUnits - The + // provisioned write capacity for a DynamoDB table. // - // * dynamodb:index:ReadCapacityUnits - The provisioned read - // capacity for a DynamoDB global secondary index. + // * + // dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB + // global secondary index. // - // * - // dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a - // DynamoDB global secondary index. + // * dynamodb:index:WriteCapacityUnits - The provisioned + // write capacity for a DynamoDB global secondary index. // - // * rds:cluster:ReadReplicaCount - The count - // of Aurora Replicas in an Aurora DB cluster. Available for Aurora - // MySQL-compatible edition and Aurora PostgreSQL-compatible edition. + // * + // rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB + // cluster. Available for Aurora MySQL-compatible edition and Aurora + // PostgreSQL-compatible edition. // - // * - // sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an - // Amazon SageMaker model endpoint variant. + // * sagemaker:variant:DesiredInstanceCount - The + // number of EC2 instances for an Amazon SageMaker model endpoint variant. // - // * + // * // custom-resource:ResourceType:Property - The scalable dimension for a custom // resource provided by your own application or service. // - // * + // * // comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of // inference units for an Amazon Comprehend document classification endpoint. // - // - // * comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of + // * + // comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of // inference units for an Amazon Comprehend entity recognizer endpoint. 
// - // * + // * // lambda:function:ProvisionedConcurrency - The provisioned concurrency for a // Lambda function. // - // * cassandra:table:ReadCapacityUnits - The provisioned read + // * cassandra:table:ReadCapacityUnits - The provisioned read // capacity for an Amazon Keyspaces table. // - // * - // cassandra:table:WriteCapacityUnits - The provisioned write capacity for an - // Amazon Keyspaces table. + // * cassandra:table:WriteCapacityUnits - + // The provisioned write capacity for an Amazon Keyspaces table. // - // * kafka:broker-storage:VolumeSize - The provisioned - // volume size (in GiB) for brokers in an Amazon MSK cluster. + // * + // kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for + // brokers in an Amazon MSK cluster. // // This member is required. ScalableDimension types.ScalableDimension diff --git a/service/applicationautoscaling/api_op_DeleteScheduledAction.go b/service/applicationautoscaling/api_op_DeleteScheduledAction.go index 0671bbe4676..813db45a6d1 100644 --- a/service/applicationautoscaling/api_op_DeleteScheduledAction.go +++ b/service/applicationautoscaling/api_op_DeleteScheduledAction.go @@ -35,68 +35,68 @@ type DeleteScheduledActionInput struct { // The identifier of the resource associated with the scheduled action. This string // consists of the resource type and unique identifier. // - // * ECS service - The + // * ECS service - The // resource type is service and the unique identifier is the cluster name and // service name. Example: service/default/sample-webapp. // - // * Spot Fleet request - // - The resource type is spot-fleet-request and the unique identifier is the Spot + // * Spot Fleet request - + // The resource type is spot-fleet-request and the unique identifier is the Spot // Fleet request ID. Example: // spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. // - // * EMR cluster - // - The resource type is instancegroup and the unique identifier is the cluster ID + // * EMR cluster - + // The resource type is instancegroup and the unique identifier is the cluster ID // and instance group ID. Example: // instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0. // - // * AppStream 2.0 fleet - The + // * AppStream 2.0 fleet - The // resource type is fleet and the unique identifier is the fleet name. Example: // fleet/sample-fleet. // - // * DynamoDB table - The resource type is table and the + // * DynamoDB table - The resource type is table and the // unique identifier is the table name. Example: table/my-table. // - // * DynamoDB - // global secondary index - The resource type is index and the unique identifier is - // the index name. Example: table/my-table/index/my-table-index. + // * DynamoDB global + // secondary index - The resource type is index and the unique identifier is the + // index name. Example: table/my-table/index/my-table-index. // - // * Aurora DB - // cluster - The resource type is cluster and the unique identifier is the cluster - // name. Example: cluster:my-db-cluster. + // * Aurora DB cluster - + // The resource type is cluster and the unique identifier is the cluster name. + // Example: cluster:my-db-cluster. // - // * Amazon SageMaker endpoint variant - - // The resource type is variant and the unique identifier is the resource ID. - // Example: endpoint/my-end-point/variant/KMeansClustering. + // * Amazon SageMaker endpoint variant - The + // resource type is variant and the unique identifier is the resource ID. Example: + // endpoint/my-end-point/variant/KMeansClustering. 
// - // * Custom resources - // are not supported with a resource type. This parameter must specify the - // OutputValue from the CloudFormation template stack used to access the resources. - // The unique identifier is defined by the service provider. More information is - // available in our GitHub repository + // * Custom resources are not + // supported with a resource type. This parameter must specify the OutputValue from + // the CloudFormation template stack used to access the resources. The unique + // identifier is defined by the service provider. More information is available in + // our GitHub repository // (https://github.com/aws/aws-auto-scaling-custom-resource). // - // * Amazon - // Comprehend document classification endpoint - The resource type and unique - // identifier are specified using the endpoint ARN. Example: + // * Amazon Comprehend + // document classification endpoint - The resource type and unique identifier are + // specified using the endpoint ARN. Example: // arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE. // - // - // * Amazon Comprehend entity recognizer endpoint - The resource type and unique + // * + // Amazon Comprehend entity recognizer endpoint - The resource type and unique // identifier are specified using the endpoint ARN. Example: // arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE. // - // - // * Lambda provisioned concurrency - The resource type is function and the unique + // * + // Lambda provisioned concurrency - The resource type is function and the unique // identifier is the function name with a function version or alias name suffix // that is not $LATEST. Example: function:my-function:prod or // function:my-function:1. // - // * Amazon Keyspaces table - The resource type is - // table and the unique identifier is the table name. Example: + // * Amazon Keyspaces table - The resource type is table + // and the unique identifier is the table name. Example: // keyspace/mykeyspace/table/mytable. // - // * Amazon MSK cluster - The resource type - // and unique identifier are specified using the cluster ARN. Example: + // * Amazon MSK cluster - The resource type and + // unique identifier are specified using the cluster ARN. Example: // arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5. // // This member is required. @@ -105,66 +105,65 @@ type DeleteScheduledActionInput struct { // The scalable dimension. This string consists of the service namespace, resource // type, and scaling property. // - // * ecs:service:DesiredCount - The desired task - // count of an ECS service. + // * ecs:service:DesiredCount - The desired task count + // of an ECS service. // - // * ec2:spot-fleet-request:TargetCapacity - The - // target capacity of a Spot Fleet request. + // * ec2:spot-fleet-request:TargetCapacity - The target + // capacity of a Spot Fleet request. // - // * + // * // elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR // Instance Group. // - // * appstream:fleet:DesiredCapacity - The desired capacity of - // an AppStream 2.0 fleet. + // * appstream:fleet:DesiredCapacity - The desired capacity of an + // AppStream 2.0 fleet. // - // * dynamodb:table:ReadCapacityUnits - The - // provisioned read capacity for a DynamoDB table. + // * dynamodb:table:ReadCapacityUnits - The provisioned read + // capacity for a DynamoDB table. 
// - // * - // dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a - // DynamoDB table. + // * dynamodb:table:WriteCapacityUnits - The + // provisioned write capacity for a DynamoDB table. // - // * dynamodb:index:ReadCapacityUnits - The provisioned read - // capacity for a DynamoDB global secondary index. + // * + // dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB + // global secondary index. // - // * - // dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a - // DynamoDB global secondary index. + // * dynamodb:index:WriteCapacityUnits - The provisioned + // write capacity for a DynamoDB global secondary index. // - // * rds:cluster:ReadReplicaCount - The count - // of Aurora Replicas in an Aurora DB cluster. Available for Aurora - // MySQL-compatible edition and Aurora PostgreSQL-compatible edition. + // * + // rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB + // cluster. Available for Aurora MySQL-compatible edition and Aurora + // PostgreSQL-compatible edition. // - // * - // sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an - // Amazon SageMaker model endpoint variant. + // * sagemaker:variant:DesiredInstanceCount - The + // number of EC2 instances for an Amazon SageMaker model endpoint variant. // - // * + // * // custom-resource:ResourceType:Property - The scalable dimension for a custom // resource provided by your own application or service. // - // * + // * // comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of // inference units for an Amazon Comprehend document classification endpoint. // - // - // * comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of + // * + // comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of // inference units for an Amazon Comprehend entity recognizer endpoint. // - // * + // * // lambda:function:ProvisionedConcurrency - The provisioned concurrency for a // Lambda function. // - // * cassandra:table:ReadCapacityUnits - The provisioned read + // * cassandra:table:ReadCapacityUnits - The provisioned read // capacity for an Amazon Keyspaces table. // - // * - // cassandra:table:WriteCapacityUnits - The provisioned write capacity for an - // Amazon Keyspaces table. + // * cassandra:table:WriteCapacityUnits - + // The provisioned write capacity for an Amazon Keyspaces table. // - // * kafka:broker-storage:VolumeSize - The provisioned - // volume size (in GiB) for brokers in an Amazon MSK cluster. + // * + // kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for + // brokers in an Amazon MSK cluster. // // This member is required. ScalableDimension types.ScalableDimension diff --git a/service/applicationautoscaling/api_op_DeregisterScalableTarget.go b/service/applicationautoscaling/api_op_DeregisterScalableTarget.go index 08e5d5904f4..c6787a89ad3 100644 --- a/service/applicationautoscaling/api_op_DeregisterScalableTarget.go +++ b/service/applicationautoscaling/api_op_DeregisterScalableTarget.go @@ -37,68 +37,68 @@ type DeregisterScalableTargetInput struct { // The identifier of the resource associated with the scalable target. This string // consists of the resource type and unique identifier. // - // * ECS service - The + // * ECS service - The // resource type is service and the unique identifier is the cluster name and // service name. Example: service/default/sample-webapp. 
// - // * Spot Fleet request - // - The resource type is spot-fleet-request and the unique identifier is the Spot + // * Spot Fleet request - + // The resource type is spot-fleet-request and the unique identifier is the Spot // Fleet request ID. Example: // spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. // - // * EMR cluster - // - The resource type is instancegroup and the unique identifier is the cluster ID + // * EMR cluster - + // The resource type is instancegroup and the unique identifier is the cluster ID // and instance group ID. Example: // instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0. // - // * AppStream 2.0 fleet - The + // * AppStream 2.0 fleet - The // resource type is fleet and the unique identifier is the fleet name. Example: // fleet/sample-fleet. // - // * DynamoDB table - The resource type is table and the + // * DynamoDB table - The resource type is table and the // unique identifier is the table name. Example: table/my-table. // - // * DynamoDB - // global secondary index - The resource type is index and the unique identifier is - // the index name. Example: table/my-table/index/my-table-index. + // * DynamoDB global + // secondary index - The resource type is index and the unique identifier is the + // index name. Example: table/my-table/index/my-table-index. // - // * Aurora DB - // cluster - The resource type is cluster and the unique identifier is the cluster - // name. Example: cluster:my-db-cluster. + // * Aurora DB cluster - + // The resource type is cluster and the unique identifier is the cluster name. + // Example: cluster:my-db-cluster. // - // * Amazon SageMaker endpoint variant - - // The resource type is variant and the unique identifier is the resource ID. - // Example: endpoint/my-end-point/variant/KMeansClustering. + // * Amazon SageMaker endpoint variant - The + // resource type is variant and the unique identifier is the resource ID. Example: + // endpoint/my-end-point/variant/KMeansClustering. // - // * Custom resources - // are not supported with a resource type. This parameter must specify the - // OutputValue from the CloudFormation template stack used to access the resources. - // The unique identifier is defined by the service provider. More information is - // available in our GitHub repository + // * Custom resources are not + // supported with a resource type. This parameter must specify the OutputValue from + // the CloudFormation template stack used to access the resources. The unique + // identifier is defined by the service provider. More information is available in + // our GitHub repository // (https://github.com/aws/aws-auto-scaling-custom-resource). // - // * Amazon - // Comprehend document classification endpoint - The resource type and unique - // identifier are specified using the endpoint ARN. Example: + // * Amazon Comprehend + // document classification endpoint - The resource type and unique identifier are + // specified using the endpoint ARN. Example: // arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE. // - // - // * Amazon Comprehend entity recognizer endpoint - The resource type and unique + // * + // Amazon Comprehend entity recognizer endpoint - The resource type and unique // identifier are specified using the endpoint ARN. Example: // arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE. 
// - // - // * Lambda provisioned concurrency - The resource type is function and the unique + // * + // Lambda provisioned concurrency - The resource type is function and the unique // identifier is the function name with a function version or alias name suffix // that is not $LATEST. Example: function:my-function:prod or // function:my-function:1. // - // * Amazon Keyspaces table - The resource type is - // table and the unique identifier is the table name. Example: + // * Amazon Keyspaces table - The resource type is table + // and the unique identifier is the table name. Example: // keyspace/mykeyspace/table/mytable. // - // * Amazon MSK cluster - The resource type - // and unique identifier are specified using the cluster ARN. Example: + // * Amazon MSK cluster - The resource type and + // unique identifier are specified using the cluster ARN. Example: // arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5. // // This member is required. @@ -107,66 +107,65 @@ type DeregisterScalableTargetInput struct { // The scalable dimension associated with the scalable target. This string consists // of the service namespace, resource type, and scaling property. // - // * + // * // ecs:service:DesiredCount - The desired task count of an ECS service. // - // * + // * // ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet // request. // - // * elasticmapreduce:instancegroup:InstanceCount - The instance - // count of an EMR Instance Group. + // * elasticmapreduce:instancegroup:InstanceCount - The instance count of + // an EMR Instance Group. // - // * appstream:fleet:DesiredCapacity - The - // desired capacity of an AppStream 2.0 fleet. + // * appstream:fleet:DesiredCapacity - The desired capacity + // of an AppStream 2.0 fleet. // - // * - // dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB - // table. + // * dynamodb:table:ReadCapacityUnits - The provisioned + // read capacity for a DynamoDB table. // - // * dynamodb:table:WriteCapacityUnits - The provisioned write capacity - // for a DynamoDB table. + // * dynamodb:table:WriteCapacityUnits - The + // provisioned write capacity for a DynamoDB table. // - // * dynamodb:index:ReadCapacityUnits - The provisioned - // read capacity for a DynamoDB global secondary index. + // * + // dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB + // global secondary index. // - // * - // dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a - // DynamoDB global secondary index. + // * dynamodb:index:WriteCapacityUnits - The provisioned + // write capacity for a DynamoDB global secondary index. // - // * rds:cluster:ReadReplicaCount - The count - // of Aurora Replicas in an Aurora DB cluster. Available for Aurora - // MySQL-compatible edition and Aurora PostgreSQL-compatible edition. + // * + // rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB + // cluster. Available for Aurora MySQL-compatible edition and Aurora + // PostgreSQL-compatible edition. // - // * - // sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an - // Amazon SageMaker model endpoint variant. + // * sagemaker:variant:DesiredInstanceCount - The + // number of EC2 instances for an Amazon SageMaker model endpoint variant. // - // * + // * // custom-resource:ResourceType:Property - The scalable dimension for a custom // resource provided by your own application or service. 
// - // * + // * // comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of // inference units for an Amazon Comprehend document classification endpoint. // - // - // * comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of + // * + // comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of // inference units for an Amazon Comprehend entity recognizer endpoint. // - // * + // * // lambda:function:ProvisionedConcurrency - The provisioned concurrency for a // Lambda function. // - // * cassandra:table:ReadCapacityUnits - The provisioned read + // * cassandra:table:ReadCapacityUnits - The provisioned read // capacity for an Amazon Keyspaces table. // - // * - // cassandra:table:WriteCapacityUnits - The provisioned write capacity for an - // Amazon Keyspaces table. + // * cassandra:table:WriteCapacityUnits - + // The provisioned write capacity for an Amazon Keyspaces table. // - // * kafka:broker-storage:VolumeSize - The provisioned - // volume size (in GiB) for brokers in an Amazon MSK cluster. + // * + // kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for + // brokers in an Amazon MSK cluster. // // This member is required. ScalableDimension types.ScalableDimension diff --git a/service/applicationautoscaling/api_op_DescribeScalableTargets.go b/service/applicationautoscaling/api_op_DescribeScalableTargets.go index 2c9f230c845..83ca2831d5e 100644 --- a/service/applicationautoscaling/api_op_DescribeScalableTargets.go +++ b/service/applicationautoscaling/api_op_DescribeScalableTargets.go @@ -51,68 +51,67 @@ type DescribeScalableTargetsInput struct { // consists of the resource type and unique identifier. If you specify a scalable // dimension, you must also specify a resource ID. // - // * ECS service - The - // resource type is service and the unique identifier is the cluster name and - // service name. Example: service/default/sample-webapp. - // - // * Spot Fleet request - // - The resource type is spot-fleet-request and the unique identifier is the Spot - // Fleet request ID. Example: - // spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. - // - // * EMR cluster - // - The resource type is instancegroup and the unique identifier is the cluster ID - // and instance group ID. Example: + // * ECS service - The resource + // type is service and the unique identifier is the cluster name and service name. + // Example: service/default/sample-webapp. + // + // * Spot Fleet request - The resource + // type is spot-fleet-request and the unique identifier is the Spot Fleet request + // ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. + // + // * EMR + // cluster - The resource type is instancegroup and the unique identifier is the + // cluster ID and instance group ID. Example: // instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0. // - // * AppStream 2.0 fleet - The + // * AppStream 2.0 fleet - The // resource type is fleet and the unique identifier is the fleet name. Example: // fleet/sample-fleet. // - // * DynamoDB table - The resource type is table and the + // * DynamoDB table - The resource type is table and the // unique identifier is the table name. Example: table/my-table. // - // * DynamoDB - // global secondary index - The resource type is index and the unique identifier is - // the index name. Example: table/my-table/index/my-table-index. + // * DynamoDB global + // secondary index - The resource type is index and the unique identifier is the + // index name. 
Example: table/my-table/index/my-table-index. // - // * Aurora DB - // cluster - The resource type is cluster and the unique identifier is the cluster - // name. Example: cluster:my-db-cluster. + // * Aurora DB cluster - + // The resource type is cluster and the unique identifier is the cluster name. + // Example: cluster:my-db-cluster. // - // * Amazon SageMaker endpoint variant - - // The resource type is variant and the unique identifier is the resource ID. - // Example: endpoint/my-end-point/variant/KMeansClustering. + // * Amazon SageMaker endpoint variant - The + // resource type is variant and the unique identifier is the resource ID. Example: + // endpoint/my-end-point/variant/KMeansClustering. // - // * Custom resources - // are not supported with a resource type. This parameter must specify the - // OutputValue from the CloudFormation template stack used to access the resources. - // The unique identifier is defined by the service provider. More information is - // available in our GitHub repository + // * Custom resources are not + // supported with a resource type. This parameter must specify the OutputValue from + // the CloudFormation template stack used to access the resources. The unique + // identifier is defined by the service provider. More information is available in + // our GitHub repository // (https://github.com/aws/aws-auto-scaling-custom-resource). // - // * Amazon - // Comprehend document classification endpoint - The resource type and unique - // identifier are specified using the endpoint ARN. Example: + // * Amazon Comprehend + // document classification endpoint - The resource type and unique identifier are + // specified using the endpoint ARN. Example: // arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE. // - // - // * Amazon Comprehend entity recognizer endpoint - The resource type and unique + // * + // Amazon Comprehend entity recognizer endpoint - The resource type and unique // identifier are specified using the endpoint ARN. Example: // arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE. // - // - // * Lambda provisioned concurrency - The resource type is function and the unique + // * + // Lambda provisioned concurrency - The resource type is function and the unique // identifier is the function name with a function version or alias name suffix // that is not $LATEST. Example: function:my-function:prod or // function:my-function:1. // - // * Amazon Keyspaces table - The resource type is - // table and the unique identifier is the table name. Example: + // * Amazon Keyspaces table - The resource type is table + // and the unique identifier is the table name. Example: // keyspace/mykeyspace/table/mytable. // - // * Amazon MSK cluster - The resource type - // and unique identifier are specified using the cluster ARN. Example: + // * Amazon MSK cluster - The resource type and + // unique identifier are specified using the cluster ARN. Example: // arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5. ResourceIds []*string @@ -120,66 +119,65 @@ type DescribeScalableTargetsInput struct { // of the service namespace, resource type, and scaling property. If you specify a // scalable dimension, you must also specify a resource ID. // - // * + // * // ecs:service:DesiredCount - The desired task count of an ECS service. // - // * + // * // ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet // request. 
// - // * elasticmapreduce:instancegroup:InstanceCount - The instance - // count of an EMR Instance Group. + // * elasticmapreduce:instancegroup:InstanceCount - The instance count of + // an EMR Instance Group. // - // * appstream:fleet:DesiredCapacity - The - // desired capacity of an AppStream 2.0 fleet. + // * appstream:fleet:DesiredCapacity - The desired capacity + // of an AppStream 2.0 fleet. // - // * - // dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB - // table. + // * dynamodb:table:ReadCapacityUnits - The provisioned + // read capacity for a DynamoDB table. // - // * dynamodb:table:WriteCapacityUnits - The provisioned write capacity - // for a DynamoDB table. + // * dynamodb:table:WriteCapacityUnits - The + // provisioned write capacity for a DynamoDB table. // - // * dynamodb:index:ReadCapacityUnits - The provisioned - // read capacity for a DynamoDB global secondary index. + // * + // dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB + // global secondary index. // - // * - // dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a - // DynamoDB global secondary index. + // * dynamodb:index:WriteCapacityUnits - The provisioned + // write capacity for a DynamoDB global secondary index. // - // * rds:cluster:ReadReplicaCount - The count - // of Aurora Replicas in an Aurora DB cluster. Available for Aurora - // MySQL-compatible edition and Aurora PostgreSQL-compatible edition. + // * + // rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB + // cluster. Available for Aurora MySQL-compatible edition and Aurora + // PostgreSQL-compatible edition. // - // * - // sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an - // Amazon SageMaker model endpoint variant. + // * sagemaker:variant:DesiredInstanceCount - The + // number of EC2 instances for an Amazon SageMaker model endpoint variant. // - // * + // * // custom-resource:ResourceType:Property - The scalable dimension for a custom // resource provided by your own application or service. // - // * + // * // comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of // inference units for an Amazon Comprehend document classification endpoint. // - // - // * comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of + // * + // comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of // inference units for an Amazon Comprehend entity recognizer endpoint. // - // * + // * // lambda:function:ProvisionedConcurrency - The provisioned concurrency for a // Lambda function. // - // * cassandra:table:ReadCapacityUnits - The provisioned read + // * cassandra:table:ReadCapacityUnits - The provisioned read // capacity for an Amazon Keyspaces table. // - // * - // cassandra:table:WriteCapacityUnits - The provisioned write capacity for an - // Amazon Keyspaces table. + // * cassandra:table:WriteCapacityUnits - + // The provisioned write capacity for an Amazon Keyspaces table. // - // * kafka:broker-storage:VolumeSize - The provisioned - // volume size (in GiB) for brokers in an Amazon MSK cluster. + // * + // kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for + // brokers in an Amazon MSK cluster. 
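Illustrative sketch (editor's note, not part of the generated patch): the ResourceId and ScalableDimension formats documented in the hunk above pair up when calling DescribeScalableTargets. The field shapes ([]*string, types.ScalableDimension) follow this diff, while the client setup (config.LoadDefaultConfig, NewFromConfig, the aws.String/aws.ToString helpers) and the context-first call style are assumptions based on later aws-sdk-go-v2 conventions, so treat this as a sketch rather than the exact surface at this commit.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/applicationautoscaling"
	"github.com/aws/aws-sdk-go-v2/service/applicationautoscaling/types"
)

func main() {
	ctx := context.Background()

	// Shared AWS configuration (region, credentials); loader name assumes the GA SDK.
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := applicationautoscaling.NewFromConfig(cfg)

	// Describe the scalable target for a DynamoDB table's read capacity. The
	// resource ID ("table/my-table") and scalable dimension string follow the
	// formats documented in the comment above; plain string conversions avoid
	// depending on specific enum constant names.
	out, err := client.DescribeScalableTargets(ctx, &applicationautoscaling.DescribeScalableTargetsInput{
		ServiceNamespace:  types.ServiceNamespace("dynamodb"),
		ResourceIds:       []*string{aws.String("table/my-table")},
		ScalableDimension: types.ScalableDimension("dynamodb:table:ReadCapacityUnits"),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, target := range out.ScalableTargets {
		fmt.Println("registered scalable target:", aws.ToString(target.ResourceId))
	}
}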
ScalableDimension types.ScalableDimension } diff --git a/service/applicationautoscaling/api_op_DescribeScalingActivities.go b/service/applicationautoscaling/api_op_DescribeScalingActivities.go index 6a59f19695f..2db9d86ee8c 100644 --- a/service/applicationautoscaling/api_op_DescribeScalingActivities.go +++ b/service/applicationautoscaling/api_op_DescribeScalingActivities.go @@ -52,68 +52,67 @@ type DescribeScalingActivitiesInput struct { // consists of the resource type and unique identifier. If you specify a scalable // dimension, you must also specify a resource ID. // - // * ECS service - The - // resource type is service and the unique identifier is the cluster name and - // service name. Example: service/default/sample-webapp. - // - // * Spot Fleet request - // - The resource type is spot-fleet-request and the unique identifier is the Spot - // Fleet request ID. Example: - // spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. - // - // * EMR cluster - // - The resource type is instancegroup and the unique identifier is the cluster ID - // and instance group ID. Example: + // * ECS service - The resource + // type is service and the unique identifier is the cluster name and service name. + // Example: service/default/sample-webapp. + // + // * Spot Fleet request - The resource + // type is spot-fleet-request and the unique identifier is the Spot Fleet request + // ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. + // + // * EMR + // cluster - The resource type is instancegroup and the unique identifier is the + // cluster ID and instance group ID. Example: // instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0. // - // * AppStream 2.0 fleet - The + // * AppStream 2.0 fleet - The // resource type is fleet and the unique identifier is the fleet name. Example: // fleet/sample-fleet. // - // * DynamoDB table - The resource type is table and the + // * DynamoDB table - The resource type is table and the // unique identifier is the table name. Example: table/my-table. // - // * DynamoDB - // global secondary index - The resource type is index and the unique identifier is - // the index name. Example: table/my-table/index/my-table-index. + // * DynamoDB global + // secondary index - The resource type is index and the unique identifier is the + // index name. Example: table/my-table/index/my-table-index. // - // * Aurora DB - // cluster - The resource type is cluster and the unique identifier is the cluster - // name. Example: cluster:my-db-cluster. + // * Aurora DB cluster - + // The resource type is cluster and the unique identifier is the cluster name. + // Example: cluster:my-db-cluster. // - // * Amazon SageMaker endpoint variant - - // The resource type is variant and the unique identifier is the resource ID. - // Example: endpoint/my-end-point/variant/KMeansClustering. + // * Amazon SageMaker endpoint variant - The + // resource type is variant and the unique identifier is the resource ID. Example: + // endpoint/my-end-point/variant/KMeansClustering. // - // * Custom resources - // are not supported with a resource type. This parameter must specify the - // OutputValue from the CloudFormation template stack used to access the resources. - // The unique identifier is defined by the service provider. More information is - // available in our GitHub repository + // * Custom resources are not + // supported with a resource type. This parameter must specify the OutputValue from + // the CloudFormation template stack used to access the resources. 
The unique + // identifier is defined by the service provider. More information is available in + // our GitHub repository // (https://github.com/aws/aws-auto-scaling-custom-resource). // - // * Amazon - // Comprehend document classification endpoint - The resource type and unique - // identifier are specified using the endpoint ARN. Example: + // * Amazon Comprehend + // document classification endpoint - The resource type and unique identifier are + // specified using the endpoint ARN. Example: // arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE. // - // - // * Amazon Comprehend entity recognizer endpoint - The resource type and unique + // * + // Amazon Comprehend entity recognizer endpoint - The resource type and unique // identifier are specified using the endpoint ARN. Example: // arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE. // - // - // * Lambda provisioned concurrency - The resource type is function and the unique + // * + // Lambda provisioned concurrency - The resource type is function and the unique // identifier is the function name with a function version or alias name suffix // that is not $LATEST. Example: function:my-function:prod or // function:my-function:1. // - // * Amazon Keyspaces table - The resource type is - // table and the unique identifier is the table name. Example: + // * Amazon Keyspaces table - The resource type is table + // and the unique identifier is the table name. Example: // keyspace/mykeyspace/table/mytable. // - // * Amazon MSK cluster - The resource type - // and unique identifier are specified using the cluster ARN. Example: + // * Amazon MSK cluster - The resource type and + // unique identifier are specified using the cluster ARN. Example: // arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5. ResourceId *string @@ -121,66 +120,65 @@ type DescribeScalingActivitiesInput struct { // type, and scaling property. If you specify a scalable dimension, you must also // specify a resource ID. // - // * ecs:service:DesiredCount - The desired task count - // of an ECS service. + // * ecs:service:DesiredCount - The desired task count of + // an ECS service. // - // * ec2:spot-fleet-request:TargetCapacity - The target - // capacity of a Spot Fleet request. + // * ec2:spot-fleet-request:TargetCapacity - The target capacity + // of a Spot Fleet request. // - // * - // elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR - // Instance Group. + // * elasticmapreduce:instancegroup:InstanceCount - The + // instance count of an EMR Instance Group. // - // * appstream:fleet:DesiredCapacity - The desired capacity of - // an AppStream 2.0 fleet. + // * appstream:fleet:DesiredCapacity - + // The desired capacity of an AppStream 2.0 fleet. // - // * dynamodb:table:ReadCapacityUnits - The - // provisioned read capacity for a DynamoDB table. + // * + // dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB + // table. // - // * - // dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a - // DynamoDB table. + // * dynamodb:table:WriteCapacityUnits - The provisioned write capacity for + // a DynamoDB table. // - // * dynamodb:index:ReadCapacityUnits - The provisioned read + // * dynamodb:index:ReadCapacityUnits - The provisioned read // capacity for a DynamoDB global secondary index. 
// - // * + // * // dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a // DynamoDB global secondary index. // - // * rds:cluster:ReadReplicaCount - The count - // of Aurora Replicas in an Aurora DB cluster. Available for Aurora - // MySQL-compatible edition and Aurora PostgreSQL-compatible edition. + // * rds:cluster:ReadReplicaCount - The count of + // Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible + // edition and Aurora PostgreSQL-compatible edition. // - // * + // * // sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an // Amazon SageMaker model endpoint variant. // - // * + // * // custom-resource:ResourceType:Property - The scalable dimension for a custom // resource provided by your own application or service. // - // * + // * // comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of // inference units for an Amazon Comprehend document classification endpoint. // - // - // * comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of + // * + // comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of // inference units for an Amazon Comprehend entity recognizer endpoint. // - // * + // * // lambda:function:ProvisionedConcurrency - The provisioned concurrency for a // Lambda function. // - // * cassandra:table:ReadCapacityUnits - The provisioned read + // * cassandra:table:ReadCapacityUnits - The provisioned read // capacity for an Amazon Keyspaces table. // - // * - // cassandra:table:WriteCapacityUnits - The provisioned write capacity for an - // Amazon Keyspaces table. + // * cassandra:table:WriteCapacityUnits - + // The provisioned write capacity for an Amazon Keyspaces table. // - // * kafka:broker-storage:VolumeSize - The provisioned - // volume size (in GiB) for brokers in an Amazon MSK cluster. + // * + // kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for + // brokers in an Amazon MSK cluster. ScalableDimension types.ScalableDimension } diff --git a/service/applicationautoscaling/api_op_DescribeScalingPolicies.go b/service/applicationautoscaling/api_op_DescribeScalingPolicies.go index f804f0d4ddc..f8afeec02d1 100644 --- a/service/applicationautoscaling/api_op_DescribeScalingPolicies.go +++ b/service/applicationautoscaling/api_op_DescribeScalingPolicies.go @@ -60,68 +60,67 @@ type DescribeScalingPoliciesInput struct { // consists of the resource type and unique identifier. If you specify a scalable // dimension, you must also specify a resource ID. // - // * ECS service - The - // resource type is service and the unique identifier is the cluster name and - // service name. Example: service/default/sample-webapp. - // - // * Spot Fleet request - // - The resource type is spot-fleet-request and the unique identifier is the Spot - // Fleet request ID. Example: - // spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. - // - // * EMR cluster - // - The resource type is instancegroup and the unique identifier is the cluster ID - // and instance group ID. Example: + // * ECS service - The resource + // type is service and the unique identifier is the cluster name and service name. + // Example: service/default/sample-webapp. + // + // * Spot Fleet request - The resource + // type is spot-fleet-request and the unique identifier is the Spot Fleet request + // ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. 
+ // + // * EMR + // cluster - The resource type is instancegroup and the unique identifier is the + // cluster ID and instance group ID. Example: // instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0. // - // * AppStream 2.0 fleet - The + // * AppStream 2.0 fleet - The // resource type is fleet and the unique identifier is the fleet name. Example: // fleet/sample-fleet. // - // * DynamoDB table - The resource type is table and the + // * DynamoDB table - The resource type is table and the // unique identifier is the table name. Example: table/my-table. // - // * DynamoDB - // global secondary index - The resource type is index and the unique identifier is - // the index name. Example: table/my-table/index/my-table-index. + // * DynamoDB global + // secondary index - The resource type is index and the unique identifier is the + // index name. Example: table/my-table/index/my-table-index. // - // * Aurora DB - // cluster - The resource type is cluster and the unique identifier is the cluster - // name. Example: cluster:my-db-cluster. + // * Aurora DB cluster - + // The resource type is cluster and the unique identifier is the cluster name. + // Example: cluster:my-db-cluster. // - // * Amazon SageMaker endpoint variant - - // The resource type is variant and the unique identifier is the resource ID. - // Example: endpoint/my-end-point/variant/KMeansClustering. + // * Amazon SageMaker endpoint variant - The + // resource type is variant and the unique identifier is the resource ID. Example: + // endpoint/my-end-point/variant/KMeansClustering. // - // * Custom resources - // are not supported with a resource type. This parameter must specify the - // OutputValue from the CloudFormation template stack used to access the resources. - // The unique identifier is defined by the service provider. More information is - // available in our GitHub repository + // * Custom resources are not + // supported with a resource type. This parameter must specify the OutputValue from + // the CloudFormation template stack used to access the resources. The unique + // identifier is defined by the service provider. More information is available in + // our GitHub repository // (https://github.com/aws/aws-auto-scaling-custom-resource). // - // * Amazon - // Comprehend document classification endpoint - The resource type and unique - // identifier are specified using the endpoint ARN. Example: + // * Amazon Comprehend + // document classification endpoint - The resource type and unique identifier are + // specified using the endpoint ARN. Example: // arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE. // - // - // * Amazon Comprehend entity recognizer endpoint - The resource type and unique + // * + // Amazon Comprehend entity recognizer endpoint - The resource type and unique // identifier are specified using the endpoint ARN. Example: // arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE. // - // - // * Lambda provisioned concurrency - The resource type is function and the unique + // * + // Lambda provisioned concurrency - The resource type is function and the unique // identifier is the function name with a function version or alias name suffix // that is not $LATEST. Example: function:my-function:prod or // function:my-function:1. // - // * Amazon Keyspaces table - The resource type is - // table and the unique identifier is the table name. Example: + // * Amazon Keyspaces table - The resource type is table + // and the unique identifier is the table name. 
Example: // keyspace/mykeyspace/table/mytable. // - // * Amazon MSK cluster - The resource type - // and unique identifier are specified using the cluster ARN. Example: + // * Amazon MSK cluster - The resource type and + // unique identifier are specified using the cluster ARN. Example: // arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5. ResourceId *string @@ -129,66 +128,65 @@ type DescribeScalingPoliciesInput struct { // type, and scaling property. If you specify a scalable dimension, you must also // specify a resource ID. // - // * ecs:service:DesiredCount - The desired task count - // of an ECS service. + // * ecs:service:DesiredCount - The desired task count of + // an ECS service. // - // * ec2:spot-fleet-request:TargetCapacity - The target - // capacity of a Spot Fleet request. + // * ec2:spot-fleet-request:TargetCapacity - The target capacity + // of a Spot Fleet request. // - // * - // elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR - // Instance Group. + // * elasticmapreduce:instancegroup:InstanceCount - The + // instance count of an EMR Instance Group. // - // * appstream:fleet:DesiredCapacity - The desired capacity of - // an AppStream 2.0 fleet. + // * appstream:fleet:DesiredCapacity - + // The desired capacity of an AppStream 2.0 fleet. // - // * dynamodb:table:ReadCapacityUnits - The - // provisioned read capacity for a DynamoDB table. + // * + // dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB + // table. // - // * - // dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a - // DynamoDB table. + // * dynamodb:table:WriteCapacityUnits - The provisioned write capacity for + // a DynamoDB table. // - // * dynamodb:index:ReadCapacityUnits - The provisioned read + // * dynamodb:index:ReadCapacityUnits - The provisioned read // capacity for a DynamoDB global secondary index. // - // * + // * // dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a // DynamoDB global secondary index. // - // * rds:cluster:ReadReplicaCount - The count - // of Aurora Replicas in an Aurora DB cluster. Available for Aurora - // MySQL-compatible edition and Aurora PostgreSQL-compatible edition. + // * rds:cluster:ReadReplicaCount - The count of + // Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible + // edition and Aurora PostgreSQL-compatible edition. // - // * + // * // sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an // Amazon SageMaker model endpoint variant. // - // * + // * // custom-resource:ResourceType:Property - The scalable dimension for a custom // resource provided by your own application or service. // - // * + // * // comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of // inference units for an Amazon Comprehend document classification endpoint. // - // - // * comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of + // * + // comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of // inference units for an Amazon Comprehend entity recognizer endpoint. // - // * + // * // lambda:function:ProvisionedConcurrency - The provisioned concurrency for a // Lambda function. // - // * cassandra:table:ReadCapacityUnits - The provisioned read + // * cassandra:table:ReadCapacityUnits - The provisioned read // capacity for an Amazon Keyspaces table. 
// - // * - // cassandra:table:WriteCapacityUnits - The provisioned write capacity for an - // Amazon Keyspaces table. + // * cassandra:table:WriteCapacityUnits - + // The provisioned write capacity for an Amazon Keyspaces table. // - // * kafka:broker-storage:VolumeSize - The provisioned - // volume size (in GiB) for brokers in an Amazon MSK cluster. + // * + // kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for + // brokers in an Amazon MSK cluster. ScalableDimension types.ScalableDimension } diff --git a/service/applicationautoscaling/api_op_DescribeScheduledActions.go b/service/applicationautoscaling/api_op_DescribeScheduledActions.go index 6a48c05e124..0758b3dac02 100644 --- a/service/applicationautoscaling/api_op_DescribeScheduledActions.go +++ b/service/applicationautoscaling/api_op_DescribeScheduledActions.go @@ -55,68 +55,67 @@ type DescribeScheduledActionsInput struct { // consists of the resource type and unique identifier. If you specify a scalable // dimension, you must also specify a resource ID. // - // * ECS service - The - // resource type is service and the unique identifier is the cluster name and - // service name. Example: service/default/sample-webapp. - // - // * Spot Fleet request - // - The resource type is spot-fleet-request and the unique identifier is the Spot - // Fleet request ID. Example: - // spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. - // - // * EMR cluster - // - The resource type is instancegroup and the unique identifier is the cluster ID - // and instance group ID. Example: + // * ECS service - The resource + // type is service and the unique identifier is the cluster name and service name. + // Example: service/default/sample-webapp. + // + // * Spot Fleet request - The resource + // type is spot-fleet-request and the unique identifier is the Spot Fleet request + // ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. + // + // * EMR + // cluster - The resource type is instancegroup and the unique identifier is the + // cluster ID and instance group ID. Example: // instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0. // - // * AppStream 2.0 fleet - The + // * AppStream 2.0 fleet - The // resource type is fleet and the unique identifier is the fleet name. Example: // fleet/sample-fleet. // - // * DynamoDB table - The resource type is table and the + // * DynamoDB table - The resource type is table and the // unique identifier is the table name. Example: table/my-table. // - // * DynamoDB - // global secondary index - The resource type is index and the unique identifier is - // the index name. Example: table/my-table/index/my-table-index. + // * DynamoDB global + // secondary index - The resource type is index and the unique identifier is the + // index name. Example: table/my-table/index/my-table-index. // - // * Aurora DB - // cluster - The resource type is cluster and the unique identifier is the cluster - // name. Example: cluster:my-db-cluster. + // * Aurora DB cluster - + // The resource type is cluster and the unique identifier is the cluster name. + // Example: cluster:my-db-cluster. // - // * Amazon SageMaker endpoint variant - - // The resource type is variant and the unique identifier is the resource ID. - // Example: endpoint/my-end-point/variant/KMeansClustering. + // * Amazon SageMaker endpoint variant - The + // resource type is variant and the unique identifier is the resource ID. Example: + // endpoint/my-end-point/variant/KMeansClustering. 
// - // * Custom resources - // are not supported with a resource type. This parameter must specify the - // OutputValue from the CloudFormation template stack used to access the resources. - // The unique identifier is defined by the service provider. More information is - // available in our GitHub repository + // * Custom resources are not + // supported with a resource type. This parameter must specify the OutputValue from + // the CloudFormation template stack used to access the resources. The unique + // identifier is defined by the service provider. More information is available in + // our GitHub repository // (https://github.com/aws/aws-auto-scaling-custom-resource). // - // * Amazon - // Comprehend document classification endpoint - The resource type and unique - // identifier are specified using the endpoint ARN. Example: + // * Amazon Comprehend + // document classification endpoint - The resource type and unique identifier are + // specified using the endpoint ARN. Example: // arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE. // - // - // * Amazon Comprehend entity recognizer endpoint - The resource type and unique + // * + // Amazon Comprehend entity recognizer endpoint - The resource type and unique // identifier are specified using the endpoint ARN. Example: // arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE. // - // - // * Lambda provisioned concurrency - The resource type is function and the unique + // * + // Lambda provisioned concurrency - The resource type is function and the unique // identifier is the function name with a function version or alias name suffix // that is not $LATEST. Example: function:my-function:prod or // function:my-function:1. // - // * Amazon Keyspaces table - The resource type is - // table and the unique identifier is the table name. Example: + // * Amazon Keyspaces table - The resource type is table + // and the unique identifier is the table name. Example: // keyspace/mykeyspace/table/mytable. // - // * Amazon MSK cluster - The resource type - // and unique identifier are specified using the cluster ARN. Example: + // * Amazon MSK cluster - The resource type and + // unique identifier are specified using the cluster ARN. Example: // arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5. ResourceId *string @@ -124,66 +123,65 @@ type DescribeScheduledActionsInput struct { // type, and scaling property. If you specify a scalable dimension, you must also // specify a resource ID. // - // * ecs:service:DesiredCount - The desired task count - // of an ECS service. + // * ecs:service:DesiredCount - The desired task count of + // an ECS service. // - // * ec2:spot-fleet-request:TargetCapacity - The target - // capacity of a Spot Fleet request. + // * ec2:spot-fleet-request:TargetCapacity - The target capacity + // of a Spot Fleet request. // - // * - // elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR - // Instance Group. + // * elasticmapreduce:instancegroup:InstanceCount - The + // instance count of an EMR Instance Group. // - // * appstream:fleet:DesiredCapacity - The desired capacity of - // an AppStream 2.0 fleet. + // * appstream:fleet:DesiredCapacity - + // The desired capacity of an AppStream 2.0 fleet. // - // * dynamodb:table:ReadCapacityUnits - The - // provisioned read capacity for a DynamoDB table. + // * + // dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB + // table. 
// - // * - // dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a - // DynamoDB table. + // * dynamodb:table:WriteCapacityUnits - The provisioned write capacity for + // a DynamoDB table. // - // * dynamodb:index:ReadCapacityUnits - The provisioned read + // * dynamodb:index:ReadCapacityUnits - The provisioned read // capacity for a DynamoDB global secondary index. // - // * + // * // dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a // DynamoDB global secondary index. // - // * rds:cluster:ReadReplicaCount - The count - // of Aurora Replicas in an Aurora DB cluster. Available for Aurora - // MySQL-compatible edition and Aurora PostgreSQL-compatible edition. + // * rds:cluster:ReadReplicaCount - The count of + // Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible + // edition and Aurora PostgreSQL-compatible edition. // - // * + // * // sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an // Amazon SageMaker model endpoint variant. // - // * + // * // custom-resource:ResourceType:Property - The scalable dimension for a custom // resource provided by your own application or service. // - // * + // * // comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of // inference units for an Amazon Comprehend document classification endpoint. // - // - // * comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of + // * + // comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of // inference units for an Amazon Comprehend entity recognizer endpoint. // - // * + // * // lambda:function:ProvisionedConcurrency - The provisioned concurrency for a // Lambda function. // - // * cassandra:table:ReadCapacityUnits - The provisioned read + // * cassandra:table:ReadCapacityUnits - The provisioned read // capacity for an Amazon Keyspaces table. // - // * - // cassandra:table:WriteCapacityUnits - The provisioned write capacity for an - // Amazon Keyspaces table. + // * cassandra:table:WriteCapacityUnits - + // The provisioned write capacity for an Amazon Keyspaces table. // - // * kafka:broker-storage:VolumeSize - The provisioned - // volume size (in GiB) for brokers in an Amazon MSK cluster. + // * + // kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for + // brokers in an Amazon MSK cluster. ScalableDimension types.ScalableDimension // The names of the scheduled actions to describe. diff --git a/service/applicationautoscaling/api_op_PutScalingPolicy.go b/service/applicationautoscaling/api_op_PutScalingPolicy.go index 85ae8968312..30a51816942 100644 --- a/service/applicationautoscaling/api_op_PutScalingPolicy.go +++ b/service/applicationautoscaling/api_op_PutScalingPolicy.go @@ -64,68 +64,68 @@ type PutScalingPolicyInput struct { // The identifier of the resource associated with the scaling policy. This string // consists of the resource type and unique identifier. // - // * ECS service - The + // * ECS service - The // resource type is service and the unique identifier is the cluster name and // service name. Example: service/default/sample-webapp. // - // * Spot Fleet request - // - The resource type is spot-fleet-request and the unique identifier is the Spot + // * Spot Fleet request - + // The resource type is spot-fleet-request and the unique identifier is the Spot // Fleet request ID. Example: // spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. 
// - // * EMR cluster - // - The resource type is instancegroup and the unique identifier is the cluster ID + // * EMR cluster - + // The resource type is instancegroup and the unique identifier is the cluster ID // and instance group ID. Example: // instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0. // - // * AppStream 2.0 fleet - The + // * AppStream 2.0 fleet - The // resource type is fleet and the unique identifier is the fleet name. Example: // fleet/sample-fleet. // - // * DynamoDB table - The resource type is table and the + // * DynamoDB table - The resource type is table and the // unique identifier is the table name. Example: table/my-table. // - // * DynamoDB - // global secondary index - The resource type is index and the unique identifier is - // the index name. Example: table/my-table/index/my-table-index. + // * DynamoDB global + // secondary index - The resource type is index and the unique identifier is the + // index name. Example: table/my-table/index/my-table-index. // - // * Aurora DB - // cluster - The resource type is cluster and the unique identifier is the cluster - // name. Example: cluster:my-db-cluster. + // * Aurora DB cluster - + // The resource type is cluster and the unique identifier is the cluster name. + // Example: cluster:my-db-cluster. // - // * Amazon SageMaker endpoint variant - - // The resource type is variant and the unique identifier is the resource ID. - // Example: endpoint/my-end-point/variant/KMeansClustering. + // * Amazon SageMaker endpoint variant - The + // resource type is variant and the unique identifier is the resource ID. Example: + // endpoint/my-end-point/variant/KMeansClustering. // - // * Custom resources - // are not supported with a resource type. This parameter must specify the - // OutputValue from the CloudFormation template stack used to access the resources. - // The unique identifier is defined by the service provider. More information is - // available in our GitHub repository + // * Custom resources are not + // supported with a resource type. This parameter must specify the OutputValue from + // the CloudFormation template stack used to access the resources. The unique + // identifier is defined by the service provider. More information is available in + // our GitHub repository // (https://github.com/aws/aws-auto-scaling-custom-resource). // - // * Amazon - // Comprehend document classification endpoint - The resource type and unique - // identifier are specified using the endpoint ARN. Example: + // * Amazon Comprehend + // document classification endpoint - The resource type and unique identifier are + // specified using the endpoint ARN. Example: // arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE. // - // - // * Amazon Comprehend entity recognizer endpoint - The resource type and unique + // * + // Amazon Comprehend entity recognizer endpoint - The resource type and unique // identifier are specified using the endpoint ARN. Example: // arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE. // - // - // * Lambda provisioned concurrency - The resource type is function and the unique + // * + // Lambda provisioned concurrency - The resource type is function and the unique // identifier is the function name with a function version or alias name suffix // that is not $LATEST. Example: function:my-function:prod or // function:my-function:1. // - // * Amazon Keyspaces table - The resource type is - // table and the unique identifier is the table name. 
Example: + // * Amazon Keyspaces table - The resource type is table + // and the unique identifier is the table name. Example: // keyspace/mykeyspace/table/mytable. // - // * Amazon MSK cluster - The resource type - // and unique identifier are specified using the cluster ARN. Example: + // * Amazon MSK cluster - The resource type and + // unique identifier are specified using the cluster ARN. Example: // arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5. // // This member is required. @@ -134,66 +134,65 @@ type PutScalingPolicyInput struct { // The scalable dimension. This string consists of the service namespace, resource // type, and scaling property. // - // * ecs:service:DesiredCount - The desired task - // count of an ECS service. + // * ecs:service:DesiredCount - The desired task count + // of an ECS service. // - // * ec2:spot-fleet-request:TargetCapacity - The - // target capacity of a Spot Fleet request. + // * ec2:spot-fleet-request:TargetCapacity - The target + // capacity of a Spot Fleet request. // - // * + // * // elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR // Instance Group. // - // * appstream:fleet:DesiredCapacity - The desired capacity of - // an AppStream 2.0 fleet. + // * appstream:fleet:DesiredCapacity - The desired capacity of an + // AppStream 2.0 fleet. // - // * dynamodb:table:ReadCapacityUnits - The - // provisioned read capacity for a DynamoDB table. + // * dynamodb:table:ReadCapacityUnits - The provisioned read + // capacity for a DynamoDB table. // - // * - // dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a - // DynamoDB table. + // * dynamodb:table:WriteCapacityUnits - The + // provisioned write capacity for a DynamoDB table. // - // * dynamodb:index:ReadCapacityUnits - The provisioned read - // capacity for a DynamoDB global secondary index. + // * + // dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB + // global secondary index. // - // * - // dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a - // DynamoDB global secondary index. + // * dynamodb:index:WriteCapacityUnits - The provisioned + // write capacity for a DynamoDB global secondary index. // - // * rds:cluster:ReadReplicaCount - The count - // of Aurora Replicas in an Aurora DB cluster. Available for Aurora - // MySQL-compatible edition and Aurora PostgreSQL-compatible edition. + // * + // rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB + // cluster. Available for Aurora MySQL-compatible edition and Aurora + // PostgreSQL-compatible edition. // - // * - // sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an - // Amazon SageMaker model endpoint variant. + // * sagemaker:variant:DesiredInstanceCount - The + // number of EC2 instances for an Amazon SageMaker model endpoint variant. // - // * + // * // custom-resource:ResourceType:Property - The scalable dimension for a custom // resource provided by your own application or service. // - // * + // * // comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of // inference units for an Amazon Comprehend document classification endpoint. // - // - // * comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of + // * + // comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of // inference units for an Amazon Comprehend entity recognizer endpoint. 
// - // * + // * // lambda:function:ProvisionedConcurrency - The provisioned concurrency for a // Lambda function. // - // * cassandra:table:ReadCapacityUnits - The provisioned read + // * cassandra:table:ReadCapacityUnits - The provisioned read // capacity for an Amazon Keyspaces table. // - // * - // cassandra:table:WriteCapacityUnits - The provisioned write capacity for an - // Amazon Keyspaces table. + // * cassandra:table:WriteCapacityUnits - + // The provisioned write capacity for an Amazon Keyspaces table. // - // * kafka:broker-storage:VolumeSize - The provisioned - // volume size (in GiB) for brokers in an Amazon MSK cluster. + // * + // kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for + // brokers in an Amazon MSK cluster. // // This member is required. ScalableDimension types.ScalableDimension diff --git a/service/applicationautoscaling/api_op_PutScheduledAction.go b/service/applicationautoscaling/api_op_PutScheduledAction.go index b43dc69eb5f..4bc1446f07e 100644 --- a/service/applicationautoscaling/api_op_PutScheduledAction.go +++ b/service/applicationautoscaling/api_op_PutScheduledAction.go @@ -47,68 +47,68 @@ type PutScheduledActionInput struct { // The identifier of the resource associated with the scheduled action. This string // consists of the resource type and unique identifier. // - // * ECS service - The + // * ECS service - The // resource type is service and the unique identifier is the cluster name and // service name. Example: service/default/sample-webapp. // - // * Spot Fleet request - // - The resource type is spot-fleet-request and the unique identifier is the Spot + // * Spot Fleet request - + // The resource type is spot-fleet-request and the unique identifier is the Spot // Fleet request ID. Example: // spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. // - // * EMR cluster - // - The resource type is instancegroup and the unique identifier is the cluster ID + // * EMR cluster - + // The resource type is instancegroup and the unique identifier is the cluster ID // and instance group ID. Example: // instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0. // - // * AppStream 2.0 fleet - The + // * AppStream 2.0 fleet - The // resource type is fleet and the unique identifier is the fleet name. Example: // fleet/sample-fleet. // - // * DynamoDB table - The resource type is table and the + // * DynamoDB table - The resource type is table and the // unique identifier is the table name. Example: table/my-table. // - // * DynamoDB - // global secondary index - The resource type is index and the unique identifier is - // the index name. Example: table/my-table/index/my-table-index. + // * DynamoDB global + // secondary index - The resource type is index and the unique identifier is the + // index name. Example: table/my-table/index/my-table-index. // - // * Aurora DB - // cluster - The resource type is cluster and the unique identifier is the cluster - // name. Example: cluster:my-db-cluster. + // * Aurora DB cluster - + // The resource type is cluster and the unique identifier is the cluster name. + // Example: cluster:my-db-cluster. // - // * Amazon SageMaker endpoint variant - - // The resource type is variant and the unique identifier is the resource ID. - // Example: endpoint/my-end-point/variant/KMeansClustering. + // * Amazon SageMaker endpoint variant - The + // resource type is variant and the unique identifier is the resource ID. Example: + // endpoint/my-end-point/variant/KMeansClustering. 
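For PutScalingPolicy the same three identifiers select the target, and the policy body rides alongside them. A sketch of a target-tracking policy on the documented ECS dimension; the policy name is made up, and the "TargetTrackingScaling" and "ECSServiceAverageCPUUtilization" strings are the service's enum values as recalled rather than taken from this diff:

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/applicationautoscaling"
	"github.com/aws/aws-sdk-go-v2/service/applicationautoscaling/types"
)

// putCPUTargetTrackingPolicy keeps the sample ECS service near 75% average CPU.
func putCPUTargetTrackingPolicy(ctx context.Context, client *applicationautoscaling.Client) error {
	_, err := client.PutScalingPolicy(ctx, &applicationautoscaling.PutScalingPolicyInput{
		PolicyName:        aws.String("cpu75-target-tracking"), // placeholder name
		PolicyType:        types.PolicyType("TargetTrackingScaling"),
		ServiceNamespace:  types.ServiceNamespaceEcs,
		ResourceId:        aws.String("service/default/sample-webapp"),
		ScalableDimension: types.ScalableDimension("ecs:service:DesiredCount"),
		TargetTrackingScalingPolicyConfiguration: &types.TargetTrackingScalingPolicyConfiguration{
			TargetValue: aws.Float64(75.0),
			PredefinedMetricSpecification: &types.PredefinedMetricSpecification{
				PredefinedMetricType: types.MetricType("ECSServiceAverageCPUUtilization"),
			},
		},
	})
	// On success the response carries the policy ARN and any CloudWatch alarms created.
	return err
}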
// - // * Custom resources - // are not supported with a resource type. This parameter must specify the - // OutputValue from the CloudFormation template stack used to access the resources. - // The unique identifier is defined by the service provider. More information is - // available in our GitHub repository + // * Custom resources are not + // supported with a resource type. This parameter must specify the OutputValue from + // the CloudFormation template stack used to access the resources. The unique + // identifier is defined by the service provider. More information is available in + // our GitHub repository // (https://github.com/aws/aws-auto-scaling-custom-resource). // - // * Amazon - // Comprehend document classification endpoint - The resource type and unique - // identifier are specified using the endpoint ARN. Example: + // * Amazon Comprehend + // document classification endpoint - The resource type and unique identifier are + // specified using the endpoint ARN. Example: // arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE. // - // - // * Amazon Comprehend entity recognizer endpoint - The resource type and unique + // * + // Amazon Comprehend entity recognizer endpoint - The resource type and unique // identifier are specified using the endpoint ARN. Example: // arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE. // - // - // * Lambda provisioned concurrency - The resource type is function and the unique + // * + // Lambda provisioned concurrency - The resource type is function and the unique // identifier is the function name with a function version or alias name suffix // that is not $LATEST. Example: function:my-function:prod or // function:my-function:1. // - // * Amazon Keyspaces table - The resource type is - // table and the unique identifier is the table name. Example: + // * Amazon Keyspaces table - The resource type is table + // and the unique identifier is the table name. Example: // keyspace/mykeyspace/table/mytable. // - // * Amazon MSK cluster - The resource type - // and unique identifier are specified using the cluster ARN. Example: + // * Amazon MSK cluster - The resource type and + // unique identifier are specified using the cluster ARN. Example: // arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5. // // This member is required. @@ -117,66 +117,65 @@ type PutScheduledActionInput struct { // The scalable dimension. This string consists of the service namespace, resource // type, and scaling property. // - // * ecs:service:DesiredCount - The desired task - // count of an ECS service. + // * ecs:service:DesiredCount - The desired task count + // of an ECS service. // - // * ec2:spot-fleet-request:TargetCapacity - The - // target capacity of a Spot Fleet request. + // * ec2:spot-fleet-request:TargetCapacity - The target + // capacity of a Spot Fleet request. // - // * + // * // elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR // Instance Group. // - // * appstream:fleet:DesiredCapacity - The desired capacity of - // an AppStream 2.0 fleet. + // * appstream:fleet:DesiredCapacity - The desired capacity of an + // AppStream 2.0 fleet. // - // * dynamodb:table:ReadCapacityUnits - The - // provisioned read capacity for a DynamoDB table. + // * dynamodb:table:ReadCapacityUnits - The provisioned read + // capacity for a DynamoDB table. 
// - // * - // dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a - // DynamoDB table. + // * dynamodb:table:WriteCapacityUnits - The + // provisioned write capacity for a DynamoDB table. // - // * dynamodb:index:ReadCapacityUnits - The provisioned read - // capacity for a DynamoDB global secondary index. + // * + // dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB + // global secondary index. // - // * - // dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a - // DynamoDB global secondary index. + // * dynamodb:index:WriteCapacityUnits - The provisioned + // write capacity for a DynamoDB global secondary index. // - // * rds:cluster:ReadReplicaCount - The count - // of Aurora Replicas in an Aurora DB cluster. Available for Aurora - // MySQL-compatible edition and Aurora PostgreSQL-compatible edition. + // * + // rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB + // cluster. Available for Aurora MySQL-compatible edition and Aurora + // PostgreSQL-compatible edition. // - // * - // sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an - // Amazon SageMaker model endpoint variant. + // * sagemaker:variant:DesiredInstanceCount - The + // number of EC2 instances for an Amazon SageMaker model endpoint variant. // - // * + // * // custom-resource:ResourceType:Property - The scalable dimension for a custom // resource provided by your own application or service. // - // * + // * // comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of // inference units for an Amazon Comprehend document classification endpoint. // - // - // * comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of + // * + // comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of // inference units for an Amazon Comprehend entity recognizer endpoint. // - // * + // * // lambda:function:ProvisionedConcurrency - The provisioned concurrency for a // Lambda function. // - // * cassandra:table:ReadCapacityUnits - The provisioned read + // * cassandra:table:ReadCapacityUnits - The provisioned read // capacity for an Amazon Keyspaces table. // - // * - // cassandra:table:WriteCapacityUnits - The provisioned write capacity for an - // Amazon Keyspaces table. + // * cassandra:table:WriteCapacityUnits - + // The provisioned write capacity for an Amazon Keyspaces table. // - // * kafka:broker-storage:VolumeSize - The provisioned - // volume size (in GiB) for brokers in an Amazon MSK cluster. + // * + // kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for + // brokers in an Amazon MSK cluster. // // This member is required. ScalableDimension types.ScalableDimension @@ -205,13 +204,13 @@ type PutScheduledActionInput struct { // The schedule for this action. The following formats are supported: // - // * At + // * At // expressions - "at(yyyy-mm-ddThh:mm:ss)" // - // * Rate expressions - "rate(value + // * Rate expressions - "rate(value // unit)" // - // * Cron expressions - "cron(fields)" + // * Cron expressions - "cron(fields)" // // At expressions are useful for // one-time schedules. Specify the time in UTC. 
For rate expressions, value is a diff --git a/service/applicationautoscaling/api_op_RegisterScalableTarget.go b/service/applicationautoscaling/api_op_RegisterScalableTarget.go index b6c33e3ef8a..21cacffec6c 100644 --- a/service/applicationautoscaling/api_op_RegisterScalableTarget.go +++ b/service/applicationautoscaling/api_op_RegisterScalableTarget.go @@ -52,68 +52,68 @@ type RegisterScalableTargetInput struct { // The identifier of the resource that is associated with the scalable target. This // string consists of the resource type and unique identifier. // - // * ECS service - - // The resource type is service and the unique identifier is the cluster name and + // * ECS service - The + // resource type is service and the unique identifier is the cluster name and // service name. Example: service/default/sample-webapp. // - // * Spot Fleet request - // - The resource type is spot-fleet-request and the unique identifier is the Spot + // * Spot Fleet request - + // The resource type is spot-fleet-request and the unique identifier is the Spot // Fleet request ID. Example: // spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. // - // * EMR cluster - // - The resource type is instancegroup and the unique identifier is the cluster ID + // * EMR cluster - + // The resource type is instancegroup and the unique identifier is the cluster ID // and instance group ID. Example: // instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0. // - // * AppStream 2.0 fleet - The + // * AppStream 2.0 fleet - The // resource type is fleet and the unique identifier is the fleet name. Example: // fleet/sample-fleet. // - // * DynamoDB table - The resource type is table and the + // * DynamoDB table - The resource type is table and the // unique identifier is the table name. Example: table/my-table. // - // * DynamoDB - // global secondary index - The resource type is index and the unique identifier is - // the index name. Example: table/my-table/index/my-table-index. + // * DynamoDB global + // secondary index - The resource type is index and the unique identifier is the + // index name. Example: table/my-table/index/my-table-index. // - // * Aurora DB - // cluster - The resource type is cluster and the unique identifier is the cluster - // name. Example: cluster:my-db-cluster. + // * Aurora DB cluster - + // The resource type is cluster and the unique identifier is the cluster name. + // Example: cluster:my-db-cluster. // - // * Amazon SageMaker endpoint variant - - // The resource type is variant and the unique identifier is the resource ID. - // Example: endpoint/my-end-point/variant/KMeansClustering. + // * Amazon SageMaker endpoint variant - The + // resource type is variant and the unique identifier is the resource ID. Example: + // endpoint/my-end-point/variant/KMeansClustering. // - // * Custom resources - // are not supported with a resource type. This parameter must specify the - // OutputValue from the CloudFormation template stack used to access the resources. - // The unique identifier is defined by the service provider. More information is - // available in our GitHub repository + // * Custom resources are not + // supported with a resource type. This parameter must specify the OutputValue from + // the CloudFormation template stack used to access the resources. The unique + // identifier is defined by the service provider. More information is available in + // our GitHub repository // (https://github.com/aws/aws-auto-scaling-custom-resource). 
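The Schedule string for PutScheduledAction accepts the at(...), rate(...), and cron(...) forms listed above. A sketch that lowers a DynamoDB table's write-capacity bounds every evening at 22:00 UTC; the action name, capacity bounds, and the particular cron expression are illustrative:

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/applicationautoscaling"
	"github.com/aws/aws-sdk-go-v2/service/applicationautoscaling/types"
)

// putNightlyScaleDown lowers the write-capacity bounds of table/my-table each night.
func putNightlyScaleDown(ctx context.Context, client *applicationautoscaling.Client) error {
	_, err := client.PutScheduledAction(ctx, &applicationautoscaling.PutScheduledActionInput{
		ScheduledActionName: aws.String("nightly-scale-down"), // placeholder name
		ServiceNamespace:    types.ServiceNamespaceDynamodb,
		ResourceId:          aws.String("table/my-table"),
		ScalableDimension:   types.ScalableDimension("dynamodb:table:WriteCapacityUnits"),
		Schedule:            aws.String("cron(0 22 * * ? *)"), // a cron(fields) expression, evaluated in UTC
		ScalableTargetAction: &types.ScalableTargetAction{
			MinCapacity: aws.Int32(1),
			MaxCapacity: aws.Int32(5),
		},
	})
	return err
}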
// - // * Amazon - // Comprehend document classification endpoint - The resource type and unique - // identifier are specified using the endpoint ARN. Example: + // * Amazon Comprehend + // document classification endpoint - The resource type and unique identifier are + // specified using the endpoint ARN. Example: // arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE. // - // - // * Amazon Comprehend entity recognizer endpoint - The resource type and unique + // * + // Amazon Comprehend entity recognizer endpoint - The resource type and unique // identifier are specified using the endpoint ARN. Example: // arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE. // - // - // * Lambda provisioned concurrency - The resource type is function and the unique + // * + // Lambda provisioned concurrency - The resource type is function and the unique // identifier is the function name with a function version or alias name suffix // that is not $LATEST. Example: function:my-function:prod or // function:my-function:1. // - // * Amazon Keyspaces table - The resource type is - // table and the unique identifier is the table name. Example: + // * Amazon Keyspaces table - The resource type is table + // and the unique identifier is the table name. Example: // keyspace/mykeyspace/table/mytable. // - // * Amazon MSK cluster - The resource type - // and unique identifier are specified using the cluster ARN. Example: + // * Amazon MSK cluster - The resource type and + // unique identifier are specified using the cluster ARN. Example: // arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5. // // This member is required. @@ -122,66 +122,65 @@ type RegisterScalableTargetInput struct { // The scalable dimension associated with the scalable target. This string consists // of the service namespace, resource type, and scaling property. // - // * + // * // ecs:service:DesiredCount - The desired task count of an ECS service. // - // * + // * // ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet // request. // - // * elasticmapreduce:instancegroup:InstanceCount - The instance - // count of an EMR Instance Group. + // * elasticmapreduce:instancegroup:InstanceCount - The instance count of + // an EMR Instance Group. // - // * appstream:fleet:DesiredCapacity - The - // desired capacity of an AppStream 2.0 fleet. + // * appstream:fleet:DesiredCapacity - The desired capacity + // of an AppStream 2.0 fleet. // - // * - // dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB - // table. + // * dynamodb:table:ReadCapacityUnits - The provisioned + // read capacity for a DynamoDB table. // - // * dynamodb:table:WriteCapacityUnits - The provisioned write capacity - // for a DynamoDB table. + // * dynamodb:table:WriteCapacityUnits - The + // provisioned write capacity for a DynamoDB table. // - // * dynamodb:index:ReadCapacityUnits - The provisioned - // read capacity for a DynamoDB global secondary index. + // * + // dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB + // global secondary index. // - // * - // dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a - // DynamoDB global secondary index. + // * dynamodb:index:WriteCapacityUnits - The provisioned + // write capacity for a DynamoDB global secondary index. // - // * rds:cluster:ReadReplicaCount - The count - // of Aurora Replicas in an Aurora DB cluster. 
Available for Aurora - // MySQL-compatible edition and Aurora PostgreSQL-compatible edition. + // * + // rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB + // cluster. Available for Aurora MySQL-compatible edition and Aurora + // PostgreSQL-compatible edition. // - // * - // sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an - // Amazon SageMaker model endpoint variant. + // * sagemaker:variant:DesiredInstanceCount - The + // number of EC2 instances for an Amazon SageMaker model endpoint variant. // - // * + // * // custom-resource:ResourceType:Property - The scalable dimension for a custom // resource provided by your own application or service. // - // * + // * // comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of // inference units for an Amazon Comprehend document classification endpoint. // - // - // * comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of + // * + // comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of // inference units for an Amazon Comprehend entity recognizer endpoint. // - // * + // * // lambda:function:ProvisionedConcurrency - The provisioned concurrency for a // Lambda function. // - // * cassandra:table:ReadCapacityUnits - The provisioned read + // * cassandra:table:ReadCapacityUnits - The provisioned read // capacity for an Amazon Keyspaces table. // - // * - // cassandra:table:WriteCapacityUnits - The provisioned write capacity for an - // Amazon Keyspaces table. + // * cassandra:table:WriteCapacityUnits - + // The provisioned write capacity for an Amazon Keyspaces table. // - // * kafka:broker-storage:VolumeSize - The provisioned - // volume size (in GiB) for brokers in an Amazon MSK cluster. + // * + // kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for + // brokers in an Amazon MSK cluster. // // This member is required. ScalableDimension types.ScalableDimension @@ -228,15 +227,15 @@ type RegisterScalableTargetInput struct { // true suspends the specified scaling activities. Setting it to false (default) // resumes the specified scaling activities. Suspension Outcomes // - // * For + // * For // DynamicScalingInSuspended, while a suspension is in effect, all scale-in // activities that are triggered by a scaling policy are suspended. // - // * For + // * For // DynamicScalingOutSuspended, while a suspension is in effect, all scale-out // activities that are triggered by a scaling policy are suspended. // - // * For + // * For // ScheduledScalingSuspended, while a suspension is in effect, all scaling // activities that involve scheduled actions are suspended. 
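RegisterScalableTarget is also where the suspension settings above are applied. A sketch that registers the sample ECS service with capacity bounds and suspends policy-driven scale-in and scale-out while leaving scheduled scaling active; the SuspendedState field names come from the documentation above, and the capacity values are illustrative:

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/applicationautoscaling"
	"github.com/aws/aws-sdk-go-v2/service/applicationautoscaling/types"
)

// registerAndSuspendDynamicScaling registers the sample ECS service as a scalable
// target and pauses scaling-policy activity without removing the policies.
func registerAndSuspendDynamicScaling(ctx context.Context, client *applicationautoscaling.Client) error {
	_, err := client.RegisterScalableTarget(ctx, &applicationautoscaling.RegisterScalableTargetInput{
		ServiceNamespace:  types.ServiceNamespaceEcs,
		ResourceId:        aws.String("service/default/sample-webapp"),
		ScalableDimension: types.ScalableDimension("ecs:service:DesiredCount"),
		MinCapacity:       aws.Int32(1),
		MaxCapacity:       aws.Int32(10),
		SuspendedState: &types.SuspendedState{
			DynamicScalingInSuspended:  aws.Bool(true),
			DynamicScalingOutSuspended: aws.Bool(true),
			ScheduledScalingSuspended:  aws.Bool(false),
		},
	})
	return err
}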
// diff --git a/service/applicationautoscaling/doc.go b/service/applicationautoscaling/doc.go index 021f0fd7479..01f00411e67 100644 --- a/service/applicationautoscaling/doc.go +++ b/service/applicationautoscaling/doc.go @@ -6,54 +6,53 @@ // With Application Auto Scaling, you can configure automatic scaling for the // following resources: // -// * Amazon ECS services +// * Amazon ECS services // -// * Amazon EC2 Spot Fleet -// requests +// * Amazon EC2 Spot Fleet requests // -// * Amazon EMR clusters +// * +// Amazon EMR clusters // -// * Amazon AppStream 2.0 fleets +// * Amazon AppStream 2.0 fleets // -// * -// Amazon DynamoDB tables and global secondary indexes throughput capacity +// * Amazon DynamoDB tables and +// global secondary indexes throughput capacity // -// * -// Amazon Aurora Replicas +// * Amazon Aurora Replicas // -// * Amazon SageMaker endpoint variants +// * Amazon +// SageMaker endpoint variants // -// * Custom -// resources provided by your own applications or services +// * Custom resources provided by your own +// applications or services // -// * Amazon Comprehend -// document classification and entity recognizer endpoints +// * Amazon Comprehend document classification and entity +// recognizer endpoints // -// * AWS Lambda -// function provisioned concurrency +// * AWS Lambda function provisioned concurrency // -// * Amazon Keyspaces (for Apache Cassandra) -// tables +// * Amazon +// Keyspaces (for Apache Cassandra) tables // -// * Amazon Managed Streaming for Apache Kafka cluster storage +// * Amazon Managed Streaming for Apache +// Kafka cluster storage // -// API -// Summary The Application Auto Scaling service API includes three key sets of -// actions: +// API Summary The Application Auto Scaling service API +// includes three key sets of actions: // -// * Register and manage scalable targets - Register AWS or custom -// resources as scalable targets (a resource that Application Auto Scaling can -// scale), set minimum and maximum capacity limits, and retrieve information on -// existing scalable targets. +// * Register and manage scalable targets - +// Register AWS or custom resources as scalable targets (a resource that +// Application Auto Scaling can scale), set minimum and maximum capacity limits, +// and retrieve information on existing scalable targets. // -// * Configure and manage automatic scaling - -// Define scaling policies to dynamically scale your resources in response to -// CloudWatch alarms, schedule one-time or recurring scaling actions, and retrieve -// your recent scaling activity history. +// * Configure and manage +// automatic scaling - Define scaling policies to dynamically scale your resources +// in response to CloudWatch alarms, schedule one-time or recurring scaling +// actions, and retrieve your recent scaling activity history. // -// * Suspend and resume scaling - -// Temporarily suspend and later resume automatic scaling by calling the -// RegisterScalableTarget +// * Suspend and +// resume scaling - Temporarily suspend and later resume automatic scaling by +// calling the RegisterScalableTarget // (https://docs.aws.amazon.com/autoscaling/application/APIReference/API_RegisterScalableTarget.html) // API action for any Application Auto Scaling scalable target. 
You can suspend and // resume (individually or in combination) scale-out activities that are triggered diff --git a/service/applicationautoscaling/types/enums.go b/service/applicationautoscaling/types/enums.go index c12d983dc7a..2d27bbb0d03 100644 --- a/service/applicationautoscaling/types/enums.go +++ b/service/applicationautoscaling/types/enums.go @@ -210,18 +210,18 @@ type ServiceNamespace string // Enum values for ServiceNamespace const ( - ServiceNamespaceEcs ServiceNamespace = "ecs" - ServiceNamespaceEmr ServiceNamespace = "elasticmapreduce" - ServiceNamespaceEc2 ServiceNamespace = "ec2" - ServiceNamespaceAppstream ServiceNamespace = "appstream" - ServiceNamespaceDynamodb ServiceNamespace = "dynamodb" - ServiceNamespaceRds ServiceNamespace = "rds" - ServiceNamespaceSagemaker ServiceNamespace = "sagemaker" - ServiceNamespaceCustom_resource ServiceNamespace = "custom-resource" - ServiceNamespaceComprehend ServiceNamespace = "comprehend" - ServiceNamespaceLambda ServiceNamespace = "lambda" - ServiceNamespaceCassandra ServiceNamespace = "cassandra" - ServiceNamespaceKafka ServiceNamespace = "kafka" + ServiceNamespaceEcs ServiceNamespace = "ecs" + ServiceNamespaceEmr ServiceNamespace = "elasticmapreduce" + ServiceNamespaceEc2 ServiceNamespace = "ec2" + ServiceNamespaceAppstream ServiceNamespace = "appstream" + ServiceNamespaceDynamodb ServiceNamespace = "dynamodb" + ServiceNamespaceRds ServiceNamespace = "rds" + ServiceNamespaceSagemaker ServiceNamespace = "sagemaker" + ServiceNamespaceCustomResource ServiceNamespace = "custom-resource" + ServiceNamespaceComprehend ServiceNamespace = "comprehend" + ServiceNamespaceLambda ServiceNamespace = "lambda" + ServiceNamespaceCassandra ServiceNamespace = "cassandra" + ServiceNamespaceKafka ServiceNamespace = "kafka" ) // Values returns all known values for ServiceNamespace. Note that this can be diff --git a/service/applicationautoscaling/types/types.go b/service/applicationautoscaling/types/types.go index 3c0e5595f4d..f86a92d59cc 100644 --- a/service/applicationautoscaling/types/types.go +++ b/service/applicationautoscaling/types/types.go @@ -27,14 +27,14 @@ type Alarm struct { // in the Amazon CloudWatch User Guide. To create your customized metric // specification: // -// * Add values for each required parameter from CloudWatch. -// You can use an existing metric, or a new metric that you create. To use your own +// * Add values for each required parameter from CloudWatch. You +// can use an existing metric, or a new metric that you create. To use your own // metric, you must first publish the metric to CloudWatch. For more information, // see Publish Custom Metrics // (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/publishingMetrics.html) // in the Amazon CloudWatch User Guide. // -// * Choose a metric that changes +// * Choose a metric that changes // proportionally with capacity. The value of the metric should increase or // decrease in inverse proportion to the number of capacity units. That is, the // value of the metric should decrease when capacity increases, and increase when @@ -104,11 +104,11 @@ type PredefinedMetricSpecification struct { // final portion of the target group ARN into a single value, separated by a // forward slash (/). The format is app///targetgroup//, where: // - // * app// is the + // * app// is the // final portion of the load balancer ARN // - // * targetgroup// is the final portion - // of the target group ARN. + // * targetgroup// is the final portion of + // the target group ARN. 
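The ServiceNamespace constants above are passed directly as input values; note the custom-resource constant is now spelled ServiceNamespaceCustomResource. A sketch that uses it with DescribeScalableTargets (assumed here as the read path) and prints required fields of the returned ScalableTarget values; the function name is a placeholder:

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/applicationautoscaling"
	"github.com/aws/aws-sdk-go-v2/service/applicationautoscaling/types"
)

// listCustomResourceTargets prints the scalable targets registered under the
// custom-resource namespace.
func listCustomResourceTargets(ctx context.Context, client *applicationautoscaling.Client) error {
	out, err := client.DescribeScalableTargets(ctx, &applicationautoscaling.DescribeScalableTargetsInput{
		ServiceNamespace: types.ServiceNamespaceCustomResource, // renamed from ServiceNamespaceCustom_resource
	})
	if err != nil {
		return err
	}
	for _, t := range out.ScalableTargets {
		// ResourceId, MinCapacity, and MaxCapacity are required fields of ScalableTarget.
		fmt.Printf("%s: %d-%d (%s)\n", *t.ResourceId, *t.MinCapacity, *t.MaxCapacity, t.ScalableDimension)
	}
	return nil
}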
// // This is an example: // app/EC2Co-EcsEl-1TKLTMITMM0EO/f37c06a68c1748aa/targetgroup/EC2Co-Defau-LDNM7Q3ZH1ZN/6d4ea56ca2d6a18d. @@ -142,68 +142,68 @@ type ScalableTarget struct { // The identifier of the resource associated with the scalable target. This string // consists of the resource type and unique identifier. // - // * ECS service - The + // * ECS service - The // resource type is service and the unique identifier is the cluster name and // service name. Example: service/default/sample-webapp. // - // * Spot Fleet request - // - The resource type is spot-fleet-request and the unique identifier is the Spot + // * Spot Fleet request - + // The resource type is spot-fleet-request and the unique identifier is the Spot // Fleet request ID. Example: // spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. // - // * EMR cluster - // - The resource type is instancegroup and the unique identifier is the cluster ID + // * EMR cluster - + // The resource type is instancegroup and the unique identifier is the cluster ID // and instance group ID. Example: // instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0. // - // * AppStream 2.0 fleet - The + // * AppStream 2.0 fleet - The // resource type is fleet and the unique identifier is the fleet name. Example: // fleet/sample-fleet. // - // * DynamoDB table - The resource type is table and the + // * DynamoDB table - The resource type is table and the // unique identifier is the table name. Example: table/my-table. // - // * DynamoDB - // global secondary index - The resource type is index and the unique identifier is - // the index name. Example: table/my-table/index/my-table-index. + // * DynamoDB global + // secondary index - The resource type is index and the unique identifier is the + // index name. Example: table/my-table/index/my-table-index. // - // * Aurora DB - // cluster - The resource type is cluster and the unique identifier is the cluster - // name. Example: cluster:my-db-cluster. + // * Aurora DB cluster - + // The resource type is cluster and the unique identifier is the cluster name. + // Example: cluster:my-db-cluster. // - // * Amazon SageMaker endpoint variant - - // The resource type is variant and the unique identifier is the resource ID. - // Example: endpoint/my-end-point/variant/KMeansClustering. + // * Amazon SageMaker endpoint variant - The + // resource type is variant and the unique identifier is the resource ID. Example: + // endpoint/my-end-point/variant/KMeansClustering. // - // * Custom resources - // are not supported with a resource type. This parameter must specify the - // OutputValue from the CloudFormation template stack used to access the resources. - // The unique identifier is defined by the service provider. More information is - // available in our GitHub repository + // * Custom resources are not + // supported with a resource type. This parameter must specify the OutputValue from + // the CloudFormation template stack used to access the resources. The unique + // identifier is defined by the service provider. More information is available in + // our GitHub repository // (https://github.com/aws/aws-auto-scaling-custom-resource). // - // * Amazon - // Comprehend document classification endpoint - The resource type and unique - // identifier are specified using the endpoint ARN. Example: + // * Amazon Comprehend + // document classification endpoint - The resource type and unique identifier are + // specified using the endpoint ARN. 
Example: // arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE. // - // - // * Amazon Comprehend entity recognizer endpoint - The resource type and unique + // * + // Amazon Comprehend entity recognizer endpoint - The resource type and unique // identifier are specified using the endpoint ARN. Example: // arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE. // - // - // * Lambda provisioned concurrency - The resource type is function and the unique + // * + // Lambda provisioned concurrency - The resource type is function and the unique // identifier is the function name with a function version or alias name suffix // that is not $LATEST. Example: function:my-function:prod or // function:my-function:1. // - // * Amazon Keyspaces table - The resource type is - // table and the unique identifier is the table name. Example: + // * Amazon Keyspaces table - The resource type is table + // and the unique identifier is the table name. Example: // keyspace/mykeyspace/table/mytable. // - // * Amazon MSK cluster - The resource type - // and unique identifier are specified using the cluster ARN. Example: + // * Amazon MSK cluster - The resource type and + // unique identifier are specified using the cluster ARN. Example: // arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5. // // This member is required. @@ -218,66 +218,65 @@ type ScalableTarget struct { // The scalable dimension associated with the scalable target. This string consists // of the service namespace, resource type, and scaling property. // - // * + // * // ecs:service:DesiredCount - The desired task count of an ECS service. // - // * + // * // ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet // request. // - // * elasticmapreduce:instancegroup:InstanceCount - The instance - // count of an EMR Instance Group. + // * elasticmapreduce:instancegroup:InstanceCount - The instance count of + // an EMR Instance Group. // - // * appstream:fleet:DesiredCapacity - The - // desired capacity of an AppStream 2.0 fleet. + // * appstream:fleet:DesiredCapacity - The desired capacity + // of an AppStream 2.0 fleet. // - // * - // dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB - // table. + // * dynamodb:table:ReadCapacityUnits - The provisioned + // read capacity for a DynamoDB table. // - // * dynamodb:table:WriteCapacityUnits - The provisioned write capacity - // for a DynamoDB table. + // * dynamodb:table:WriteCapacityUnits - The + // provisioned write capacity for a DynamoDB table. // - // * dynamodb:index:ReadCapacityUnits - The provisioned - // read capacity for a DynamoDB global secondary index. + // * + // dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB + // global secondary index. // - // * - // dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a - // DynamoDB global secondary index. + // * dynamodb:index:WriteCapacityUnits - The provisioned + // write capacity for a DynamoDB global secondary index. // - // * rds:cluster:ReadReplicaCount - The count - // of Aurora Replicas in an Aurora DB cluster. Available for Aurora - // MySQL-compatible edition and Aurora PostgreSQL-compatible edition. + // * + // rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB + // cluster. Available for Aurora MySQL-compatible edition and Aurora + // PostgreSQL-compatible edition. 
// - // * - // sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an - // Amazon SageMaker model endpoint variant. + // * sagemaker:variant:DesiredInstanceCount - The + // number of EC2 instances for an Amazon SageMaker model endpoint variant. // - // * + // * // custom-resource:ResourceType:Property - The scalable dimension for a custom // resource provided by your own application or service. // - // * + // * // comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of // inference units for an Amazon Comprehend document classification endpoint. // - // - // * comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of + // * + // comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of // inference units for an Amazon Comprehend entity recognizer endpoint. // - // * + // * // lambda:function:ProvisionedConcurrency - The provisioned concurrency for a // Lambda function. // - // * cassandra:table:ReadCapacityUnits - The provisioned read + // * cassandra:table:ReadCapacityUnits - The provisioned read // capacity for an Amazon Keyspaces table. // - // * - // cassandra:table:WriteCapacityUnits - The provisioned write capacity for an - // Amazon Keyspaces table. + // * cassandra:table:WriteCapacityUnits - + // The provisioned write capacity for an Amazon Keyspaces table. // - // * kafka:broker-storage:VolumeSize - The provisioned - // volume size (in GiB) for brokers in an Amazon MSK cluster. + // * + // kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for + // brokers in an Amazon MSK cluster. // // This member is required. ScalableDimension ScalableDimension @@ -334,68 +333,68 @@ type ScalingActivity struct { // The identifier of the resource associated with the scaling activity. This string // consists of the resource type and unique identifier. // - // * ECS service - The + // * ECS service - The // resource type is service and the unique identifier is the cluster name and // service name. Example: service/default/sample-webapp. // - // * Spot Fleet request - // - The resource type is spot-fleet-request and the unique identifier is the Spot + // * Spot Fleet request - + // The resource type is spot-fleet-request and the unique identifier is the Spot // Fleet request ID. Example: // spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. // - // * EMR cluster - // - The resource type is instancegroup and the unique identifier is the cluster ID + // * EMR cluster - + // The resource type is instancegroup and the unique identifier is the cluster ID // and instance group ID. Example: // instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0. // - // * AppStream 2.0 fleet - The + // * AppStream 2.0 fleet - The // resource type is fleet and the unique identifier is the fleet name. Example: // fleet/sample-fleet. // - // * DynamoDB table - The resource type is table and the + // * DynamoDB table - The resource type is table and the // unique identifier is the table name. Example: table/my-table. // - // * DynamoDB - // global secondary index - The resource type is index and the unique identifier is - // the index name. Example: table/my-table/index/my-table-index. + // * DynamoDB global + // secondary index - The resource type is index and the unique identifier is the + // index name. Example: table/my-table/index/my-table-index. // - // * Aurora DB - // cluster - The resource type is cluster and the unique identifier is the cluster - // name. Example: cluster:my-db-cluster. 
+ // * Aurora DB cluster - + // The resource type is cluster and the unique identifier is the cluster name. + // Example: cluster:my-db-cluster. // - // * Amazon SageMaker endpoint variant - - // The resource type is variant and the unique identifier is the resource ID. - // Example: endpoint/my-end-point/variant/KMeansClustering. + // * Amazon SageMaker endpoint variant - The + // resource type is variant and the unique identifier is the resource ID. Example: + // endpoint/my-end-point/variant/KMeansClustering. // - // * Custom resources - // are not supported with a resource type. This parameter must specify the - // OutputValue from the CloudFormation template stack used to access the resources. - // The unique identifier is defined by the service provider. More information is - // available in our GitHub repository + // * Custom resources are not + // supported with a resource type. This parameter must specify the OutputValue from + // the CloudFormation template stack used to access the resources. The unique + // identifier is defined by the service provider. More information is available in + // our GitHub repository // (https://github.com/aws/aws-auto-scaling-custom-resource). // - // * Amazon - // Comprehend document classification endpoint - The resource type and unique - // identifier are specified using the endpoint ARN. Example: + // * Amazon Comprehend + // document classification endpoint - The resource type and unique identifier are + // specified using the endpoint ARN. Example: // arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE. // - // - // * Amazon Comprehend entity recognizer endpoint - The resource type and unique + // * + // Amazon Comprehend entity recognizer endpoint - The resource type and unique // identifier are specified using the endpoint ARN. Example: // arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE. // - // - // * Lambda provisioned concurrency - The resource type is function and the unique + // * + // Lambda provisioned concurrency - The resource type is function and the unique // identifier is the function name with a function version or alias name suffix // that is not $LATEST. Example: function:my-function:prod or // function:my-function:1. // - // * Amazon Keyspaces table - The resource type is - // table and the unique identifier is the table name. Example: + // * Amazon Keyspaces table - The resource type is table + // and the unique identifier is the table name. Example: // keyspace/mykeyspace/table/mytable. // - // * Amazon MSK cluster - The resource type - // and unique identifier are specified using the cluster ARN. Example: + // * Amazon MSK cluster - The resource type and + // unique identifier are specified using the cluster ARN. Example: // arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5. // // This member is required. @@ -404,66 +403,65 @@ type ScalingActivity struct { // The scalable dimension. This string consists of the service namespace, resource // type, and scaling property. // - // * ecs:service:DesiredCount - The desired task - // count of an ECS service. + // * ecs:service:DesiredCount - The desired task count + // of an ECS service. // - // * ec2:spot-fleet-request:TargetCapacity - The - // target capacity of a Spot Fleet request. + // * ec2:spot-fleet-request:TargetCapacity - The target + // capacity of a Spot Fleet request. 
// - // * + // * // elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR // Instance Group. // - // * appstream:fleet:DesiredCapacity - The desired capacity of - // an AppStream 2.0 fleet. + // * appstream:fleet:DesiredCapacity - The desired capacity of an + // AppStream 2.0 fleet. // - // * dynamodb:table:ReadCapacityUnits - The - // provisioned read capacity for a DynamoDB table. + // * dynamodb:table:ReadCapacityUnits - The provisioned read + // capacity for a DynamoDB table. // - // * - // dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a - // DynamoDB table. + // * dynamodb:table:WriteCapacityUnits - The + // provisioned write capacity for a DynamoDB table. // - // * dynamodb:index:ReadCapacityUnits - The provisioned read - // capacity for a DynamoDB global secondary index. + // * + // dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB + // global secondary index. // - // * - // dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a - // DynamoDB global secondary index. + // * dynamodb:index:WriteCapacityUnits - The provisioned + // write capacity for a DynamoDB global secondary index. // - // * rds:cluster:ReadReplicaCount - The count - // of Aurora Replicas in an Aurora DB cluster. Available for Aurora - // MySQL-compatible edition and Aurora PostgreSQL-compatible edition. + // * + // rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB + // cluster. Available for Aurora MySQL-compatible edition and Aurora + // PostgreSQL-compatible edition. // - // * - // sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an - // Amazon SageMaker model endpoint variant. + // * sagemaker:variant:DesiredInstanceCount - The + // number of EC2 instances for an Amazon SageMaker model endpoint variant. // - // * + // * // custom-resource:ResourceType:Property - The scalable dimension for a custom // resource provided by your own application or service. // - // * + // * // comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of // inference units for an Amazon Comprehend document classification endpoint. // - // - // * comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of + // * + // comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of // inference units for an Amazon Comprehend entity recognizer endpoint. // - // * + // * // lambda:function:ProvisionedConcurrency - The provisioned concurrency for a // Lambda function. // - // * cassandra:table:ReadCapacityUnits - The provisioned read + // * cassandra:table:ReadCapacityUnits - The provisioned read // capacity for an Amazon Keyspaces table. // - // * - // cassandra:table:WriteCapacityUnits - The provisioned write capacity for an - // Amazon Keyspaces table. + // * cassandra:table:WriteCapacityUnits - + // The provisioned write capacity for an Amazon Keyspaces table. // - // * kafka:broker-storage:VolumeSize - The provisioned - // volume size (in GiB) for brokers in an Amazon MSK cluster. + // * + // kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for + // brokers in an Amazon MSK cluster. // // This member is required. ScalableDimension ScalableDimension @@ -524,68 +522,68 @@ type ScalingPolicy struct { // The identifier of the resource associated with the scaling policy. This string // consists of the resource type and unique identifier. 
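ScalingActivity values come back from DescribeScalingActivities, filtered by the same identifiers. A sketch that prints recent activity for the sample ECS service; the Description and StatusCode fields are assumed from the full ScalingActivity type, which this hunk only partially shows:

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/applicationautoscaling"
	"github.com/aws/aws-sdk-go-v2/service/applicationautoscaling/types"
)

// printRecentScalingActivity shows what Application Auto Scaling has done to the
// sample ECS service and whether each action succeeded.
func printRecentScalingActivity(ctx context.Context, client *applicationautoscaling.Client) error {
	out, err := client.DescribeScalingActivities(ctx, &applicationautoscaling.DescribeScalingActivitiesInput{
		ServiceNamespace:  types.ServiceNamespaceEcs,
		ResourceId:        aws.String("service/default/sample-webapp"),
		ScalableDimension: types.ScalableDimension("ecs:service:DesiredCount"),
	})
	if err != nil {
		return err
	}
	for _, a := range out.ScalingActivities {
		// Description and StatusCode are assumed field names on ScalingActivity.
		fmt.Printf("%s [%s]\n", *a.Description, a.StatusCode)
	}
	return nil
}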
// - // * ECS service - The + // * ECS service - The // resource type is service and the unique identifier is the cluster name and // service name. Example: service/default/sample-webapp. // - // * Spot Fleet request - // - The resource type is spot-fleet-request and the unique identifier is the Spot + // * Spot Fleet request - + // The resource type is spot-fleet-request and the unique identifier is the Spot // Fleet request ID. Example: // spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. // - // * EMR cluster - // - The resource type is instancegroup and the unique identifier is the cluster ID + // * EMR cluster - + // The resource type is instancegroup and the unique identifier is the cluster ID // and instance group ID. Example: // instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0. // - // * AppStream 2.0 fleet - The + // * AppStream 2.0 fleet - The // resource type is fleet and the unique identifier is the fleet name. Example: // fleet/sample-fleet. // - // * DynamoDB table - The resource type is table and the + // * DynamoDB table - The resource type is table and the // unique identifier is the table name. Example: table/my-table. // - // * DynamoDB - // global secondary index - The resource type is index and the unique identifier is - // the index name. Example: table/my-table/index/my-table-index. + // * DynamoDB global + // secondary index - The resource type is index and the unique identifier is the + // index name. Example: table/my-table/index/my-table-index. // - // * Aurora DB - // cluster - The resource type is cluster and the unique identifier is the cluster - // name. Example: cluster:my-db-cluster. + // * Aurora DB cluster - + // The resource type is cluster and the unique identifier is the cluster name. + // Example: cluster:my-db-cluster. // - // * Amazon SageMaker endpoint variant - - // The resource type is variant and the unique identifier is the resource ID. - // Example: endpoint/my-end-point/variant/KMeansClustering. + // * Amazon SageMaker endpoint variant - The + // resource type is variant and the unique identifier is the resource ID. Example: + // endpoint/my-end-point/variant/KMeansClustering. // - // * Custom resources - // are not supported with a resource type. This parameter must specify the - // OutputValue from the CloudFormation template stack used to access the resources. - // The unique identifier is defined by the service provider. More information is - // available in our GitHub repository + // * Custom resources are not + // supported with a resource type. This parameter must specify the OutputValue from + // the CloudFormation template stack used to access the resources. The unique + // identifier is defined by the service provider. More information is available in + // our GitHub repository // (https://github.com/aws/aws-auto-scaling-custom-resource). // - // * Amazon - // Comprehend document classification endpoint - The resource type and unique - // identifier are specified using the endpoint ARN. Example: + // * Amazon Comprehend + // document classification endpoint - The resource type and unique identifier are + // specified using the endpoint ARN. Example: // arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE. // - // - // * Amazon Comprehend entity recognizer endpoint - The resource type and unique + // * + // Amazon Comprehend entity recognizer endpoint - The resource type and unique // identifier are specified using the endpoint ARN. 
Example: // arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE. // - // - // * Lambda provisioned concurrency - The resource type is function and the unique + // * + // Lambda provisioned concurrency - The resource type is function and the unique // identifier is the function name with a function version or alias name suffix // that is not $LATEST. Example: function:my-function:prod or // function:my-function:1. // - // * Amazon Keyspaces table - The resource type is - // table and the unique identifier is the table name. Example: + // * Amazon Keyspaces table - The resource type is table + // and the unique identifier is the table name. Example: // keyspace/mykeyspace/table/mytable. // - // * Amazon MSK cluster - The resource type - // and unique identifier are specified using the cluster ARN. Example: + // * Amazon MSK cluster - The resource type and + // unique identifier are specified using the cluster ARN. Example: // arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5. // // This member is required. @@ -594,66 +592,65 @@ type ScalingPolicy struct { // The scalable dimension. This string consists of the service namespace, resource // type, and scaling property. // - // * ecs:service:DesiredCount - The desired task - // count of an ECS service. + // * ecs:service:DesiredCount - The desired task count + // of an ECS service. // - // * ec2:spot-fleet-request:TargetCapacity - The - // target capacity of a Spot Fleet request. + // * ec2:spot-fleet-request:TargetCapacity - The target + // capacity of a Spot Fleet request. // - // * + // * // elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR // Instance Group. // - // * appstream:fleet:DesiredCapacity - The desired capacity of - // an AppStream 2.0 fleet. + // * appstream:fleet:DesiredCapacity - The desired capacity of an + // AppStream 2.0 fleet. // - // * dynamodb:table:ReadCapacityUnits - The - // provisioned read capacity for a DynamoDB table. + // * dynamodb:table:ReadCapacityUnits - The provisioned read + // capacity for a DynamoDB table. // - // * - // dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a - // DynamoDB table. + // * dynamodb:table:WriteCapacityUnits - The + // provisioned write capacity for a DynamoDB table. // - // * dynamodb:index:ReadCapacityUnits - The provisioned read - // capacity for a DynamoDB global secondary index. + // * + // dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB + // global secondary index. // - // * - // dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a - // DynamoDB global secondary index. + // * dynamodb:index:WriteCapacityUnits - The provisioned + // write capacity for a DynamoDB global secondary index. // - // * rds:cluster:ReadReplicaCount - The count - // of Aurora Replicas in an Aurora DB cluster. Available for Aurora - // MySQL-compatible edition and Aurora PostgreSQL-compatible edition. + // * + // rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB + // cluster. Available for Aurora MySQL-compatible edition and Aurora + // PostgreSQL-compatible edition. // - // * - // sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an - // Amazon SageMaker model endpoint variant. + // * sagemaker:variant:DesiredInstanceCount - The + // number of EC2 instances for an Amazon SageMaker model endpoint variant. 
// - // * + // * // custom-resource:ResourceType:Property - The scalable dimension for a custom // resource provided by your own application or service. // - // * + // * // comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of // inference units for an Amazon Comprehend document classification endpoint. // - // - // * comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of + // * + // comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of // inference units for an Amazon Comprehend entity recognizer endpoint. // - // * + // * // lambda:function:ProvisionedConcurrency - The provisioned concurrency for a // Lambda function. // - // * cassandra:table:ReadCapacityUnits - The provisioned read + // * cassandra:table:ReadCapacityUnits - The provisioned read // capacity for an Amazon Keyspaces table. // - // * - // cassandra:table:WriteCapacityUnits - The provisioned write capacity for an - // Amazon Keyspaces table. + // * cassandra:table:WriteCapacityUnits - + // The provisioned write capacity for an Amazon Keyspaces table. // - // * kafka:broker-storage:VolumeSize - The provisioned - // volume size (in GiB) for brokers in an Amazon MSK cluster. + // * + // kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for + // brokers in an Amazon MSK cluster. // // This member is required. ScalableDimension ScalableDimension @@ -685,68 +682,68 @@ type ScheduledAction struct { // The identifier of the resource associated with the scaling policy. This string // consists of the resource type and unique identifier. // - // * ECS service - The + // * ECS service - The // resource type is service and the unique identifier is the cluster name and // service name. Example: service/default/sample-webapp. // - // * Spot Fleet request - // - The resource type is spot-fleet-request and the unique identifier is the Spot + // * Spot Fleet request - + // The resource type is spot-fleet-request and the unique identifier is the Spot // Fleet request ID. Example: // spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. // - // * EMR cluster - // - The resource type is instancegroup and the unique identifier is the cluster ID + // * EMR cluster - + // The resource type is instancegroup and the unique identifier is the cluster ID // and instance group ID. Example: // instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0. // - // * AppStream 2.0 fleet - The + // * AppStream 2.0 fleet - The // resource type is fleet and the unique identifier is the fleet name. Example: // fleet/sample-fleet. // - // * DynamoDB table - The resource type is table and the + // * DynamoDB table - The resource type is table and the // unique identifier is the table name. Example: table/my-table. // - // * DynamoDB - // global secondary index - The resource type is index and the unique identifier is - // the index name. Example: table/my-table/index/my-table-index. + // * DynamoDB global + // secondary index - The resource type is index and the unique identifier is the + // index name. Example: table/my-table/index/my-table-index. // - // * Aurora DB - // cluster - The resource type is cluster and the unique identifier is the cluster - // name. Example: cluster:my-db-cluster. + // * Aurora DB cluster - + // The resource type is cluster and the unique identifier is the cluster name. + // Example: cluster:my-db-cluster. // - // * Amazon SageMaker endpoint variant - - // The resource type is variant and the unique identifier is the resource ID. 
- // Example: endpoint/my-end-point/variant/KMeansClustering. + // * Amazon SageMaker endpoint variant - The + // resource type is variant and the unique identifier is the resource ID. Example: + // endpoint/my-end-point/variant/KMeansClustering. // - // * Custom resources - // are not supported with a resource type. This parameter must specify the - // OutputValue from the CloudFormation template stack used to access the resources. - // The unique identifier is defined by the service provider. More information is - // available in our GitHub repository + // * Custom resources are not + // supported with a resource type. This parameter must specify the OutputValue from + // the CloudFormation template stack used to access the resources. The unique + // identifier is defined by the service provider. More information is available in + // our GitHub repository // (https://github.com/aws/aws-auto-scaling-custom-resource). // - // * Amazon - // Comprehend document classification endpoint - The resource type and unique - // identifier are specified using the endpoint ARN. Example: + // * Amazon Comprehend + // document classification endpoint - The resource type and unique identifier are + // specified using the endpoint ARN. Example: // arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE. // - // - // * Amazon Comprehend entity recognizer endpoint - The resource type and unique + // * + // Amazon Comprehend entity recognizer endpoint - The resource type and unique // identifier are specified using the endpoint ARN. Example: // arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE. // - // - // * Lambda provisioned concurrency - The resource type is function and the unique + // * + // Lambda provisioned concurrency - The resource type is function and the unique // identifier is the function name with a function version or alias name suffix // that is not $LATEST. Example: function:my-function:prod or // function:my-function:1. // - // * Amazon Keyspaces table - The resource type is - // table and the unique identifier is the table name. Example: + // * Amazon Keyspaces table - The resource type is table + // and the unique identifier is the table name. Example: // keyspace/mykeyspace/table/mytable. // - // * Amazon MSK cluster - The resource type - // and unique identifier are specified using the cluster ARN. Example: + // * Amazon MSK cluster - The resource type and + // unique identifier are specified using the cluster ARN. Example: // arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5. // // This member is required. @@ -754,13 +751,13 @@ type ScheduledAction struct { // The schedule for this action. The following formats are supported: // - // * At + // * At // expressions - "at(yyyy-mm-ddThh:mm:ss)" // - // * Rate expressions - "rate(value + // * Rate expressions - "rate(value // unit)" // - // * Cron expressions - "cron(fields)" + // * Cron expressions - "cron(fields)" // // At expressions are useful for // one-time schedules. Specify the time in UTC. For rate expressions, value is a @@ -797,66 +794,65 @@ type ScheduledAction struct { // The scalable dimension. This string consists of the service namespace, resource // type, and scaling property. // - // * ecs:service:DesiredCount - The desired task - // count of an ECS service. + // * ecs:service:DesiredCount - The desired task count + // of an ECS service. 
// - // * ec2:spot-fleet-request:TargetCapacity - The - // target capacity of a Spot Fleet request. + // * ec2:spot-fleet-request:TargetCapacity - The target + // capacity of a Spot Fleet request. // - // * + // * // elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR // Instance Group. // - // * appstream:fleet:DesiredCapacity - The desired capacity of - // an AppStream 2.0 fleet. + // * appstream:fleet:DesiredCapacity - The desired capacity of an + // AppStream 2.0 fleet. // - // * dynamodb:table:ReadCapacityUnits - The - // provisioned read capacity for a DynamoDB table. + // * dynamodb:table:ReadCapacityUnits - The provisioned read + // capacity for a DynamoDB table. // - // * - // dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a - // DynamoDB table. + // * dynamodb:table:WriteCapacityUnits - The + // provisioned write capacity for a DynamoDB table. // - // * dynamodb:index:ReadCapacityUnits - The provisioned read - // capacity for a DynamoDB global secondary index. + // * + // dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB + // global secondary index. // - // * - // dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a - // DynamoDB global secondary index. + // * dynamodb:index:WriteCapacityUnits - The provisioned + // write capacity for a DynamoDB global secondary index. // - // * rds:cluster:ReadReplicaCount - The count - // of Aurora Replicas in an Aurora DB cluster. Available for Aurora - // MySQL-compatible edition and Aurora PostgreSQL-compatible edition. + // * + // rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB + // cluster. Available for Aurora MySQL-compatible edition and Aurora + // PostgreSQL-compatible edition. // - // * - // sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for an - // Amazon SageMaker model endpoint variant. + // * sagemaker:variant:DesiredInstanceCount - The + // number of EC2 instances for an Amazon SageMaker model endpoint variant. // - // * + // * // custom-resource:ResourceType:Property - The scalable dimension for a custom // resource provided by your own application or service. // - // * + // * // comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of // inference units for an Amazon Comprehend document classification endpoint. // - // - // * comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of + // * + // comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of // inference units for an Amazon Comprehend entity recognizer endpoint. // - // * + // * // lambda:function:ProvisionedConcurrency - The provisioned concurrency for a // Lambda function. // - // * cassandra:table:ReadCapacityUnits - The provisioned read + // * cassandra:table:ReadCapacityUnits - The provisioned read // capacity for an Amazon Keyspaces table. // - // * - // cassandra:table:WriteCapacityUnits - The provisioned write capacity for an - // Amazon Keyspaces table. + // * cassandra:table:WriteCapacityUnits - + // The provisioned write capacity for an Amazon Keyspaces table. // - // * kafka:broker-storage:VolumeSize - The provisioned - // volume size (in GiB) for brokers in an Amazon MSK cluster. + // * + // kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) for + // brokers in an Amazon MSK cluster. ScalableDimension ScalableDimension // The new minimum and maximum capacity. You can set both values or just one. 
At @@ -877,30 +873,29 @@ type ScheduledAction struct { // the alarm. For the following examples, suppose that you have an alarm with a // breach threshold of 50: // -// * To trigger the adjustment when the metric is -// greater than or equal to 50 and less than 60, specify a lower bound of 0 and an -// upper bound of 10. +// * To trigger the adjustment when the metric is greater +// than or equal to 50 and less than 60, specify a lower bound of 0 and an upper +// bound of 10. // -// * To trigger the adjustment when the metric is greater -// than 40 and less than or equal to 50, specify a lower bound of -10 and an upper -// bound of 0. +// * To trigger the adjustment when the metric is greater than 40 and +// less than or equal to 50, specify a lower bound of -10 and an upper bound of +// 0. // -// There are a few rules for the step adjustments for your step -// policy: +// There are a few rules for the step adjustments for your step policy: // -// * The ranges of your step adjustments can't overlap or have a -// gap. +// * The +// ranges of your step adjustments can't overlap or have a gap. // -// * At most one step adjustment can have a null lower bound. If one step -// adjustment has a negative lower bound, then there must be a step adjustment with -// a null lower bound. +// * At most one step +// adjustment can have a null lower bound. If one step adjustment has a negative +// lower bound, then there must be a step adjustment with a null lower bound. // -// * At most one step adjustment can have a null upper -// bound. If one step adjustment has a positive upper bound, then there must be a -// step adjustment with a null upper bound. +// * At +// most one step adjustment can have a null upper bound. If one step adjustment has +// a positive upper bound, then there must be a step adjustment with a null upper +// bound. // -// * The upper and lower bound can't -// be null in the same step adjustment. +// * The upper and lower bound can't be null in the same step adjustment. type StepAdjustment struct { // The amount by which to scale, based on the specified adjustment type. A positive @@ -960,39 +955,39 @@ type StepScalingPolicyConfiguration struct { // activity stops and doesn't complete. Application Auto Scaling provides a default // value of 300 for the following scalable targets: // - // * ECS services + // * ECS services // - // * Spot - // Fleet requests + // * Spot Fleet + // requests // - // * EMR clusters + // * EMR clusters // - // * AppStream 2.0 fleets + // * AppStream 2.0 fleets // - // * Aurora DB - // clusters + // * Aurora DB clusters // - // * Amazon SageMaker endpoint variants + // * Amazon + // SageMaker endpoint variants // - // * Custom resources + // * Custom resources // - // For - // all other scalable targets, the default value is 0: + // For all other scalable targets, + // the default value is 0: // - // * DynamoDB tables + // * DynamoDB tables // + // * DynamoDB global secondary + // indexes // - // * DynamoDB global secondary indexes - // - // * Amazon Comprehend document - // classification and entity recognizer endpoints + // * Amazon Comprehend document classification and entity recognizer + // endpoints // - // * Lambda provisioned - // concurrency + // * Lambda provisioned concurrency // - // * Amazon Keyspaces tables + // * Amazon Keyspaces tables // - // * Amazon MSK cluster storage + // * Amazon + // MSK cluster storage Cooldown *int32 // The aggregation type for the CloudWatch metrics. 
Valid values are Minimum, @@ -1070,39 +1065,39 @@ type TargetTrackingScalingPolicyConfiguration struct { // complete. Application Auto Scaling provides a default value of 300 for the // following scalable targets: // - // * ECS services - // - // * Spot Fleet requests + // * ECS services // + // * Spot Fleet requests // - // * EMR clusters + // * EMR + // clusters // - // * AppStream 2.0 fleets + // * AppStream 2.0 fleets // - // * Aurora DB clusters + // * Aurora DB clusters // - // * - // Amazon SageMaker endpoint variants + // * Amazon SageMaker + // endpoint variants // - // * Custom resources + // * Custom resources // - // For all other - // scalable targets, the default value is 0: + // For all other scalable targets, the + // default value is 0: // - // * DynamoDB tables + // * DynamoDB tables // - // * DynamoDB - // global secondary indexes + // * DynamoDB global secondary indexes // - // * Amazon Comprehend document classification and - // entity recognizer endpoints + // * + // Amazon Comprehend document classification and entity recognizer endpoints // - // * Lambda provisioned concurrency + // * + // Lambda provisioned concurrency // - // * Amazon - // Keyspaces tables + // * Amazon Keyspaces tables // - // * Amazon MSK cluster storage + // * Amazon MSK cluster + // storage ScaleInCooldown *int32 // The amount of time, in seconds, to wait for a previous scale-out activity to @@ -1116,38 +1111,38 @@ type TargetTrackingScalingPolicyConfiguration struct { // the next scale-out activity. Application Auto Scaling provides a default value // of 300 for the following scalable targets: // - // * ECS services + // * ECS services // - // * Spot Fleet + // * Spot Fleet // requests // - // * EMR clusters - // - // * AppStream 2.0 fleets + // * EMR clusters // - // * Aurora DB - // clusters + // * AppStream 2.0 fleets // - // * Amazon SageMaker endpoint variants + // * Aurora DB clusters // - // * Custom resources + // * Amazon + // SageMaker endpoint variants // - // For - // all other scalable targets, the default value is 0: + // * Custom resources // - // * DynamoDB tables + // For all other scalable targets, + // the default value is 0: // + // * DynamoDB tables // - // * DynamoDB global secondary indexes + // * DynamoDB global secondary + // indexes // - // * Amazon Comprehend document - // classification and entity recognizer endpoints + // * Amazon Comprehend document classification and entity recognizer + // endpoints // - // * Lambda provisioned - // concurrency + // * Lambda provisioned concurrency // - // * Amazon Keyspaces tables + // * Amazon Keyspaces tables // - // * Amazon MSK cluster storage + // * Amazon + // MSK cluster storage ScaleOutCooldown *int32 } diff --git a/service/applicationdiscoveryservice/api_op_DescribeConfigurations.go b/service/applicationdiscoveryservice/api_op_DescribeConfigurations.go index b0aa59ecada..121492b60cd 100644 --- a/service/applicationdiscoveryservice/api_op_DescribeConfigurations.go +++ b/service/applicationdiscoveryservice/api_op_DescribeConfigurations.go @@ -13,20 +13,20 @@ import ( // Retrieves attributes for a list of configuration item IDs. All of the supplied // IDs must be for the same asset type from one of the following: // -// * server +// * server // +// * +// application // -// * application +// * process // -// * process +// * connection // -// * connection -// -// Output fields are specific to -// the asset type specified. 
For example, the output for a server configuration -// item includes a list of attributes about the server, such as host name, -// operating system, number of network cards, etc. For a complete list of outputs -// for each asset type, see Using the DescribeConfigurations Action +// Output fields are specific to the asset +// type specified. For example, the output for a server configuration item includes +// a list of attributes about the server, such as host name, operating system, +// number of network cards, etc. For a complete list of outputs for each asset +// type, see Using the DescribeConfigurations Action // (https://docs.aws.amazon.com/application-discovery/latest/userguide/discovery-api-queries.html#DescribeConfigurations) // in the AWS Application Discovery Service User Guide. func (c *Client) DescribeConfigurations(ctx context.Context, params *DescribeConfigurationsInput, optFns ...func(*Options)) (*DescribeConfigurationsOutput, error) { diff --git a/service/applicationdiscoveryservice/api_op_DescribeExportTasks.go b/service/applicationdiscoveryservice/api_op_DescribeExportTasks.go index 9fd76d04aca..d5ba7eb01fd 100644 --- a/service/applicationdiscoveryservice/api_op_DescribeExportTasks.go +++ b/service/applicationdiscoveryservice/api_op_DescribeExportTasks.go @@ -35,8 +35,8 @@ type DescribeExportTasksInput struct { // One or more filters. // - // * AgentId - ID of the agent whose collected data will - // be exported + // * AgentId - ID of the agent whose collected data will be + // exported Filters []*types.ExportFilter // The maximum number of volume results returned by DescribeExportTasks in diff --git a/service/applicationdiscoveryservice/api_op_DescribeTags.go b/service/applicationdiscoveryservice/api_op_DescribeTags.go index 19c7cbcc128..12361925f05 100644 --- a/service/applicationdiscoveryservice/api_op_DescribeTags.go +++ b/service/applicationdiscoveryservice/api_op_DescribeTags.go @@ -15,11 +15,11 @@ import ( // key-value pairs, name and value, passed to the optional parameter filters. There // are three valid tag filter names: // -// * tagKey +// * tagKey // -// * tagValue +// * tagValue // -// * +// * // configurationId // // Also, all configuration items associated with your user account diff --git a/service/applicationdiscoveryservice/api_op_StartContinuousExport.go b/service/applicationdiscoveryservice/api_op_StartContinuousExport.go index ca2e36d0803..dbce975c302 100644 --- a/service/applicationdiscoveryservice/api_op_StartContinuousExport.go +++ b/service/applicationdiscoveryservice/api_op_StartContinuousExport.go @@ -45,8 +45,8 @@ type StartContinuousExportOutput struct { // A dictionary which describes how the data is stored. // - // * databaseName - the - // name of the Glue database used to store the schema. + // * databaseName - the name + // of the Glue database used to store the schema. SchemaStorageConfig map[string]*string // The timestamp representing when the continuous export was started. diff --git a/service/applicationdiscoveryservice/api_op_StartImportTask.go b/service/applicationdiscoveryservice/api_op_StartImportTask.go index 4d30449fbad..d180bb0eeea 100644 --- a/service/applicationdiscoveryservice/api_op_StartImportTask.go +++ b/service/applicationdiscoveryservice/api_op_StartImportTask.go @@ -20,24 +20,24 @@ import ( // devices as applications and track their migration status. 
To start an import // request, do this: // -// * Download the specially formatted comma separated value +// * Download the specially formatted comma separated value // (CSV) import template, which you can find here: // https://s3-us-west-2.amazonaws.com/templates-7cffcf56-bd96-4b1c-b45b-a5b42f282e46/import_template.csv // (https://s3-us-west-2.amazonaws.com/templates-7cffcf56-bd96-4b1c-b45b-a5b42f282e46/import_template.csv). // +// * +// Fill out the template with your server and application data. // -// * Fill out the template with your server and application data. +// * Upload your +// import file to an Amazon S3 bucket, and make a note of it's Object URL. Your +// import file must be in the CSV format. // -// * Upload -// your import file to an Amazon S3 bucket, and make a note of it's Object URL. -// Your import file must be in the CSV format. +// * Use the console or the StartImportTask +// command with the AWS CLI or one of the AWS SDKs to import the records from your +// file. // -// * Use the console or the -// StartImportTask command with the AWS CLI or one of the AWS SDKs to import the -// records from your file. -// -// For more information, including step-by-step -// procedures, see Migration Hub Import +// For more information, including step-by-step procedures, see Migration +// Hub Import // (https://docs.aws.amazon.com/application-discovery/latest/userguide/discovery-import.html) // in the AWS Application Discovery Service User Guide. There are limits to the // number of import tasks you can create (and delete) in an AWS account. For more diff --git a/service/applicationdiscoveryservice/doc.go b/service/applicationdiscoveryservice/doc.go index 599fd3d774d..51cd4d8e2dc 100644 --- a/service/applicationdiscoveryservice/doc.go +++ b/service/applicationdiscoveryservice/doc.go @@ -11,78 +11,77 @@ // Service offers three ways of performing discovery and collecting data about your // on-premises servers: // -// * Agentless discovery is recommended for environments -// that use VMware vCenter Server. This mode doesn't require you to install an -// agent on each host. It does not work in non-VMware environments. -// -// * -// Agentless discovery gathers server information regardless of the operating -// systems, which minimizes the time required for initial on-premises -// infrastructure assessment. -// -// * Agentless discovery doesn't collect -// information about network dependencies, only agent-based discovery collects that -// information. -// -// * Agent-based discovery collects a richer set of data than -// agentless discovery by using the AWS Application Discovery Agent, which you -// install on one or more hosts in your data center. -// -// * The agent captures -// infrastructure and application information, including an inventory of running -// processes, system performance information, resource utilization, and network -// dependencies. -// -// * The information collected by agents is secured at rest -// and in transit to the Application Discovery Service database in the cloud. -// -// -// * AWS Partner Network (APN) solutions integrate with Application Discovery -// Service, enabling you to import details of your on-premises environment directly -// into Migration Hub without using the discovery connector or discovery agent. -// -// -// * Third-party application discovery tools can query AWS Application Discovery -// Service, and they can write to the Application Discovery Service database using -// the public API. 
-// -// * In this way, you can import data into Migration Hub -// and view it, so that you can associate applications with servers and track -// migrations. -// -// Recommendations We recommend that you use agent-based discovery for -// non-VMware environments, and whenever you want to collect information about -// network dependencies. You can run agent-based and agentless discovery -// simultaneously. Use agentless discovery to complete the initial infrastructure -// assessment quickly, and then install agents on select hosts to collect -// additional information. Working With This Guide This API reference provides -// descriptions, syntax, and usage examples for each of the actions and data types -// for Application Discovery Service. The topic for each action shows the API -// request parameters and the response. Alternatively, you can use one of the AWS -// SDKs to access an API that is tailored to the programming language or platform -// that you're using. For more information, see AWS SDKs +// * Agentless discovery is recommended for environments that +// use VMware vCenter Server. This mode doesn't require you to install an agent on +// each host. It does not work in non-VMware environments. +// +// * Agentless discovery +// gathers server information regardless of the operating systems, which minimizes +// the time required for initial on-premises infrastructure assessment. +// +// * +// Agentless discovery doesn't collect information about network dependencies, only +// agent-based discovery collects that information. +// +// * Agent-based discovery +// collects a richer set of data than agentless discovery by using the AWS +// Application Discovery Agent, which you install on one or more hosts in your data +// center. +// +// * The agent captures infrastructure and application information, +// including an inventory of running processes, system performance information, +// resource utilization, and network dependencies. +// +// * The information collected by +// agents is secured at rest and in transit to the Application Discovery Service +// database in the cloud. +// +// * AWS Partner Network (APN) solutions integrate with +// Application Discovery Service, enabling you to import details of your +// on-premises environment directly into Migration Hub without using the discovery +// connector or discovery agent. +// +// * Third-party application discovery tools can +// query AWS Application Discovery Service, and they can write to the Application +// Discovery Service database using the public API. +// +// * In this way, you can import +// data into Migration Hub and view it, so that you can associate applications with +// servers and track migrations. +// +// Recommendations We recommend that you use +// agent-based discovery for non-VMware environments, and whenever you want to +// collect information about network dependencies. You can run agent-based and +// agentless discovery simultaneously. Use agentless discovery to complete the +// initial infrastructure assessment quickly, and then install agents on select +// hosts to collect additional information. Working With This Guide This API +// reference provides descriptions, syntax, and usage examples for each of the +// actions and data types for Application Discovery Service. The topic for each +// action shows the API request parameters and the response. Alternatively, you can +// use one of the AWS SDKs to access an API that is tailored to the programming +// language or platform that you're using. 
For more information, see AWS SDKs // (http://aws.amazon.com/tools/#SDKs). // -// * Remember that you must set your +// * Remember that you must set your // Migration Hub home region before you call any of these APIs. // -// * You must -// make API calls for write actions (create, notify, associate, disassociate, -// import, or put) while in your home region, or a HomeRegionNotSetException error -// is returned. +// * You must make +// API calls for write actions (create, notify, associate, disassociate, import, or +// put) while in your home region, or a HomeRegionNotSetException error is +// returned. // -// * API calls for read actions (list, describe, stop, and -// delete) are permitted outside of your home region. +// * API calls for read actions (list, describe, stop, and delete) are +// permitted outside of your home region. // -// * Although it is -// unlikely, the Migration Hub home region could change. If you call APIs outside -// the home region, an InvalidInputException is returned. +// * Although it is unlikely, the Migration +// Hub home region could change. If you call APIs outside the home region, an +// InvalidInputException is returned. // -// * You must call -// GetHomeRegion to obtain the latest Migration Hub home region. +// * You must call GetHomeRegion to obtain the +// latest Migration Hub home region. // -// This guide is -// intended for use with the AWS Application Discovery Service User Guide +// This guide is intended for use with the AWS +// Application Discovery Service User Guide // (http://docs.aws.amazon.com/application-discovery/latest/userguide/). All data // is handled according to the AWS Privacy Policy (http://aws.amazon.com/privacy/). // You can operate Application Discovery Service offline to inspect collected data diff --git a/service/applicationdiscoveryservice/types/enums.go b/service/applicationdiscoveryservice/types/enums.go index 93e11072245..af6e1a77da0 100644 --- a/service/applicationdiscoveryservice/types/enums.go +++ b/service/applicationdiscoveryservice/types/enums.go @@ -32,9 +32,9 @@ type BatchDeleteImportDataErrorCode string // Enum values for BatchDeleteImportDataErrorCode const ( - BatchDeleteImportDataErrorCodeNot_found BatchDeleteImportDataErrorCode = "NOT_FOUND" - BatchDeleteImportDataErrorCodeInternal_server_error BatchDeleteImportDataErrorCode = "INTERNAL_SERVER_ERROR" - BatchDeleteImportDataErrorCodeOver_limit BatchDeleteImportDataErrorCode = "OVER_LIMIT" + BatchDeleteImportDataErrorCodeNotFound BatchDeleteImportDataErrorCode = "NOT_FOUND" + BatchDeleteImportDataErrorCodeInternalServerError BatchDeleteImportDataErrorCode = "INTERNAL_SERVER_ERROR" + BatchDeleteImportDataErrorCodeOverLimit BatchDeleteImportDataErrorCode = "OVER_LIMIT" ) // Values returns all known values for BatchDeleteImportDataErrorCode. 
Note that @@ -75,13 +75,13 @@ type ContinuousExportStatus string // Enum values for ContinuousExportStatus const ( - ContinuousExportStatusStart_in_progress ContinuousExportStatus = "START_IN_PROGRESS" - ContinuousExportStatusStart_failed ContinuousExportStatus = "START_FAILED" - ContinuousExportStatusActive ContinuousExportStatus = "ACTIVE" - ContinuousExportStatusError ContinuousExportStatus = "ERROR" - ContinuousExportStatusStop_in_progress ContinuousExportStatus = "STOP_IN_PROGRESS" - ContinuousExportStatusStop_failed ContinuousExportStatus = "STOP_FAILED" - ContinuousExportStatusInactive ContinuousExportStatus = "INACTIVE" + ContinuousExportStatusStartInProgress ContinuousExportStatus = "START_IN_PROGRESS" + ContinuousExportStatusStartFailed ContinuousExportStatus = "START_FAILED" + ContinuousExportStatusActive ContinuousExportStatus = "ACTIVE" + ContinuousExportStatusError ContinuousExportStatus = "ERROR" + ContinuousExportStatusStopInProgress ContinuousExportStatus = "STOP_IN_PROGRESS" + ContinuousExportStatusStopFailed ContinuousExportStatus = "STOP_FAILED" + ContinuousExportStatusInactive ContinuousExportStatus = "INACTIVE" ) // Values returns all known values for ContinuousExportStatus. Note that this can @@ -137,9 +137,9 @@ type ExportStatus string // Enum values for ExportStatus const ( - ExportStatusFailed ExportStatus = "FAILED" - ExportStatusSucceeded ExportStatus = "SUCCEEDED" - ExportStatusIn_progress ExportStatus = "IN_PROGRESS" + ExportStatusFailed ExportStatus = "FAILED" + ExportStatusSucceeded ExportStatus = "SUCCEEDED" + ExportStatusInProgress ExportStatus = "IN_PROGRESS" ) // Values returns all known values for ExportStatus. Note that this can be expanded @@ -157,17 +157,17 @@ type ImportStatus string // Enum values for ImportStatus const ( - ImportStatusImport_in_progress ImportStatus = "IMPORT_IN_PROGRESS" - ImportStatusImport_complete ImportStatus = "IMPORT_COMPLETE" - ImportStatusImport_complete_with_errors ImportStatus = "IMPORT_COMPLETE_WITH_ERRORS" - ImportStatusImport_failed ImportStatus = "IMPORT_FAILED" - ImportStatusImport_failed_server_limit_exceeded ImportStatus = "IMPORT_FAILED_SERVER_LIMIT_EXCEEDED" - ImportStatusImport_failed_record_limit_exceeded ImportStatus = "IMPORT_FAILED_RECORD_LIMIT_EXCEEDED" - ImportStatusDelete_in_progress ImportStatus = "DELETE_IN_PROGRESS" - ImportStatusDelete_complete ImportStatus = "DELETE_COMPLETE" - ImportStatusDelete_failed ImportStatus = "DELETE_FAILED" - ImportStatusDelete_failed_limit_exceeded ImportStatus = "DELETE_FAILED_LIMIT_EXCEEDED" - ImportStatusInternal_error ImportStatus = "INTERNAL_ERROR" + ImportStatusImportInProgress ImportStatus = "IMPORT_IN_PROGRESS" + ImportStatusImportComplete ImportStatus = "IMPORT_COMPLETE" + ImportStatusImportCompleteWithErrors ImportStatus = "IMPORT_COMPLETE_WITH_ERRORS" + ImportStatusImportFailed ImportStatus = "IMPORT_FAILED" + ImportStatusImportFailedServerLimitExceeded ImportStatus = "IMPORT_FAILED_SERVER_LIMIT_EXCEEDED" + ImportStatusImportFailedRecordLimitExceeded ImportStatus = "IMPORT_FAILED_RECORD_LIMIT_EXCEEDED" + ImportStatusDeleteInProgress ImportStatus = "DELETE_IN_PROGRESS" + ImportStatusDeleteComplete ImportStatus = "DELETE_COMPLETE" + ImportStatusDeleteFailed ImportStatus = "DELETE_FAILED" + ImportStatusDeleteFailedLimitExceeded ImportStatus = "DELETE_FAILED_LIMIT_EXCEEDED" + ImportStatusInternalError ImportStatus = "INTERNAL_ERROR" ) // Values returns all known values for ImportStatus. 
Note that this can be expanded @@ -193,9 +193,9 @@ type ImportTaskFilterName string // Enum values for ImportTaskFilterName const ( - ImportTaskFilterNameImport_task_id ImportTaskFilterName = "IMPORT_TASK_ID" - ImportTaskFilterNameStatus ImportTaskFilterName = "STATUS" - ImportTaskFilterNameName ImportTaskFilterName = "NAME" + ImportTaskFilterNameImportTaskId ImportTaskFilterName = "IMPORT_TASK_ID" + ImportTaskFilterNameStatus ImportTaskFilterName = "STATUS" + ImportTaskFilterNameName ImportTaskFilterName = "NAME" ) // Values returns all known values for ImportTaskFilterName. Note that this can be diff --git a/service/applicationdiscoveryservice/types/types.go b/service/applicationdiscoveryservice/types/types.go index 5f1aa5242d5..36edd01a3c2 100644 --- a/service/applicationdiscoveryservice/types/types.go +++ b/service/applicationdiscoveryservice/types/types.go @@ -121,8 +121,8 @@ type ContinuousExportDescription struct { // An object which describes how the data is stored. // - // * databaseName - the name - // of the Glue database used to store the schema. + // * databaseName - the name of + // the Glue database used to store the schema. SchemaStorageConfig map[string]*string // The timestamp representing when the continuous export was started. @@ -130,72 +130,71 @@ type ContinuousExportDescription struct { // Describes the status of the export. Can be one of the following values: // - // * + // * // START_IN_PROGRESS - setting up resources to start continuous export. // - // * + // * // START_FAILED - an error occurred setting up continuous export. To recover, call // start-continuous-export again. // - // * ACTIVE - data is being exported to the + // * ACTIVE - data is being exported to the // customer bucket. // - // * ERROR - an error occurred during export. To fix the - // issue, call stop-continuous-export and start-continuous-export. + // * ERROR - an error occurred during export. To fix the issue, + // call stop-continuous-export and start-continuous-export. // - // * - // STOP_IN_PROGRESS - stopping the export. + // * STOP_IN_PROGRESS - + // stopping the export. // - // * STOP_FAILED - an error occurred - // stopping the export. To recover, call stop-continuous-export again. + // * STOP_FAILED - an error occurred stopping the export. To + // recover, call stop-continuous-export again. // - // * - // INACTIVE - the continuous export has been stopped. Data is no longer being - // exported to the customer bucket. + // * INACTIVE - the continuous export + // has been stopped. Data is no longer being exported to the customer bucket. Status ContinuousExportStatus // Contains information about any errors that have occurred. This data type can // have the following values: // - // * ACCESS_DENIED - You don’t have permission to - // start Data Exploration in Amazon Athena. Contact your AWS administrator for - // help. For more information, see Setting Up AWS Application Discovery Service + // * ACCESS_DENIED - You don’t have permission to start + // Data Exploration in Amazon Athena. Contact your AWS administrator for help. For + // more information, see Setting Up AWS Application Discovery Service // (http://docs.aws.amazon.com/application-discovery/latest/userguide/setting-up.html) // in the Application Discovery Service User Guide. // - // * + // * // DELIVERY_STREAM_LIMIT_FAILURE - You reached the limit for Amazon Kinesis Data // Firehose delivery streams. Reduce the number of streams or request a limit // increase and try again. 
For more information, see Kinesis Data Streams Limits // (http://docs.aws.amazon.com/streams/latest/dev/service-sizes-and-limits.html) in // the Amazon Kinesis Data Streams Developer Guide. // - // * FIREHOSE_ROLE_MISSING - - // The Data Exploration feature is in an error state because your IAM User is - // missing the AWSApplicationDiscoveryServiceFirehose role. Turn on Data - // Exploration in Amazon Athena and try again. For more information, see Step 3: - // Provide Application Discovery Service Access to Non-Administrator Users by - // Attaching Policies + // * FIREHOSE_ROLE_MISSING - The + // Data Exploration feature is in an error state because your IAM User is missing + // the AWSApplicationDiscoveryServiceFirehose role. Turn on Data Exploration in + // Amazon Athena and try again. For more information, see Step 3: Provide + // Application Discovery Service Access to Non-Administrator Users by Attaching + // Policies // (http://docs.aws.amazon.com/application-discovery/latest/userguide/setting-up.html#setting-up-user-policy) // in the Application Discovery Service User Guide. // - // * + // * // FIREHOSE_STREAM_DOES_NOT_EXIST - The Data Exploration feature is in an error // state because your IAM User is missing one or more of the Kinesis data delivery // streams. // - // * INTERNAL_FAILURE - The Data Exploration feature is in an error - // state because of an internal failure. Try again later. If this problem persists, + // * INTERNAL_FAILURE - The Data Exploration feature is in an error state + // because of an internal failure. Try again later. If this problem persists, // contact AWS Support. // - // * S3_BUCKET_LIMIT_FAILURE - You reached the limit for + // * S3_BUCKET_LIMIT_FAILURE - You reached the limit for // Amazon S3 buckets. Reduce the number of Amazon S3 buckets or request a limit // increase and try again. For more information, see Bucket Restrictions and // Limitations // (http://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html) in the // Amazon Simple Storage Service Developer Guide. // - // * S3_NOT_SIGNED_UP - Your + // * S3_NOT_SIGNED_UP - Your // account is not signed up for the Amazon S3 service. You must sign up before you // can use Amazon S3. You can sign up at the following URL: // https://aws.amazon.com/s3 (https://aws.amazon.com/s3). diff --git a/service/applicationinsights/api_op_ListConfigurationHistory.go b/service/applicationinsights/api_op_ListConfigurationHistory.go index 4735492b3b1..0967c38420a 100644 --- a/service/applicationinsights/api_op_ListConfigurationHistory.go +++ b/service/applicationinsights/api_op_ListConfigurationHistory.go @@ -15,14 +15,14 @@ import ( // Lists the INFO, WARN, and ERROR events for periodic configuration updates // performed by Application Insights. Examples of events represented are: // -// * -// INFO: creating a new alarm or updating an alarm threshold. +// * INFO: +// creating a new alarm or updating an alarm threshold. // -// * WARN: alarm -// not created due to insufficient data points used to predict thresholds. +// * WARN: alarm not created +// due to insufficient data points used to predict thresholds. // -// * -// ERROR: alarm not created due to permission errors or exceeding quotas. +// * ERROR: alarm not +// created due to permission errors or exceeding quotas. 
func (c *Client) ListConfigurationHistory(ctx context.Context, params *ListConfigurationHistoryInput, optFns ...func(*Options)) (*ListConfigurationHistoryOutput, error) { if params == nil { params = &ListConfigurationHistoryInput{} diff --git a/service/applicationinsights/types/enums.go b/service/applicationinsights/types/enums.go index c75b4da5d37..bc347bb76d6 100644 --- a/service/applicationinsights/types/enums.go +++ b/service/applicationinsights/types/enums.go @@ -6,9 +6,9 @@ type CloudWatchEventSource string // Enum values for CloudWatchEventSource const ( - CloudWatchEventSourceEc2 CloudWatchEventSource = "EC2" - CloudWatchEventSourceCode_deploy CloudWatchEventSource = "CODE_DEPLOY" - CloudWatchEventSourceHealth CloudWatchEventSource = "HEALTH" + CloudWatchEventSourceEc2 CloudWatchEventSource = "EC2" + CloudWatchEventSourceCodeDeploy CloudWatchEventSource = "CODE_DEPLOY" + CloudWatchEventSourceHealth CloudWatchEventSource = "HEALTH" ) // Values returns all known values for CloudWatchEventSource. Note that this can be @@ -26,9 +26,9 @@ type ConfigurationEventResourceType string // Enum values for ConfigurationEventResourceType const ( - ConfigurationEventResourceTypeCloudwatch_alarm ConfigurationEventResourceType = "CLOUDWATCH_ALARM" - ConfigurationEventResourceTypeCloudformation ConfigurationEventResourceType = "CLOUDFORMATION" - ConfigurationEventResourceTypeSsm_association ConfigurationEventResourceType = "SSM_ASSOCIATION" + ConfigurationEventResourceTypeCloudwatchAlarm ConfigurationEventResourceType = "CLOUDWATCH_ALARM" + ConfigurationEventResourceTypeCloudformation ConfigurationEventResourceType = "CLOUDFORMATION" + ConfigurationEventResourceTypeSsmAssociation ConfigurationEventResourceType = "SSM_ASSOCIATION" ) // Values returns all known values for ConfigurationEventResourceType. Note that @@ -67,7 +67,7 @@ type FeedbackKey string // Enum values for FeedbackKey const ( - FeedbackKeyInsights_feedback FeedbackKey = "INSIGHTS_FEEDBACK" + FeedbackKeyInsightsFeedback FeedbackKey = "INSIGHTS_FEEDBACK" ) // Values returns all known values for FeedbackKey. Note that this can be expanded @@ -83,9 +83,9 @@ type FeedbackValue string // Enum values for FeedbackValue const ( - FeedbackValueNot_specified FeedbackValue = "NOT_SPECIFIED" - FeedbackValueUseful FeedbackValue = "USEFUL" - FeedbackValueNot_useful FeedbackValue = "NOT_USEFUL" + FeedbackValueNotSpecified FeedbackValue = "NOT_SPECIFIED" + FeedbackValueUseful FeedbackValue = "USEFUL" + FeedbackValueNotUseful FeedbackValue = "NOT_USEFUL" ) // Values returns all known values for FeedbackValue. Note that this can be @@ -163,11 +163,11 @@ type Tier string // Enum values for Tier const ( - TierDefault Tier = "DEFAULT" - TierDot_net_core Tier = "DOT_NET_CORE" - TierDot_net_worker Tier = "DOT_NET_WORKER" - TierDot_net_web Tier = "DOT_NET_WEB" - TierSql_server Tier = "SQL_SERVER" + TierDefault Tier = "DEFAULT" + TierDotNetCore Tier = "DOT_NET_CORE" + TierDotNetWorker Tier = "DOT_NET_WORKER" + TierDotNetWeb Tier = "DOT_NET_WEB" + TierSqlServer Tier = "SQL_SERVER" ) // Values returns all known values for Tier. Note that this can be expanded in the diff --git a/service/applicationinsights/types/types.go b/service/applicationinsights/types/types.go index 61e9898aac3..9b90a981bd5 100644 --- a/service/applicationinsights/types/types.go +++ b/service/applicationinsights/types/types.go @@ -46,11 +46,11 @@ type ApplicationInfo struct { // The issues on the user side that block Application Insights from successfully // monitoring an application. 
Example remarks include: // - // * “Configuring - // application, detected 1 Errors, 3 Warnings” + // * “Configuring application, + // detected 1 Errors, 3 Warnings” // - // * “Configuring application, - // detected 1 Unconfigured Components” + // * “Configuring application, detected 1 + // Unconfigured Components” Remarks *string // The name of the resource group used for the application. @@ -265,15 +265,15 @@ type RelatedObservations struct { // letters, digits, white space, or one of the following symbols: _ . : / = + -. // The following additional restrictions apply to tags: // -// * Tag keys and values -// are case sensitive. +// * Tag keys and values are +// case sensitive. // -// * For each associated resource, each tag key must be -// unique and it can have only one value. +// * For each associated resource, each tag key must be unique and +// it can have only one value. // -// * The aws: prefix is reserved for -// use by AWS; you can’t use it in any tag keys or values that you define. In -// addition, you can't edit or remove tag keys or values that use this prefix. +// * The aws: prefix is reserved for use by AWS; you +// can’t use it in any tag keys or values that you define. In addition, you can't +// edit or remove tag keys or values that use this prefix. type Tag struct { // One part of a key-value pair that defines a tag. The maximum length of a tag key diff --git a/service/appmesh/types/enums.go b/service/appmesh/types/enums.go index 7e1ecdda9bb..3b63da032c7 100644 --- a/service/appmesh/types/enums.go +++ b/service/appmesh/types/enums.go @@ -24,8 +24,8 @@ type EgressFilterType string // Enum values for EgressFilterType const ( - EgressFilterTypeAllow_all EgressFilterType = "ALLOW_ALL" - EgressFilterTypeDrop_all EgressFilterType = "DROP_ALL" + EgressFilterTypeAllowAll EgressFilterType = "ALLOW_ALL" + EgressFilterTypeDropAll EgressFilterType = "DROP_ALL" ) // Values returns all known values for EgressFilterType. Note that this can be @@ -62,11 +62,11 @@ type GrpcRetryPolicyEvent string // Enum values for GrpcRetryPolicyEvent const ( - GrpcRetryPolicyEventCancelled GrpcRetryPolicyEvent = "cancelled" - GrpcRetryPolicyEventDeadline_exceeded GrpcRetryPolicyEvent = "deadline-exceeded" - GrpcRetryPolicyEventInternal GrpcRetryPolicyEvent = "internal" - GrpcRetryPolicyEventResource_exhausted GrpcRetryPolicyEvent = "resource-exhausted" - GrpcRetryPolicyEventUnavailable GrpcRetryPolicyEvent = "unavailable" + GrpcRetryPolicyEventCancelled GrpcRetryPolicyEvent = "cancelled" + GrpcRetryPolicyEventDeadlineExceeded GrpcRetryPolicyEvent = "deadline-exceeded" + GrpcRetryPolicyEventInternal GrpcRetryPolicyEvent = "internal" + GrpcRetryPolicyEventResourceExhausted GrpcRetryPolicyEvent = "resource-exhausted" + GrpcRetryPolicyEventUnavailable GrpcRetryPolicyEvent = "unavailable" ) // Values returns all known values for GrpcRetryPolicyEvent. Note that this can be @@ -218,7 +218,7 @@ type TcpRetryPolicyEvent string // Enum values for TcpRetryPolicyEvent const ( - TcpRetryPolicyEventConnection_error TcpRetryPolicyEvent = "connection-error" + TcpRetryPolicyEventConnectionError TcpRetryPolicyEvent = "connection-error" ) // Values returns all known values for TcpRetryPolicyEvent. Note that this can be diff --git a/service/appmesh/types/types.go b/service/appmesh/types/types.go index 19e95a0b420..e026221dc24 100644 --- a/service/appmesh/types/types.go +++ b/service/appmesh/types/types.go @@ -329,16 +329,16 @@ type GrpcRetryPolicy struct { // Specify at least one of the following values. 
// - // * server-error – HTTP status + // * server-error – HTTP status // codes 500, 501, 502, 503, 504, 505, 506, 507, 508, 510, and 511 // - // * - // gateway-error – HTTP status codes 502, 503, and 504 + // * gateway-error + // – HTTP status codes 502, 503, and 504 // - // * client-error – HTTP - // status code 409 + // * client-error – HTTP status code 409 // - // * stream-error – Retry on refused stream + // * + // stream-error – Retry on refused stream HttpRetryEvents []*string // Specify a valid value. @@ -599,16 +599,16 @@ type HttpRetryPolicy struct { // Specify at least one of the following values. // - // * server-error – HTTP status + // * server-error – HTTP status // codes 500, 501, 502, 503, 504, 505, 506, 507, 508, 510, and 511 // - // * - // gateway-error – HTTP status codes 502, 503, and 504 + // * gateway-error + // – HTTP status codes 502, 503, and 504 // - // * client-error – HTTP - // status code 409 + // * client-error – HTTP status code 409 // - // * stream-error – Retry on refused stream + // * + // stream-error – Retry on refused stream HttpRetryEvents []*string // Specify a valid value. @@ -757,14 +757,14 @@ type ListenerTls struct { // Specify one of the following modes. // - // * STRICT – Listener only accepts + // * STRICT – Listener only accepts // connections with TLS enabled. // - // * PERMISSIVE – Listener accepts connections - // with or without TLS enabled. + // * PERMISSIVE – Listener accepts connections with + // or without TLS enabled. // - // * DISABLED – Listener only accepts connections - // without TLS. + // * DISABLED – Listener only accepts connections without + // TLS. // // This member is required. Mode ListenerTlsMode @@ -1378,14 +1378,14 @@ type VirtualGatewayListenerTls struct { // Specify one of the following modes. // - // * STRICT – Listener only accepts + // * STRICT – Listener only accepts // connections with TLS enabled. // - // * PERMISSIVE – Listener accepts connections - // with or without TLS enabled. + // * PERMISSIVE – Listener accepts connections with + // or without TLS enabled. // - // * DISABLED – Listener only accepts connections - // without TLS. + // * DISABLED – Listener only accepts connections without + // TLS. // // This member is required. Mode VirtualGatewayListenerTlsMode diff --git a/service/appstream/api_op_CreateFleet.go b/service/appstream/api_op_CreateFleet.go index 17bd46aaeaa..c7f9663864b 100644 --- a/service/appstream/api_op_CreateFleet.go +++ b/service/appstream/api_op_CreateFleet.go @@ -38,84 +38,82 @@ type CreateFleetInput struct { // The instance type to use when launching fleet instances. 
The following instance // types are available: // - // * stream.standard.medium + // * stream.standard.medium // - // * - // stream.standard.large + // * stream.standard.large // - // * stream.compute.large - // - // * stream.compute.xlarge + // * + // stream.compute.large // + // * stream.compute.xlarge // // * stream.compute.2xlarge // - // * stream.compute.4xlarge - // - // * - // stream.compute.8xlarge + // * + // stream.compute.4xlarge // - // * stream.memory.large + // * stream.compute.8xlarge // - // * stream.memory.xlarge + // * stream.memory.large // + // * + // stream.memory.xlarge // // * stream.memory.2xlarge // - // * stream.memory.4xlarge + // * stream.memory.4xlarge // - // * + // * // stream.memory.8xlarge // - // * stream.memory.z1d.large + // * stream.memory.z1d.large // - // * - // stream.memory.z1d.xlarge + // * stream.memory.z1d.xlarge // - // * stream.memory.z1d.2xlarge + // * + // stream.memory.z1d.2xlarge // - // * - // stream.memory.z1d.3xlarge + // * stream.memory.z1d.3xlarge // - // * stream.memory.z1d.6xlarge + // * + // stream.memory.z1d.6xlarge // - // * - // stream.memory.z1d.12xlarge + // * stream.memory.z1d.12xlarge // - // * stream.graphics-design.large + // * + // stream.graphics-design.large // - // * - // stream.graphics-design.xlarge + // * stream.graphics-design.xlarge // - // * stream.graphics-design.2xlarge + // * + // stream.graphics-design.2xlarge // - // * - // stream.graphics-design.4xlarge + // * stream.graphics-design.4xlarge // - // * stream.graphics-desktop.2xlarge + // * + // stream.graphics-desktop.2xlarge // - // * - // stream.graphics.g4dn.xlarge + // * stream.graphics.g4dn.xlarge // - // * stream.graphics.g4dn.2xlarge + // * + // stream.graphics.g4dn.2xlarge // - // * - // stream.graphics.g4dn.4xlarge + // * stream.graphics.g4dn.4xlarge // - // * stream.graphics.g4dn.8xlarge + // * + // stream.graphics.g4dn.8xlarge // - // * - // stream.graphics.g4dn.12xlarge + // * stream.graphics.g4dn.12xlarge // - // * stream.graphics.g4dn.16xlarge + // * + // stream.graphics.g4dn.16xlarge // - // * - // stream.graphics-pro.4xlarge + // * stream.graphics-pro.4xlarge // - // * stream.graphics-pro.8xlarge + // * + // stream.graphics-pro.8xlarge // - // * - // stream.graphics-pro.16xlarge + // * stream.graphics-pro.16xlarge // // This member is required. InstanceType *string diff --git a/service/appstream/api_op_CreateImageBuilder.go b/service/appstream/api_op_CreateImageBuilder.go index 73aaefeb829..2bbbc99ab94 100644 --- a/service/appstream/api_op_CreateImageBuilder.go +++ b/service/appstream/api_op_CreateImageBuilder.go @@ -34,83 +34,82 @@ type CreateImageBuilderInput struct { // The instance type to use when launching the image builder. 
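The instance-type lists reflowed in the CreateFleet and CreateImageBuilder hunks above are plain strings on the input shapes. A hedged sketch of a CreateFleet call using one of the documented values; the fleet and image names are placeholders, aws.String and aws.Int32 are the usual pointer helpers, client construction is omitted, and the field shapes follow this snapshot of the generated code rather than any later revision:

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/appstream"
	"github.com/aws/aws-sdk-go-v2/service/appstream/types"
)

// createExampleFleet sketches a CreateFleet call with one of the instance
// types from the list above. Error handling and optional members are left
// out; this is an illustration, not the canonical usage.
func createExampleFleet(ctx context.Context, client *appstream.Client) error {
	_, err := client.CreateFleet(ctx, &appstream.CreateFleetInput{
		Name:         aws.String("example-fleet"), // placeholder name
		ImageName:    aws.String("example-image"), // placeholder image
		InstanceType: aws.String("stream.standard.medium"),
		ComputeCapacity: &types.ComputeCapacity{
			DesiredInstances: aws.Int32(1),
		},
	})
	return err
}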
The following // instance types are available: // - // * stream.standard.medium + // * stream.standard.medium // - // * + // * // stream.standard.large // - // * stream.compute.large + // * stream.compute.large // - // * stream.compute.xlarge + // * stream.compute.xlarge // + // * + // stream.compute.2xlarge // - // * stream.compute.2xlarge + // * stream.compute.4xlarge // - // * stream.compute.4xlarge + // * stream.compute.8xlarge // - // * - // stream.compute.8xlarge - // - // * stream.memory.large - // - // * stream.memory.xlarge + // * + // stream.memory.large // + // * stream.memory.xlarge // // * stream.memory.2xlarge // - // * stream.memory.4xlarge + // * + // stream.memory.4xlarge // - // * - // stream.memory.8xlarge + // * stream.memory.8xlarge // - // * stream.memory.z1d.large + // * stream.memory.z1d.large // - // * + // * // stream.memory.z1d.xlarge // - // * stream.memory.z1d.2xlarge + // * stream.memory.z1d.2xlarge // - // * + // * // stream.memory.z1d.3xlarge // - // * stream.memory.z1d.6xlarge + // * stream.memory.z1d.6xlarge // - // * + // * // stream.memory.z1d.12xlarge // - // * stream.graphics-design.large + // * stream.graphics-design.large // - // * + // * // stream.graphics-design.xlarge // - // * stream.graphics-design.2xlarge + // * stream.graphics-design.2xlarge // - // * + // * // stream.graphics-design.4xlarge // - // * stream.graphics-desktop.2xlarge + // * stream.graphics-desktop.2xlarge // - // * + // * // stream.graphics.g4dn.xlarge // - // * stream.graphics.g4dn.2xlarge + // * stream.graphics.g4dn.2xlarge // - // * + // * // stream.graphics.g4dn.4xlarge // - // * stream.graphics.g4dn.8xlarge + // * stream.graphics.g4dn.8xlarge // - // * + // * // stream.graphics.g4dn.12xlarge // - // * stream.graphics.g4dn.16xlarge + // * stream.graphics.g4dn.16xlarge // - // * + // * // stream.graphics-pro.4xlarge // - // * stream.graphics-pro.8xlarge + // * stream.graphics-pro.8xlarge // - // * + // * // stream.graphics-pro.16xlarge // // This member is required. diff --git a/service/appstream/api_op_DescribeUserStackAssociations.go b/service/appstream/api_op_DescribeUserStackAssociations.go index 9bd843b159b..c5fc5feacb7 100644 --- a/service/appstream/api_op_DescribeUserStackAssociations.go +++ b/service/appstream/api_op_DescribeUserStackAssociations.go @@ -14,11 +14,11 @@ import ( // Retrieves a list that describes the UserStackAssociation objects. You must // specify either or both of the following: // -// * The stack name +// * The stack name // -// * The user -// name (email address of the user associated with the stack) and the -// authentication type for the user +// * The user name +// (email address of the user associated with the stack) and the authentication +// type for the user func (c *Client) DescribeUserStackAssociations(ctx context.Context, params *DescribeUserStackAssociationsInput, optFns ...func(*Options)) (*DescribeUserStackAssociationsOutput, error) { if params == nil { params = &DescribeUserStackAssociationsInput{} diff --git a/service/appstream/api_op_UpdateFleet.go b/service/appstream/api_op_UpdateFleet.go index 3e90e9c9230..ea1a487809e 100644 --- a/service/appstream/api_op_UpdateFleet.go +++ b/service/appstream/api_op_UpdateFleet.go @@ -103,84 +103,82 @@ type UpdateFleetInput struct { // The instance type to use when launching fleet instances. 
The following instance // types are available: // - // * stream.standard.medium + // * stream.standard.medium // - // * - // stream.standard.large + // * stream.standard.large // - // * stream.compute.large - // - // * stream.compute.xlarge + // * + // stream.compute.large // + // * stream.compute.xlarge // // * stream.compute.2xlarge // - // * stream.compute.4xlarge - // - // * - // stream.compute.8xlarge + // * + // stream.compute.4xlarge // - // * stream.memory.large + // * stream.compute.8xlarge // - // * stream.memory.xlarge + // * stream.memory.large // + // * + // stream.memory.xlarge // // * stream.memory.2xlarge // - // * stream.memory.4xlarge + // * stream.memory.4xlarge // - // * + // * // stream.memory.8xlarge // - // * stream.memory.z1d.large + // * stream.memory.z1d.large // - // * - // stream.memory.z1d.xlarge + // * stream.memory.z1d.xlarge // - // * stream.memory.z1d.2xlarge + // * + // stream.memory.z1d.2xlarge // - // * - // stream.memory.z1d.3xlarge + // * stream.memory.z1d.3xlarge // - // * stream.memory.z1d.6xlarge + // * + // stream.memory.z1d.6xlarge // - // * - // stream.memory.z1d.12xlarge + // * stream.memory.z1d.12xlarge // - // * stream.graphics-design.large + // * + // stream.graphics-design.large // - // * - // stream.graphics-design.xlarge + // * stream.graphics-design.xlarge // - // * stream.graphics-design.2xlarge + // * + // stream.graphics-design.2xlarge // - // * - // stream.graphics-design.4xlarge + // * stream.graphics-design.4xlarge // - // * stream.graphics-desktop.2xlarge + // * + // stream.graphics-desktop.2xlarge // - // * - // stream.graphics.g4dn.xlarge + // * stream.graphics.g4dn.xlarge // - // * stream.graphics.g4dn.2xlarge + // * + // stream.graphics.g4dn.2xlarge // - // * - // stream.graphics.g4dn.4xlarge + // * stream.graphics.g4dn.4xlarge // - // * stream.graphics.g4dn.8xlarge + // * + // stream.graphics.g4dn.8xlarge // - // * - // stream.graphics.g4dn.12xlarge + // * stream.graphics.g4dn.12xlarge // - // * stream.graphics.g4dn.16xlarge + // * + // stream.graphics.g4dn.16xlarge // - // * - // stream.graphics-pro.4xlarge + // * stream.graphics-pro.4xlarge // - // * stream.graphics-pro.8xlarge + // * + // stream.graphics-pro.8xlarge // - // * - // stream.graphics-pro.16xlarge + // * stream.graphics-pro.16xlarge InstanceType *string // The maximum amount of time that a streaming session can remain active, in diff --git a/service/appstream/doc.go b/service/appstream/doc.go index 24cac88cada..c995b7072ab 100644 --- a/service/appstream/doc.go +++ b/service/appstream/doc.go @@ -17,9 +17,9 @@ // in the Amazon AppStream 2.0 Administration Guide. 
To learn more about AppStream // 2.0, see the following resources: // -// * Amazon AppStream 2.0 product page +// * Amazon AppStream 2.0 product page // (http://aws.amazon.com/appstream2) // -// * Amazon AppStream 2.0 documentation +// * Amazon AppStream 2.0 documentation // (http://aws.amazon.com/documentation/appstream2) package appstream diff --git a/service/appstream/types/enums.go b/service/appstream/types/enums.go index 585fe89d3c4..0a561ab6d97 100644 --- a/service/appstream/types/enums.go +++ b/service/appstream/types/enums.go @@ -22,11 +22,11 @@ type Action string // Enum values for Action const ( - ActionClipboard_copy_from_local_device Action = "CLIPBOARD_COPY_FROM_LOCAL_DEVICE" - ActionClipboard_copy_to_local_device Action = "CLIPBOARD_COPY_TO_LOCAL_DEVICE" - ActionFile_upload Action = "FILE_UPLOAD" - ActionFile_download Action = "FILE_DOWNLOAD" - ActionPrinting_to_local_device Action = "PRINTING_TO_LOCAL_DEVICE" + ActionClipboardCopyFromLocalDevice Action = "CLIPBOARD_COPY_FROM_LOCAL_DEVICE" + ActionClipboardCopyToLocalDevice Action = "CLIPBOARD_COPY_TO_LOCAL_DEVICE" + ActionFileUpload Action = "FILE_UPLOAD" + ActionFileDownload Action = "FILE_DOWNLOAD" + ActionPrintingToLocalDevice Action = "PRINTING_TO_LOCAL_DEVICE" ) // Values returns all known values for Action. Note that this can be expanded in @@ -66,10 +66,10 @@ type FleetAttribute string // Enum values for FleetAttribute const ( - FleetAttributeVpc_configuration FleetAttribute = "VPC_CONFIGURATION" - FleetAttributeVpc_configuration_security_group_ids FleetAttribute = "VPC_CONFIGURATION_SECURITY_GROUP_IDS" - FleetAttributeDomain_join_info FleetAttribute = "DOMAIN_JOIN_INFO" - FleetAttributeIam_role_arn FleetAttribute = "IAM_ROLE_ARN" + FleetAttributeVpcConfiguration FleetAttribute = "VPC_CONFIGURATION" + FleetAttributeVpcConfigurationSecurityGroupIds FleetAttribute = "VPC_CONFIGURATION_SECURITY_GROUP_IDS" + FleetAttributeDomainJoinInfo FleetAttribute = "DOMAIN_JOIN_INFO" + FleetAttributeIamRoleArn FleetAttribute = "IAM_ROLE_ARN" ) // Values returns all known values for FleetAttribute. 
Note that this can be @@ -88,34 +88,34 @@ type FleetErrorCode string // Enum values for FleetErrorCode const ( - FleetErrorCodeIam_service_role_missing_eni_describe_action FleetErrorCode = "IAM_SERVICE_ROLE_MISSING_ENI_DESCRIBE_ACTION" - FleetErrorCodeIam_service_role_missing_eni_create_action FleetErrorCode = "IAM_SERVICE_ROLE_MISSING_ENI_CREATE_ACTION" - FleetErrorCodeIam_service_role_missing_eni_delete_action FleetErrorCode = "IAM_SERVICE_ROLE_MISSING_ENI_DELETE_ACTION" - FleetErrorCodeNetwork_interface_limit_exceeded FleetErrorCode = "NETWORK_INTERFACE_LIMIT_EXCEEDED" - FleetErrorCodeInternal_service_error FleetErrorCode = "INTERNAL_SERVICE_ERROR" - FleetErrorCodeIam_service_role_is_missing FleetErrorCode = "IAM_SERVICE_ROLE_IS_MISSING" - FleetErrorCodeMachine_role_is_missing FleetErrorCode = "MACHINE_ROLE_IS_MISSING" - FleetErrorCodeSts_disabled_in_region FleetErrorCode = "STS_DISABLED_IN_REGION" - FleetErrorCodeSubnet_has_insufficient_ip_addresses FleetErrorCode = "SUBNET_HAS_INSUFFICIENT_IP_ADDRESSES" - FleetErrorCodeIam_service_role_missing_describe_subnet_action FleetErrorCode = "IAM_SERVICE_ROLE_MISSING_DESCRIBE_SUBNET_ACTION" - FleetErrorCodeSubnet_not_found FleetErrorCode = "SUBNET_NOT_FOUND" - FleetErrorCodeImage_not_found FleetErrorCode = "IMAGE_NOT_FOUND" - FleetErrorCodeInvalid_subnet_configuration FleetErrorCode = "INVALID_SUBNET_CONFIGURATION" - FleetErrorCodeSecurity_groups_not_found FleetErrorCode = "SECURITY_GROUPS_NOT_FOUND" - FleetErrorCodeIgw_not_attached FleetErrorCode = "IGW_NOT_ATTACHED" - FleetErrorCodeIam_service_role_missing_describe_security_groups_action FleetErrorCode = "IAM_SERVICE_ROLE_MISSING_DESCRIBE_SECURITY_GROUPS_ACTION" - FleetErrorCodeDomain_join_error_file_not_found FleetErrorCode = "DOMAIN_JOIN_ERROR_FILE_NOT_FOUND" - FleetErrorCodeDomain_join_error_access_denied FleetErrorCode = "DOMAIN_JOIN_ERROR_ACCESS_DENIED" - FleetErrorCodeDomain_join_error_logon_failure FleetErrorCode = "DOMAIN_JOIN_ERROR_LOGON_FAILURE" - FleetErrorCodeDomain_join_error_invalid_parameter FleetErrorCode = "DOMAIN_JOIN_ERROR_INVALID_PARAMETER" - FleetErrorCodeDomain_join_error_more_data FleetErrorCode = "DOMAIN_JOIN_ERROR_MORE_DATA" - FleetErrorCodeDomain_join_error_no_such_domain FleetErrorCode = "DOMAIN_JOIN_ERROR_NO_SUCH_DOMAIN" - FleetErrorCodeDomain_join_error_not_supported FleetErrorCode = "DOMAIN_JOIN_ERROR_NOT_SUPPORTED" - FleetErrorCodeDomain_join_nerr_invalid_workgroup_name FleetErrorCode = "DOMAIN_JOIN_NERR_INVALID_WORKGROUP_NAME" - FleetErrorCodeDomain_join_nerr_workstation_not_started FleetErrorCode = "DOMAIN_JOIN_NERR_WORKSTATION_NOT_STARTED" - FleetErrorCodeDomain_join_error_ds_machine_account_quota_exceeded FleetErrorCode = "DOMAIN_JOIN_ERROR_DS_MACHINE_ACCOUNT_QUOTA_EXCEEDED" - FleetErrorCodeDomain_join_nerr_password_expired FleetErrorCode = "DOMAIN_JOIN_NERR_PASSWORD_EXPIRED" - FleetErrorCodeDomain_join_internal_service_error FleetErrorCode = "DOMAIN_JOIN_INTERNAL_SERVICE_ERROR" + FleetErrorCodeIamServiceRoleMissingEniDescribeAction FleetErrorCode = "IAM_SERVICE_ROLE_MISSING_ENI_DESCRIBE_ACTION" + FleetErrorCodeIamServiceRoleMissingEniCreateAction FleetErrorCode = "IAM_SERVICE_ROLE_MISSING_ENI_CREATE_ACTION" + FleetErrorCodeIamServiceRoleMissingEniDeleteAction FleetErrorCode = "IAM_SERVICE_ROLE_MISSING_ENI_DELETE_ACTION" + FleetErrorCodeNetworkInterfaceLimitExceeded FleetErrorCode = "NETWORK_INTERFACE_LIMIT_EXCEEDED" + FleetErrorCodeInternalServiceError FleetErrorCode = "INTERNAL_SERVICE_ERROR" + FleetErrorCodeIamServiceRoleIsMissing FleetErrorCode = 
"IAM_SERVICE_ROLE_IS_MISSING" + FleetErrorCodeMachineRoleIsMissing FleetErrorCode = "MACHINE_ROLE_IS_MISSING" + FleetErrorCodeStsDisabledInRegion FleetErrorCode = "STS_DISABLED_IN_REGION" + FleetErrorCodeSubnetHasInsufficientIpAddresses FleetErrorCode = "SUBNET_HAS_INSUFFICIENT_IP_ADDRESSES" + FleetErrorCodeIamServiceRoleMissingDescribeSubnetAction FleetErrorCode = "IAM_SERVICE_ROLE_MISSING_DESCRIBE_SUBNET_ACTION" + FleetErrorCodeSubnetNotFound FleetErrorCode = "SUBNET_NOT_FOUND" + FleetErrorCodeImageNotFound FleetErrorCode = "IMAGE_NOT_FOUND" + FleetErrorCodeInvalidSubnetConfiguration FleetErrorCode = "INVALID_SUBNET_CONFIGURATION" + FleetErrorCodeSecurityGroupsNotFound FleetErrorCode = "SECURITY_GROUPS_NOT_FOUND" + FleetErrorCodeIgwNotAttached FleetErrorCode = "IGW_NOT_ATTACHED" + FleetErrorCodeIamServiceRoleMissingDescribeSecurityGroupsAction FleetErrorCode = "IAM_SERVICE_ROLE_MISSING_DESCRIBE_SECURITY_GROUPS_ACTION" + FleetErrorCodeDomainJoinErrorFileNotFound FleetErrorCode = "DOMAIN_JOIN_ERROR_FILE_NOT_FOUND" + FleetErrorCodeDomainJoinErrorAccessDenied FleetErrorCode = "DOMAIN_JOIN_ERROR_ACCESS_DENIED" + FleetErrorCodeDomainJoinErrorLogonFailure FleetErrorCode = "DOMAIN_JOIN_ERROR_LOGON_FAILURE" + FleetErrorCodeDomainJoinErrorInvalidParameter FleetErrorCode = "DOMAIN_JOIN_ERROR_INVALID_PARAMETER" + FleetErrorCodeDomainJoinErrorMoreData FleetErrorCode = "DOMAIN_JOIN_ERROR_MORE_DATA" + FleetErrorCodeDomainJoinErrorNoSuchDomain FleetErrorCode = "DOMAIN_JOIN_ERROR_NO_SUCH_DOMAIN" + FleetErrorCodeDomainJoinErrorNotSupported FleetErrorCode = "DOMAIN_JOIN_ERROR_NOT_SUPPORTED" + FleetErrorCodeDomainJoinNerrInvalidWorkgroupName FleetErrorCode = "DOMAIN_JOIN_NERR_INVALID_WORKGROUP_NAME" + FleetErrorCodeDomainJoinNerrWorkstationNotStarted FleetErrorCode = "DOMAIN_JOIN_NERR_WORKSTATION_NOT_STARTED" + FleetErrorCodeDomainJoinErrorDsMachineAccountQuotaExceeded FleetErrorCode = "DOMAIN_JOIN_ERROR_DS_MACHINE_ACCOUNT_QUOTA_EXCEEDED" + FleetErrorCodeDomainJoinNerrPasswordExpired FleetErrorCode = "DOMAIN_JOIN_NERR_PASSWORD_EXPIRED" + FleetErrorCodeDomainJoinInternalServiceError FleetErrorCode = "DOMAIN_JOIN_INTERNAL_SERVICE_ERROR" ) // Values returns all known values for FleetErrorCode. Note that this can be @@ -180,8 +180,8 @@ type FleetType string // Enum values for FleetType const ( - FleetTypeAlways_on FleetType = "ALWAYS_ON" - FleetTypeOn_demand FleetType = "ON_DEMAND" + FleetTypeAlwaysOn FleetType = "ALWAYS_ON" + FleetTypeOnDemand FleetType = "ON_DEMAND" ) // Values returns all known values for FleetType. 
Note that this can be expanded in @@ -198,15 +198,15 @@ type ImageBuilderState string // Enum values for ImageBuilderState const ( - ImageBuilderStatePending ImageBuilderState = "PENDING" - ImageBuilderStateUpdating_agent ImageBuilderState = "UPDATING_AGENT" - ImageBuilderStateRunning ImageBuilderState = "RUNNING" - ImageBuilderStateStopping ImageBuilderState = "STOPPING" - ImageBuilderStateStopped ImageBuilderState = "STOPPED" - ImageBuilderStateRebooting ImageBuilderState = "REBOOTING" - ImageBuilderStateSnapshotting ImageBuilderState = "SNAPSHOTTING" - ImageBuilderStateDeleting ImageBuilderState = "DELETING" - ImageBuilderStateFailed ImageBuilderState = "FAILED" + ImageBuilderStatePending ImageBuilderState = "PENDING" + ImageBuilderStateUpdatingAgent ImageBuilderState = "UPDATING_AGENT" + ImageBuilderStateRunning ImageBuilderState = "RUNNING" + ImageBuilderStateStopping ImageBuilderState = "STOPPING" + ImageBuilderStateStopped ImageBuilderState = "STOPPED" + ImageBuilderStateRebooting ImageBuilderState = "REBOOTING" + ImageBuilderStateSnapshotting ImageBuilderState = "SNAPSHOTTING" + ImageBuilderStateDeleting ImageBuilderState = "DELETING" + ImageBuilderStateFailed ImageBuilderState = "FAILED" ) // Values returns all known values for ImageBuilderState. Note that this can be @@ -230,8 +230,8 @@ type ImageBuilderStateChangeReasonCode string // Enum values for ImageBuilderStateChangeReasonCode const ( - ImageBuilderStateChangeReasonCodeInternal_error ImageBuilderStateChangeReasonCode = "INTERNAL_ERROR" - ImageBuilderStateChangeReasonCodeImage_unavailable ImageBuilderStateChangeReasonCode = "IMAGE_UNAVAILABLE" + ImageBuilderStateChangeReasonCodeInternalError ImageBuilderStateChangeReasonCode = "INTERNAL_ERROR" + ImageBuilderStateChangeReasonCodeImageUnavailable ImageBuilderStateChangeReasonCode = "IMAGE_UNAVAILABLE" ) // Values returns all known values for ImageBuilderStateChangeReasonCode. Note that @@ -273,9 +273,9 @@ type ImageStateChangeReasonCode string // Enum values for ImageStateChangeReasonCode const ( - ImageStateChangeReasonCodeInternal_error ImageStateChangeReasonCode = "INTERNAL_ERROR" - ImageStateChangeReasonCodeImage_builder_not_available ImageStateChangeReasonCode = "IMAGE_BUILDER_NOT_AVAILABLE" - ImageStateChangeReasonCodeImage_copy_failure ImageStateChangeReasonCode = "IMAGE_COPY_FAILURE" + ImageStateChangeReasonCodeInternalError ImageStateChangeReasonCode = "INTERNAL_ERROR" + ImageStateChangeReasonCodeImageBuilderNotAvailable ImageStateChangeReasonCode = "IMAGE_BUILDER_NOT_AVAILABLE" + ImageStateChangeReasonCodeImageCopyFailure ImageStateChangeReasonCode = "IMAGE_COPY_FAILURE" ) // Values returns all known values for ImageStateChangeReasonCode. Note that this @@ -329,9 +329,9 @@ type PlatformType string // Enum values for PlatformType const ( - PlatformTypeWindows PlatformType = "WINDOWS" - PlatformTypeWindows_server_2016 PlatformType = "WINDOWS_SERVER_2016" - PlatformTypeWindows_server_2019 PlatformType = "WINDOWS_SERVER_2019" + PlatformTypeWindows PlatformType = "WINDOWS" + PlatformTypeWindowsServer2016 PlatformType = "WINDOWS_SERVER_2016" + PlatformTypeWindowsServer2019 PlatformType = "WINDOWS_SERVER_2019" ) // Values returns all known values for PlatformType. 
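With the FleetErrorCode constants above now CamelCase, code that branches on fleet errors reads as in the following small sketch, which assumes only the appstream types package from this repository:

package example

import "github.com/aws/aws-sdk-go-v2/service/appstream/types"

// describeFleetError maps a few of the renamed FleetErrorCode values to a
// human-readable hint; unlisted codes fall through to the raw string value.
func describeFleetError(code types.FleetErrorCode) string {
	switch code {
	case types.FleetErrorCodeSubnetNotFound:
		return "the configured subnet does not exist"
	case types.FleetErrorCodeImageNotFound:
		return "the requested image could not be found"
	case types.FleetErrorCodeInternalServiceError:
		return "transient internal service error"
	default:
		return string(code)
	}
}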
Note that this can be expanded @@ -349,8 +349,8 @@ type SessionConnectionState string // Enum values for SessionConnectionState const ( - SessionConnectionStateConnected SessionConnectionState = "CONNECTED" - SessionConnectionStateNot_connected SessionConnectionState = "NOT_CONNECTED" + SessionConnectionStateConnected SessionConnectionState = "CONNECTED" + SessionConnectionStateNotConnected SessionConnectionState = "NOT_CONNECTED" ) // Values returns all known values for SessionConnectionState. Note that this can @@ -387,17 +387,17 @@ type StackAttribute string // Enum values for StackAttribute const ( - StackAttributeStorage_connectors StackAttribute = "STORAGE_CONNECTORS" - StackAttributeStorage_connector_homefolders StackAttribute = "STORAGE_CONNECTOR_HOMEFOLDERS" - StackAttributeStorage_connector_google_drive StackAttribute = "STORAGE_CONNECTOR_GOOGLE_DRIVE" - StackAttributeStorage_connector_one_drive StackAttribute = "STORAGE_CONNECTOR_ONE_DRIVE" - StackAttributeRedirect_url StackAttribute = "REDIRECT_URL" - StackAttributeFeedback_url StackAttribute = "FEEDBACK_URL" - StackAttributeTheme_name StackAttribute = "THEME_NAME" - StackAttributeUser_settings StackAttribute = "USER_SETTINGS" - StackAttributeEmbed_host_domains StackAttribute = "EMBED_HOST_DOMAINS" - StackAttributeIam_role_arn StackAttribute = "IAM_ROLE_ARN" - StackAttributeAccess_endpoints StackAttribute = "ACCESS_ENDPOINTS" + StackAttributeStorageConnectors StackAttribute = "STORAGE_CONNECTORS" + StackAttributeStorageConnectorHomefolders StackAttribute = "STORAGE_CONNECTOR_HOMEFOLDERS" + StackAttributeStorageConnectorGoogleDrive StackAttribute = "STORAGE_CONNECTOR_GOOGLE_DRIVE" + StackAttributeStorageConnectorOneDrive StackAttribute = "STORAGE_CONNECTOR_ONE_DRIVE" + StackAttributeRedirectUrl StackAttribute = "REDIRECT_URL" + StackAttributeFeedbackUrl StackAttribute = "FEEDBACK_URL" + StackAttributeThemeName StackAttribute = "THEME_NAME" + StackAttributeUserSettings StackAttribute = "USER_SETTINGS" + StackAttributeEmbedHostDomains StackAttribute = "EMBED_HOST_DOMAINS" + StackAttributeIamRoleArn StackAttribute = "IAM_ROLE_ARN" + StackAttributeAccessEndpoints StackAttribute = "ACCESS_ENDPOINTS" ) // Values returns all known values for StackAttribute. Note that this can be @@ -423,8 +423,8 @@ type StackErrorCode string // Enum values for StackErrorCode const ( - StackErrorCodeStorage_connector_error StackErrorCode = "STORAGE_CONNECTOR_ERROR" - StackErrorCodeInternal_service_error StackErrorCode = "INTERNAL_SERVICE_ERROR" + StackErrorCodeStorageConnectorError StackErrorCode = "STORAGE_CONNECTOR_ERROR" + StackErrorCodeInternalServiceError StackErrorCode = "INTERNAL_SERVICE_ERROR" ) // Values returns all known values for StackErrorCode. Note that this can be @@ -441,9 +441,9 @@ type StorageConnectorType string // Enum values for StorageConnectorType const ( - StorageConnectorTypeHomefolders StorageConnectorType = "HOMEFOLDERS" - StorageConnectorTypeGoogle_drive StorageConnectorType = "GOOGLE_DRIVE" - StorageConnectorTypeOne_drive StorageConnectorType = "ONE_DRIVE" + StorageConnectorTypeHomefolders StorageConnectorType = "HOMEFOLDERS" + StorageConnectorTypeGoogleDrive StorageConnectorType = "GOOGLE_DRIVE" + StorageConnectorTypeOneDrive StorageConnectorType = "ONE_DRIVE" ) // Values returns all known values for StorageConnectorType. 
Note that this can be @@ -479,9 +479,9 @@ type UsageReportExecutionErrorCode string // Enum values for UsageReportExecutionErrorCode const ( - UsageReportExecutionErrorCodeResource_not_found UsageReportExecutionErrorCode = "RESOURCE_NOT_FOUND" - UsageReportExecutionErrorCodeAccess_denied UsageReportExecutionErrorCode = "ACCESS_DENIED" - UsageReportExecutionErrorCodeInternal_service_error UsageReportExecutionErrorCode = "INTERNAL_SERVICE_ERROR" + UsageReportExecutionErrorCodeResourceNotFound UsageReportExecutionErrorCode = "RESOURCE_NOT_FOUND" + UsageReportExecutionErrorCodeAccessDenied UsageReportExecutionErrorCode = "ACCESS_DENIED" + UsageReportExecutionErrorCodeInternalServiceError UsageReportExecutionErrorCode = "INTERNAL_SERVICE_ERROR" ) // Values returns all known values for UsageReportExecutionErrorCode. Note that @@ -516,10 +516,10 @@ type UserStackAssociationErrorCode string // Enum values for UserStackAssociationErrorCode const ( - UserStackAssociationErrorCodeStack_not_found UserStackAssociationErrorCode = "STACK_NOT_FOUND" - UserStackAssociationErrorCodeUser_name_not_found UserStackAssociationErrorCode = "USER_NAME_NOT_FOUND" - UserStackAssociationErrorCodeDirectory_not_found UserStackAssociationErrorCode = "DIRECTORY_NOT_FOUND" - UserStackAssociationErrorCodeInternal_error UserStackAssociationErrorCode = "INTERNAL_ERROR" + UserStackAssociationErrorCodeStackNotFound UserStackAssociationErrorCode = "STACK_NOT_FOUND" + UserStackAssociationErrorCodeUserNameNotFound UserStackAssociationErrorCode = "USER_NAME_NOT_FOUND" + UserStackAssociationErrorCodeDirectoryNotFound UserStackAssociationErrorCode = "DIRECTORY_NOT_FOUND" + UserStackAssociationErrorCodeInternalError UserStackAssociationErrorCode = "INTERNAL_ERROR" ) // Values returns all known values for UserStackAssociationErrorCode. Note that diff --git a/service/appstream/types/types.go b/service/appstream/types/types.go index 7e539926772..1390017fe5e 100644 --- a/service/appstream/types/types.go +++ b/service/appstream/types/types.go @@ -155,84 +155,82 @@ type Fleet struct { // The instance type to use when launching fleet instances. 
The following instance // types are available: // - // * stream.standard.medium + // * stream.standard.medium // - // * - // stream.standard.large + // * stream.standard.large // - // * stream.compute.large - // - // * stream.compute.xlarge + // * + // stream.compute.large // + // * stream.compute.xlarge // // * stream.compute.2xlarge // - // * stream.compute.4xlarge - // - // * - // stream.compute.8xlarge + // * + // stream.compute.4xlarge // - // * stream.memory.large + // * stream.compute.8xlarge // - // * stream.memory.xlarge + // * stream.memory.large // + // * + // stream.memory.xlarge // // * stream.memory.2xlarge // - // * stream.memory.4xlarge + // * stream.memory.4xlarge // - // * + // * // stream.memory.8xlarge // - // * stream.memory.z1d.large + // * stream.memory.z1d.large // - // * - // stream.memory.z1d.xlarge + // * stream.memory.z1d.xlarge // - // * stream.memory.z1d.2xlarge + // * + // stream.memory.z1d.2xlarge // - // * - // stream.memory.z1d.3xlarge + // * stream.memory.z1d.3xlarge // - // * stream.memory.z1d.6xlarge + // * + // stream.memory.z1d.6xlarge // - // * - // stream.memory.z1d.12xlarge + // * stream.memory.z1d.12xlarge // - // * stream.graphics-design.large + // * + // stream.graphics-design.large // - // * - // stream.graphics-design.xlarge + // * stream.graphics-design.xlarge // - // * stream.graphics-design.2xlarge + // * + // stream.graphics-design.2xlarge // - // * - // stream.graphics-design.4xlarge + // * stream.graphics-design.4xlarge // - // * stream.graphics-desktop.2xlarge + // * + // stream.graphics-desktop.2xlarge // - // * - // stream.graphics.g4dn.xlarge + // * stream.graphics.g4dn.xlarge // - // * stream.graphics.g4dn.2xlarge + // * + // stream.graphics.g4dn.2xlarge // - // * - // stream.graphics.g4dn.4xlarge + // * stream.graphics.g4dn.4xlarge // - // * stream.graphics.g4dn.8xlarge + // * + // stream.graphics.g4dn.8xlarge // - // * - // stream.graphics.g4dn.12xlarge + // * stream.graphics.g4dn.12xlarge // - // * stream.graphics.g4dn.16xlarge + // * + // stream.graphics.g4dn.16xlarge // - // * - // stream.graphics-pro.4xlarge + // * stream.graphics-pro.4xlarge // - // * stream.graphics-pro.8xlarge + // * + // stream.graphics-pro.8xlarge // - // * - // stream.graphics-pro.16xlarge + // * stream.graphics-pro.16xlarge // // This member is required. InstanceType *string @@ -461,84 +459,82 @@ type ImageBuilder struct { // The instance type for the image builder. 
The following instance types are // available: // - // * stream.standard.medium + // * stream.standard.medium // - // * stream.standard.large + // * stream.standard.large // - // * + // * // stream.compute.large // - // * stream.compute.xlarge + // * stream.compute.xlarge // - // * - // stream.compute.2xlarge - // - // * stream.compute.4xlarge + // * stream.compute.2xlarge // - // * - // stream.compute.8xlarge + // * + // stream.compute.4xlarge // - // * stream.memory.large + // * stream.compute.8xlarge // - // * stream.memory.xlarge + // * stream.memory.large // + // * + // stream.memory.xlarge // // * stream.memory.2xlarge // - // * stream.memory.4xlarge + // * stream.memory.4xlarge // - // * + // * // stream.memory.8xlarge // - // * stream.memory.z1d.large + // * stream.memory.z1d.large // - // * - // stream.memory.z1d.xlarge + // * stream.memory.z1d.xlarge // - // * stream.memory.z1d.2xlarge + // * + // stream.memory.z1d.2xlarge // - // * - // stream.memory.z1d.3xlarge + // * stream.memory.z1d.3xlarge // - // * stream.memory.z1d.6xlarge + // * + // stream.memory.z1d.6xlarge // - // * - // stream.memory.z1d.12xlarge + // * stream.memory.z1d.12xlarge // - // * stream.graphics-design.large + // * + // stream.graphics-design.large // - // * - // stream.graphics-design.xlarge + // * stream.graphics-design.xlarge // - // * stream.graphics-design.2xlarge + // * + // stream.graphics-design.2xlarge // - // * - // stream.graphics-design.4xlarge + // * stream.graphics-design.4xlarge // - // * stream.graphics-desktop.2xlarge + // * + // stream.graphics-desktop.2xlarge // - // * - // stream.graphics.g4dn.xlarge + // * stream.graphics.g4dn.xlarge // - // * stream.graphics.g4dn.2xlarge + // * + // stream.graphics.g4dn.2xlarge // - // * - // stream.graphics.g4dn.4xlarge + // * stream.graphics.g4dn.4xlarge // - // * stream.graphics.g4dn.8xlarge + // * + // stream.graphics.g4dn.8xlarge // - // * - // stream.graphics.g4dn.12xlarge + // * stream.graphics.g4dn.12xlarge // - // * stream.graphics.g4dn.16xlarge + // * + // stream.graphics.g4dn.16xlarge // - // * - // stream.graphics-pro.4xlarge + // * stream.graphics-pro.4xlarge // - // * stream.graphics-pro.8xlarge + // * + // stream.graphics-pro.8xlarge // - // * - // stream.graphics-pro.16xlarge + // * stream.graphics-pro.16xlarge InstanceType *string // Describes the network details of the fleet or image builder instance. @@ -831,18 +827,18 @@ type User struct { // The status of the user in the user pool. The status can be one of the // following: // - // * UNCONFIRMED – The user is created but not confirmed. + // * UNCONFIRMED – The user is created but not confirmed. // - // * - // CONFIRMED – The user is confirmed. + // * CONFIRMED + // – The user is confirmed. // - // * ARCHIVED – The user is no longer - // active. + // * ARCHIVED – The user is no longer active. // - // * COMPROMISED – The user is disabled because of a potential - // security threat. + // * + // COMPROMISED – The user is disabled because of a potential security threat. // - // * UNKNOWN – The user status is not known. + // * + // UNKNOWN – The user status is not known. Status *string // The email address of the user. Users' email addresses are case-sensitive. diff --git a/service/appsync/api_op_CreateApiCache.go b/service/appsync/api_op_CreateApiCache.go index 6790662a5c0..be995786bbf 100644 --- a/service/appsync/api_op_CreateApiCache.go +++ b/service/appsync/api_op_CreateApiCache.go @@ -32,10 +32,10 @@ type CreateApiCacheInput struct { // Caching behavior. 
// - // * FULL_REQUEST_CACHING: All requests are fully cached. + // * FULL_REQUEST_CACHING: All requests are fully cached. // - // - // * PER_RESOLVER_CACHING: Individual resolvers that you specify are cached. + // * + // PER_RESOLVER_CACHING: Individual resolvers that you specify are cached. // // This member is required. ApiCachingBehavior types.ApiCachingBehavior @@ -52,46 +52,45 @@ type CreateApiCacheInput struct { // The cache instance type. Valid values are // - // * SMALL + // * SMALL // - // * MEDIUM + // * MEDIUM // - // * - // LARGE + // * LARGE // - // * XLARGE + // * + // XLARGE // - // * LARGE_2X + // * LARGE_2X // - // * LARGE_4X + // * LARGE_4X // - // * LARGE_8X (not - // available in all regions) + // * LARGE_8X (not available in all regions) // - // * LARGE_12X + // * + // LARGE_12X // - // Historically, instance types were - // identified by an EC2-style value. As of July 2020, this is deprecated, and the - // generic identifiers above should be used. The following legacy instance types - // are available, but their use is discouraged: + // Historically, instance types were identified by an EC2-style value. + // As of July 2020, this is deprecated, and the generic identifiers above should be + // used. The following legacy instance types are available, but their use is + // discouraged: // - // * T2_SMALL: A t2.small - // instance type. + // * T2_SMALL: A t2.small instance type. // - // * T2_MEDIUM: A t2.medium instance type. + // * T2_MEDIUM: A t2.medium + // instance type. // - // * R4_LARGE: A - // r4.large instance type. + // * R4_LARGE: A r4.large instance type. // - // * R4_XLARGE: A r4.xlarge instance type. + // * R4_XLARGE: A r4.xlarge + // instance type. // - // * - // R4_2XLARGE: A r4.2xlarge instance type. + // * R4_2XLARGE: A r4.2xlarge instance type. // - // * R4_4XLARGE: A r4.4xlarge instance - // type. + // * R4_4XLARGE: A + // r4.4xlarge instance type. // - // * R4_8XLARGE: A r4.8xlarge instance type. + // * R4_8XLARGE: A r4.8xlarge instance type. // // This member is required. Type types.ApiCacheType diff --git a/service/appsync/api_op_CreateResolver.go b/service/appsync/api_op_CreateResolver.go index 5e2dc65864b..84bd0d847ce 100644 --- a/service/appsync/api_op_CreateResolver.go +++ b/service/appsync/api_op_CreateResolver.go @@ -54,14 +54,13 @@ type CreateResolverInput struct { // The resolver type. // - // * UNIT: A UNIT resolver type. A UNIT resolver is the - // default resolver type. A UNIT resolver enables you to execute a GraphQL query - // against a single data source. + // * UNIT: A UNIT resolver type. A UNIT resolver is the default + // resolver type. A UNIT resolver enables you to execute a GraphQL query against a + // single data source. // - // * PIPELINE: A PIPELINE resolver type. A - // PIPELINE resolver enables you to execute a series of Function in a serial - // manner. You can use a pipeline resolver to execute a GraphQL query against - // multiple data sources. + // * PIPELINE: A PIPELINE resolver type. A PIPELINE resolver + // enables you to execute a series of Function in a serial manner. You can use a + // pipeline resolver to execute a GraphQL query against multiple data sources. Kind types.ResolverKind // The PipelineConfig. diff --git a/service/appsync/api_op_UpdateApiCache.go b/service/appsync/api_op_UpdateApiCache.go index 2cf1f6b63e3..02c5b75b7a3 100644 --- a/service/appsync/api_op_UpdateApiCache.go +++ b/service/appsync/api_op_UpdateApiCache.go @@ -32,10 +32,10 @@ type UpdateApiCacheInput struct { // Caching behavior. 
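The cache documentation above deprecates the EC2-style identifiers in favor of the generic ones. A sketch of translating the legacy ApiCacheType constants to generic counterparts; the specific pairings are an assumption based on the size names and are not spelled out by the generated documentation:

package example

import "github.com/aws/aws-sdk-go-v2/service/appsync/types"

// preferredCacheType returns a generic identifier for a legacy EC2-style
// cache type and passes any other value through unchanged. The mapping is
// illustrative only.
func preferredCacheType(t types.ApiCacheType) types.ApiCacheType {
	switch t {
	case types.ApiCacheTypeT2Small:
		return types.ApiCacheTypeSmall
	case types.ApiCacheTypeT2Medium:
		return types.ApiCacheTypeMedium
	case types.ApiCacheTypeR4Large:
		return types.ApiCacheTypeLarge
	case types.ApiCacheTypeR4Xlarge:
		return types.ApiCacheTypeXlarge
	case types.ApiCacheTypeR42xlarge:
		return types.ApiCacheTypeLarge2x
	case types.ApiCacheTypeR44xlarge:
		return types.ApiCacheTypeLarge4x
	case types.ApiCacheTypeR48xlarge:
		return types.ApiCacheTypeLarge8x
	default:
		return t
	}
}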
// - // * FULL_REQUEST_CACHING: All requests are fully cached. + // * FULL_REQUEST_CACHING: All requests are fully cached. // - // - // * PER_RESOLVER_CACHING: Individual resolvers that you specify are cached. + // * + // PER_RESOLVER_CACHING: Individual resolvers that you specify are cached. // // This member is required. ApiCachingBehavior types.ApiCachingBehavior @@ -52,46 +52,45 @@ type UpdateApiCacheInput struct { // The cache instance type. Valid values are // - // * SMALL + // * SMALL // - // * MEDIUM + // * MEDIUM // - // * - // LARGE + // * LARGE // - // * XLARGE + // * + // XLARGE // - // * LARGE_2X + // * LARGE_2X // - // * LARGE_4X + // * LARGE_4X // - // * LARGE_8X (not - // available in all regions) + // * LARGE_8X (not available in all regions) // - // * LARGE_12X + // * + // LARGE_12X // - // Historically, instance types were - // identified by an EC2-style value. As of July 2020, this is deprecated, and the - // generic identifiers above should be used. The following legacy instance types - // are available, but their use is discouraged: + // Historically, instance types were identified by an EC2-style value. + // As of July 2020, this is deprecated, and the generic identifiers above should be + // used. The following legacy instance types are available, but their use is + // discouraged: // - // * T2_SMALL: A t2.small - // instance type. + // * T2_SMALL: A t2.small instance type. // - // * T2_MEDIUM: A t2.medium instance type. + // * T2_MEDIUM: A t2.medium + // instance type. // - // * R4_LARGE: A - // r4.large instance type. + // * R4_LARGE: A r4.large instance type. // - // * R4_XLARGE: A r4.xlarge instance type. + // * R4_XLARGE: A r4.xlarge + // instance type. // - // * - // R4_2XLARGE: A r4.2xlarge instance type. + // * R4_2XLARGE: A r4.2xlarge instance type. // - // * R4_4XLARGE: A r4.4xlarge instance - // type. + // * R4_4XLARGE: A + // r4.4xlarge instance type. // - // * R4_8XLARGE: A r4.8xlarge instance type. + // * R4_8XLARGE: A r4.8xlarge instance type. // // This member is required. Type types.ApiCacheType diff --git a/service/appsync/api_op_UpdateResolver.go b/service/appsync/api_op_UpdateResolver.go index b8199b624a4..f7d548ae679 100644 --- a/service/appsync/api_op_UpdateResolver.go +++ b/service/appsync/api_op_UpdateResolver.go @@ -52,14 +52,13 @@ type UpdateResolverInput struct { // The resolver type. // - // * UNIT: A UNIT resolver type. A UNIT resolver is the - // default resolver type. A UNIT resolver enables you to execute a GraphQL query - // against a single data source. + // * UNIT: A UNIT resolver type. A UNIT resolver is the default + // resolver type. A UNIT resolver enables you to execute a GraphQL query against a + // single data source. // - // * PIPELINE: A PIPELINE resolver type. A - // PIPELINE resolver enables you to execute a series of Function in a serial - // manner. You can use a pipeline resolver to execute a GraphQL query against - // multiple data sources. + // * PIPELINE: A PIPELINE resolver type. A PIPELINE resolver + // enables you to execute a series of Function in a serial manner. You can use a + // pipeline resolver to execute a GraphQL query against multiple data sources. Kind types.ResolverKind // The PipelineConfig. 
diff --git a/service/appsync/types/enums.go b/service/appsync/types/enums.go index 9ad946427b1..ff4252b6bdb 100644 --- a/service/appsync/types/enums.go +++ b/service/appsync/types/enums.go @@ -30,21 +30,21 @@ type ApiCacheType string // Enum values for ApiCacheType const ( - ApiCacheTypeT2_small ApiCacheType = "T2_SMALL" - ApiCacheTypeT2_medium ApiCacheType = "T2_MEDIUM" - ApiCacheTypeR4_large ApiCacheType = "R4_LARGE" - ApiCacheTypeR4_xlarge ApiCacheType = "R4_XLARGE" - ApiCacheTypeR4_2xlarge ApiCacheType = "R4_2XLARGE" - ApiCacheTypeR4_4xlarge ApiCacheType = "R4_4XLARGE" - ApiCacheTypeR4_8xlarge ApiCacheType = "R4_8XLARGE" - ApiCacheTypeSmall ApiCacheType = "SMALL" - ApiCacheTypeMedium ApiCacheType = "MEDIUM" - ApiCacheTypeLarge ApiCacheType = "LARGE" - ApiCacheTypeXlarge ApiCacheType = "XLARGE" - ApiCacheTypeLarge_2x ApiCacheType = "LARGE_2X" - ApiCacheTypeLarge_4x ApiCacheType = "LARGE_4X" - ApiCacheTypeLarge_8x ApiCacheType = "LARGE_8X" - ApiCacheTypeLarge_12x ApiCacheType = "LARGE_12X" + ApiCacheTypeT2Small ApiCacheType = "T2_SMALL" + ApiCacheTypeT2Medium ApiCacheType = "T2_MEDIUM" + ApiCacheTypeR4Large ApiCacheType = "R4_LARGE" + ApiCacheTypeR4Xlarge ApiCacheType = "R4_XLARGE" + ApiCacheTypeR42xlarge ApiCacheType = "R4_2XLARGE" + ApiCacheTypeR44xlarge ApiCacheType = "R4_4XLARGE" + ApiCacheTypeR48xlarge ApiCacheType = "R4_8XLARGE" + ApiCacheTypeSmall ApiCacheType = "SMALL" + ApiCacheTypeMedium ApiCacheType = "MEDIUM" + ApiCacheTypeLarge ApiCacheType = "LARGE" + ApiCacheTypeXlarge ApiCacheType = "XLARGE" + ApiCacheTypeLarge2x ApiCacheType = "LARGE_2X" + ApiCacheTypeLarge4x ApiCacheType = "LARGE_4X" + ApiCacheTypeLarge8x ApiCacheType = "LARGE_8X" + ApiCacheTypeLarge12x ApiCacheType = "LARGE_12X" ) // Values returns all known values for ApiCacheType. Note that this can be expanded @@ -74,8 +74,8 @@ type ApiCachingBehavior string // Enum values for ApiCachingBehavior const ( - ApiCachingBehaviorFull_request_caching ApiCachingBehavior = "FULL_REQUEST_CACHING" - ApiCachingBehaviorPer_resolver_caching ApiCachingBehavior = "PER_RESOLVER_CACHING" + ApiCachingBehaviorFullRequestCaching ApiCachingBehavior = "FULL_REQUEST_CACHING" + ApiCachingBehaviorPerResolverCaching ApiCachingBehavior = "PER_RESOLVER_CACHING" ) // Values returns all known values for ApiCachingBehavior. Note that this can be @@ -92,10 +92,10 @@ type AuthenticationType string // Enum values for AuthenticationType const ( - AuthenticationTypeApi_key AuthenticationType = "API_KEY" - AuthenticationTypeAws_iam AuthenticationType = "AWS_IAM" - AuthenticationTypeAmazon_cognito_user_pools AuthenticationType = "AMAZON_COGNITO_USER_POOLS" - AuthenticationTypeOpenid_connect AuthenticationType = "OPENID_CONNECT" + AuthenticationTypeApiKey AuthenticationType = "API_KEY" + AuthenticationTypeAwsIam AuthenticationType = "AWS_IAM" + AuthenticationTypeAmazonCognitoUserPools AuthenticationType = "AMAZON_COGNITO_USER_POOLS" + AuthenticationTypeOpenidConnect AuthenticationType = "OPENID_CONNECT" ) // Values returns all known values for AuthenticationType. Note that this can be @@ -114,7 +114,7 @@ type AuthorizationType string // Enum values for AuthorizationType const ( - AuthorizationTypeAws_iam AuthorizationType = "AWS_IAM" + AuthorizationTypeAwsIam AuthorizationType = "AWS_IAM" ) // Values returns all known values for AuthorizationType. 
Note that this can be @@ -148,10 +148,10 @@ type ConflictHandlerType string // Enum values for ConflictHandlerType const ( - ConflictHandlerTypeOptimistic_concurrency ConflictHandlerType = "OPTIMISTIC_CONCURRENCY" - ConflictHandlerTypeLambda ConflictHandlerType = "LAMBDA" - ConflictHandlerTypeAutomerge ConflictHandlerType = "AUTOMERGE" - ConflictHandlerTypeNone ConflictHandlerType = "NONE" + ConflictHandlerTypeOptimisticConcurrency ConflictHandlerType = "OPTIMISTIC_CONCURRENCY" + ConflictHandlerTypeLambda ConflictHandlerType = "LAMBDA" + ConflictHandlerTypeAutomerge ConflictHandlerType = "AUTOMERGE" + ConflictHandlerTypeNone ConflictHandlerType = "NONE" ) // Values returns all known values for ConflictHandlerType. Note that this can be @@ -170,12 +170,12 @@ type DataSourceType string // Enum values for DataSourceType const ( - DataSourceTypeAws_lambda DataSourceType = "AWS_LAMBDA" - DataSourceTypeAmazon_dynamodb DataSourceType = "AMAZON_DYNAMODB" - DataSourceTypeAmazon_elasticsearch DataSourceType = "AMAZON_ELASTICSEARCH" - DataSourceTypeNone DataSourceType = "NONE" - DataSourceTypeHttp DataSourceType = "HTTP" - DataSourceTypeRelational_database DataSourceType = "RELATIONAL_DATABASE" + DataSourceTypeAwsLambda DataSourceType = "AWS_LAMBDA" + DataSourceTypeAmazonDynamodb DataSourceType = "AMAZON_DYNAMODB" + DataSourceTypeAmazonElasticsearch DataSourceType = "AMAZON_ELASTICSEARCH" + DataSourceTypeNone DataSourceType = "NONE" + DataSourceTypeHttp DataSourceType = "HTTP" + DataSourceTypeRelationalDatabase DataSourceType = "RELATIONAL_DATABASE" ) // Values returns all known values for DataSourceType. Note that this can be @@ -252,7 +252,7 @@ type RelationalDatabaseSourceType string // Enum values for RelationalDatabaseSourceType const ( - RelationalDatabaseSourceTypeRds_http_endpoint RelationalDatabaseSourceType = "RDS_HTTP_ENDPOINT" + RelationalDatabaseSourceTypeRdsHttpEndpoint RelationalDatabaseSourceType = "RDS_HTTP_ENDPOINT" ) // Values returns all known values for RelationalDatabaseSourceType. Note that this diff --git a/service/appsync/types/types.go b/service/appsync/types/types.go index d439b3dfb2b..897ac1345a7 100644 --- a/service/appsync/types/types.go +++ b/service/appsync/types/types.go @@ -20,10 +20,10 @@ type ApiCache struct { // Caching behavior. // - // * FULL_REQUEST_CACHING: All requests are fully cached. + // * FULL_REQUEST_CACHING: All requests are fully cached. // - // - // * PER_RESOLVER_CACHING: Individual resolvers that you specify are cached. + // * + // PER_RESOLVER_CACHING: Individual resolvers that you specify are cached. ApiCachingBehavior ApiCachingBehavior // At rest encryption flag for cache. This setting cannot be updated after @@ -32,18 +32,18 @@ type ApiCache struct { // The cache instance status. // - // * AVAILABLE: The instance is available for - // use. + // * AVAILABLE: The instance is available for use. // - // * CREATING: The instance is currently creating. + // * + // CREATING: The instance is currently creating. // - // * DELETING: The - // instance is currently deleting. + // * DELETING: The instance is + // currently deleting. // - // * MODIFYING: The instance is currently - // modifying. + // * MODIFYING: The instance is currently modifying. // - // * FAILED: The instance has failed creation. + // * + // FAILED: The instance has failed creation. Status ApiCacheStatus // Transit encryption flag when connecting to cache. This setting cannot be updated @@ -55,46 +55,45 @@ type ApiCache struct { // The cache instance type. 
Valid values are // - // * SMALL + // * SMALL // - // * MEDIUM + // * MEDIUM // - // * - // LARGE + // * LARGE // - // * XLARGE + // * + // XLARGE // - // * LARGE_2X + // * LARGE_2X // - // * LARGE_4X + // * LARGE_4X // - // * LARGE_8X (not - // available in all regions) + // * LARGE_8X (not available in all regions) // - // * LARGE_12X + // * + // LARGE_12X // - // Historically, instance types were - // identified by an EC2-style value. As of July 2020, this is deprecated, and the - // generic identifiers above should be used. The following legacy instance types - // are available, but their use is discouraged: + // Historically, instance types were identified by an EC2-style value. + // As of July 2020, this is deprecated, and the generic identifiers above should be + // used. The following legacy instance types are available, but their use is + // discouraged: // - // * T2_SMALL: A t2.small - // instance type. + // * T2_SMALL: A t2.small instance type. // - // * T2_MEDIUM: A t2.medium instance type. + // * T2_MEDIUM: A t2.medium + // instance type. // - // * R4_LARGE: A - // r4.large instance type. + // * R4_LARGE: A r4.large instance type. // - // * R4_XLARGE: A r4.xlarge instance type. + // * R4_XLARGE: A r4.xlarge + // instance type. // - // * - // R4_2XLARGE: A r4.2xlarge instance type. + // * R4_2XLARGE: A r4.2xlarge instance type. // - // * R4_4XLARGE: A r4.4xlarge instance - // type. + // * R4_4XLARGE: A + // r4.4xlarge instance type. // - // * R4_8XLARGE: A r4.8xlarge instance type. + // * R4_8XLARGE: A r4.8xlarge instance type. Type ApiCacheType } @@ -104,48 +103,47 @@ type ApiCache struct { // days. Key expiration is managed by Amazon DynamoDB TTL. The keys ceased to be // valid after February 21, 2018 and should not be used after that date. // -// * +// * // ListApiKeys returns the expiration time in milliseconds. // -// * CreateApiKey -// returns the expiration time in milliseconds. -// -// * UpdateApiKey is not -// available for this key version. +// * CreateApiKey returns +// the expiration time in milliseconds. // -// * DeleteApiKey deletes the item from the -// table. +// * UpdateApiKey is not available for this +// key version. // -// * Expiration is stored in Amazon DynamoDB as milliseconds. This -// results in a bug where keys are not automatically deleted because DynamoDB -// expects the TTL to be stored in seconds. As a one-time action, we will delete -// these keys from the table after February 21, 2018. +// * DeleteApiKey deletes the item from the table. // -// da2: This version was -// introduced in February 2018 when AppSync added support to extend key -// expiration. +// * Expiration is +// stored in Amazon DynamoDB as milliseconds. This results in a bug where keys are +// not automatically deleted because DynamoDB expects the TTL to be stored in +// seconds. As a one-time action, we will delete these keys from the table after +// February 21, 2018. // -// * ListApiKeys returns the expiration time and deletion time in -// seconds. +// da2: This version was introduced in February 2018 when +// AppSync added support to extend key expiration. // -// * CreateApiKey returns the expiration time and deletion time in -// seconds and accepts a user-provided expiration time in seconds. +// * ListApiKeys returns the +// expiration time and deletion time in seconds. // -// * -// UpdateApiKey returns the expiration time and and deletion time in seconds and -// accepts a user-provided expiration time in seconds. 
Expired API keys are kept -// for 60 days after the expiration time. Key expiration time can be updated while -// the key is not deleted. +// * CreateApiKey returns the +// expiration time and deletion time in seconds and accepts a user-provided +// expiration time in seconds. // -// * DeleteApiKey deletes the item from the table. +// * UpdateApiKey returns the expiration time and and +// deletion time in seconds and accepts a user-provided expiration time in seconds. +// Expired API keys are kept for 60 days after the expiration time. Key expiration +// time can be updated while the key is not deleted. // +// * DeleteApiKey deletes the +// item from the table. // -// * Expiration is stored in Amazon DynamoDB as seconds. After the expiration time, -// using the key to authenticate will fail. But the key can be reinstated before -// deletion. +// * Expiration is stored in Amazon DynamoDB as seconds. +// After the expiration time, using the key to authenticate will fail. But the key +// can be reinstated before deletion. // -// * Deletion is stored in Amazon DynamoDB as seconds. The key will -// be deleted after deletion time. +// * Deletion is stored in Amazon DynamoDB as +// seconds. The key will be deleted after deletion time. type ApiKey struct { // The time after which the API key is deleted. The date is represented as seconds @@ -168,7 +166,7 @@ type AuthorizationConfig struct { // The authorization type required by the HTTP endpoint. // - // * AWS_IAM: The + // * AWS_IAM: The // authorization type is Sigv4. // // This member is required. @@ -252,24 +250,24 @@ type DataSource struct { // The type of the data source. // - // * AMAZON_DYNAMODB: The data source is an - // Amazon DynamoDB table. + // * AMAZON_DYNAMODB: The data source is an Amazon + // DynamoDB table. // - // * AMAZON_ELASTICSEARCH: The data source is an Amazon + // * AMAZON_ELASTICSEARCH: The data source is an Amazon // Elasticsearch Service domain. // - // * AWS_LAMBDA: The data source is an AWS - // Lambda function. - // - // * NONE: There is no data source. This type is used when - // you wish to invoke a GraphQL operation without connecting to a data source, such - // as performing data transformation with resolvers or triggering a subscription to - // be invoked from a mutation. + // * AWS_LAMBDA: The data source is an AWS Lambda + // function. // - // * HTTP: The data source is an HTTP endpoint. + // * NONE: There is no data source. This type is used when you wish to + // invoke a GraphQL operation without connecting to a data source, such as + // performing data transformation with resolvers or triggering a subscription to be + // invoked from a mutation. // + // * HTTP: The data source is an HTTP endpoint. // - // * RELATIONAL_DATABASE: The data source is a relational database. + // * + // RELATIONAL_DATABASE: The data source is a relational database. Type DataSourceType } @@ -436,28 +434,27 @@ type LogConfig struct { // The field logging level. Values can be NONE, ERROR, or ALL. // - // * NONE: No + // * NONE: No // field-level logs are captured. // - // * ERROR: Logs the following information only - // for the fields that are in error: + // * ERROR: Logs the following information only for + // the fields that are in error: // - // * The error section in the server - // response. + // * The error section in the server response. // - // * Field-level errors. + // * + // Field-level errors. // - // * The generated - // request/response functions that got resolved for error fields. 
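The DataSource.Type documentation above enumerates the supported backends. A short sketch that branches on the renamed DataSourceType constants from this patch:

package example

import "github.com/aws/aws-sdk-go-v2/service/appsync/types"

// dataSourceSummary returns a one-line description for each DataSourceType
// documented above; unknown values fall back to their raw string form.
func dataSourceSummary(t types.DataSourceType) string {
	switch t {
	case types.DataSourceTypeAmazonDynamodb:
		return "Amazon DynamoDB table"
	case types.DataSourceTypeAmazonElasticsearch:
		return "Amazon Elasticsearch Service domain"
	case types.DataSourceTypeAwsLambda:
		return "AWS Lambda function"
	case types.DataSourceTypeHttp:
		return "HTTP endpoint"
	case types.DataSourceTypeRelationalDatabase:
		return "relational database"
	case types.DataSourceTypeNone:
		return "no data source"
	default:
		return string(t)
	}
}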
+ // * The generated request/response functions that got + // resolved for error fields. // - // * ALL: The - // following information is logged for all fields in the query: + // * ALL: The following information is logged for all + // fields in the query: // - // * - // Field-level tracing information. + // * Field-level tracing information. // - // * The generated request/response - // functions that got resolved for each field. + // * The generated + // request/response functions that got resolved for each field. // // This member is required. FieldLogLevel FieldLogLevel @@ -523,8 +520,8 @@ type RelationalDatabaseDataSourceConfig struct { // Source type for the relational database. // - // * RDS_HTTP_ENDPOINT: The - // relational database source type is an Amazon RDS HTTP endpoint. + // * RDS_HTTP_ENDPOINT: The relational + // database source type is an Amazon RDS HTTP endpoint. RelationalDatabaseSourceType RelationalDatabaseSourceType } @@ -542,14 +539,13 @@ type Resolver struct { // The resolver type. // - // * UNIT: A UNIT resolver type. A UNIT resolver is the - // default resolver type. A UNIT resolver enables you to execute a GraphQL query - // against a single data source. + // * UNIT: A UNIT resolver type. A UNIT resolver is the default + // resolver type. A UNIT resolver enables you to execute a GraphQL query against a + // single data source. // - // * PIPELINE: A PIPELINE resolver type. A - // PIPELINE resolver enables you to execute a series of Function in a serial - // manner. You can use a pipeline resolver to execute a GraphQL query against - // multiple data sources. + // * PIPELINE: A PIPELINE resolver type. A PIPELINE resolver + // enables you to execute a series of Function in a serial manner. You can use a + // pipeline resolver to execute a GraphQL query against multiple data sources. Kind ResolverKind // The PipelineConfig. @@ -578,25 +574,24 @@ type SyncConfig struct { // The Conflict Detection strategy to use. // - // * VERSION: Detect conflicts based - // on object versions for this resolver. + // * VERSION: Detect conflicts based on + // object versions for this resolver. // - // * NONE: Do not detect conflicts when + // * NONE: Do not detect conflicts when // executing this resolver. ConflictDetection ConflictDetectionType // The Conflict Resolution strategy to perform in the event of a conflict. // - // * + // * // OPTIMISTIC_CONCURRENCY: Resolve conflicts by rejecting mutations when versions // do not match the latest version at the server. // - // * AUTOMERGE: Resolve - // conflicts with the Automerge conflict resolution strategy. + // * AUTOMERGE: Resolve conflicts + // with the Automerge conflict resolution strategy. // - // * LAMBDA: - // Resolve conflicts with a Lambda function supplied in the - // LambdaConflictHandlerConfig. + // * LAMBDA: Resolve conflicts + // with a Lambda function supplied in the LambdaConflictHandlerConfig. ConflictHandler ConflictHandlerType // The LambdaConflictHandlerConfig when configuring LAMBDA as the Conflict Handler. diff --git a/service/athena/api_op_CreateDataCatalog.go b/service/athena/api_op_CreateDataCatalog.go index 515d50e119a..06327549180 100644 --- a/service/athena/api_op_CreateDataCatalog.go +++ b/service/athena/api_op_CreateDataCatalog.go @@ -49,25 +49,25 @@ type CreateDataCatalogInput struct { // Specifies the Lambda function or functions to use for creating the data catalog. // This is a mapping whose values depend on the catalog type. // - // * For the HIVE - // data catalog type, use the following syntax. 
The metadata-function parameter is + // * For the HIVE data + // catalog type, use the following syntax. The metadata-function parameter is // required. The sdk-version parameter is optional and defaults to the currently // supported version. metadata-function=lambda_arn, sdk-version=version_number // - // - // * For the LAMBDA data catalog type, use one of the following sets of required + // * + // For the LAMBDA data catalog type, use one of the following sets of required // parameters, but not both. // - // * If you have one Lambda function that - // processes metadata and another for reading the actual data, use the following - // syntax. Both parameters are required. metadata-function=lambda_arn, + // * If you have one Lambda function that processes + // metadata and another for reading the actual data, use the following syntax. Both + // parameters are required. metadata-function=lambda_arn, // record-function=lambda_arn // - // * If you have a composite Lambda function - // that processes both metadata and data, use the following syntax to specify your + // * If you have a composite Lambda function that + // processes both metadata and data, use the following syntax to specify your // Lambda function. function=lambda_arn // - // * The GLUE type has no parameters. + // * The GLUE type has no parameters. Parameters map[string]*string // A list of comma separated tags to add to the data catalog that is created. diff --git a/service/athena/api_op_UpdateDataCatalog.go b/service/athena/api_op_UpdateDataCatalog.go index 9e2cec2dcc5..a871f0baea5 100644 --- a/service/athena/api_op_UpdateDataCatalog.go +++ b/service/athena/api_op_UpdateDataCatalog.go @@ -48,25 +48,25 @@ type UpdateDataCatalogInput struct { // Specifies the Lambda function or functions to use for updating the data catalog. // This is a mapping whose values depend on the catalog type. // - // * For the HIVE - // data catalog type, use the following syntax. The metadata-function parameter is + // * For the HIVE data + // catalog type, use the following syntax. The metadata-function parameter is // required. The sdk-version parameter is optional and defaults to the currently // supported version. metadata-function=lambda_arn, sdk-version=version_number // - // - // * For the LAMBDA data catalog type, use one of the following sets of required + // * + // For the LAMBDA data catalog type, use one of the following sets of required // parameters, but not both. // - // * If you have one Lambda function that - // processes metadata and another for reading the actual data, use the following - // syntax. Both parameters are required. metadata-function=lambda_arn, + // * If you have one Lambda function that processes + // metadata and another for reading the actual data, use the following syntax. Both + // parameters are required. metadata-function=lambda_arn, // record-function=lambda_arn // - // * If you have a composite Lambda function - // that processes both metadata and data, use the following syntax to specify your + // * If you have a composite Lambda function that + // processes both metadata and data, use the following syntax to specify your // Lambda function. function=lambda_arn // - // * The GLUE type has no parameters. + // * The GLUE type has no parameters. 
Parameters map[string]*string } diff --git a/service/athena/types/enums.go b/service/athena/types/enums.go index 7d986bc50e4..ef6a3efb4bc 100644 --- a/service/athena/types/enums.go +++ b/service/athena/types/enums.go @@ -6,7 +6,7 @@ type ColumnNullable string // Enum values for ColumnNullable const ( - ColumnNullableNot_null ColumnNullable = "NOT_NULL" + ColumnNullableNotNull ColumnNullable = "NOT_NULL" ColumnNullableNullable ColumnNullable = "NULLABLE" ColumnNullableUnknown ColumnNullable = "UNKNOWN" ) @@ -46,9 +46,9 @@ type EncryptionOption string // Enum values for EncryptionOption const ( - EncryptionOptionSse_s3 EncryptionOption = "SSE_S3" - EncryptionOptionSse_kms EncryptionOption = "SSE_KMS" - EncryptionOptionCse_kms EncryptionOption = "CSE_KMS" + EncryptionOptionSseS3 EncryptionOption = "SSE_S3" + EncryptionOptionSseKms EncryptionOption = "SSE_KMS" + EncryptionOptionCseKms EncryptionOption = "CSE_KMS" ) // Values returns all known values for EncryptionOption. Note that this can be @@ -110,7 +110,7 @@ type ThrottleReason string // Enum values for ThrottleReason const ( - ThrottleReasonConcurrent_query_limit_exceeded ThrottleReason = "CONCURRENT_QUERY_LIMIT_EXCEEDED" + ThrottleReasonConcurrentQueryLimitExceeded ThrottleReason = "CONCURRENT_QUERY_LIMIT_EXCEEDED" ) // Values returns all known values for ThrottleReason. Note that this can be diff --git a/service/athena/types/types.go b/service/athena/types/types.go index 5e37611e502..326443bd938 100644 --- a/service/athena/types/types.go +++ b/service/athena/types/types.go @@ -98,25 +98,25 @@ type DataCatalog struct { // Specifies the Lambda function or functions to use for the data catalog. This is // a mapping whose values depend on the catalog type. // - // * For the HIVE data - // catalog type, use the following syntax. The metadata-function parameter is - // required. The sdk-version parameter is optional and defaults to the currently - // supported version. metadata-function=lambda_arn, sdk-version=version_number + // * For the HIVE data catalog + // type, use the following syntax. The metadata-function parameter is required. The + // sdk-version parameter is optional and defaults to the currently supported + // version. metadata-function=lambda_arn, sdk-version=version_number // + // * For the + // LAMBDA data catalog type, use one of the following sets of required parameters, + // but not both. // - // * For the LAMBDA data catalog type, use one of the following sets of required - // parameters, but not both. + // * If you have one Lambda function that processes metadata and + // another for reading the actual data, use the following syntax. Both parameters + // are required. metadata-function=lambda_arn, record-function=lambda_arn // - // * If you have one Lambda function that - // processes metadata and another for reading the actual data, use the following - // syntax. Both parameters are required. metadata-function=lambda_arn, - // record-function=lambda_arn + // * If you + // have a composite Lambda function that processes both metadata and data, use the + // following syntax to specify your Lambda function. function=lambda_arn // - // * If you have a composite Lambda function - // that processes both metadata and data, use the following syntax to specify your - // Lambda function. function=lambda_arn - // - // * The GLUE type has no parameters. + // * The + // GLUE type has no parameters. 
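// Illustrative sketch (not part of the generated diff): passing the HIVE
// catalog syntax described above through the generated CreateDataCatalogInput.
// Only the Parameters field and its key names appear in this patch; the Name
// and Type fields, the types.DataCatalogTypeHive constant, and the client call
// follow the usual shape of the generated package and are assumptions here.
package examples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/athena"
	"github.com/aws/aws-sdk-go-v2/service/athena/types"
)

func createHiveCatalog(ctx context.Context, client *athena.Client) error {
	// HIVE catalog: metadata-function is required, sdk-version is optional.
	_, err := client.CreateDataCatalog(ctx, &athena.CreateDataCatalogInput{
		Name: aws.String("my_hive_catalog"), // assumed field name
		Type: types.DataCatalogTypeHive,     // assumed constant name
		Parameters: map[string]*string{
			"metadata-function": aws.String("arn:aws:lambda:us-east-1:111122223333:function:hive-metadata"),
			"sdk-version":       aws.String("1.0"),
		},
	})
	return err
}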
Parameters map[string]*string } diff --git a/service/autoscaling/api_op_CompleteLifecycleAction.go b/service/autoscaling/api_op_CompleteLifecycleAction.go index b713e09e32f..90556bf4f00 100644 --- a/service/autoscaling/api_op_CompleteLifecycleAction.go +++ b/service/autoscaling/api_op_CompleteLifecycleAction.go @@ -14,23 +14,23 @@ import ( // specified result. This step is a part of the procedure for adding a lifecycle // hook to an Auto Scaling group: // -// * (Optional) Create a Lambda function and a -// rule that allows CloudWatch Events to invoke your Lambda function when Amazon -// EC2 Auto Scaling launches or terminates instances. +// * (Optional) Create a Lambda function and a rule +// that allows CloudWatch Events to invoke your Lambda function when Amazon EC2 +// Auto Scaling launches or terminates instances. // -// * (Optional) Create a +// * (Optional) Create a // notification target and an IAM role. The target can be either an Amazon SQS // queue or an Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to publish // lifecycle notifications to the target. // -// * Create the lifecycle hook. Specify +// * Create the lifecycle hook. Specify // whether the hook is used when the instances launch or terminate. // -// * If you -// need more time, record the lifecycle action heartbeat to keep the instance in a +// * If you need +// more time, record the lifecycle action heartbeat to keep the instance in a // pending state. // -// * If you finish before the timeout period ends, complete the +// * If you finish before the timeout period ends, complete the // lifecycle action. // // For more information, see Amazon EC2 Auto Scaling Lifecycle diff --git a/service/autoscaling/api_op_DescribeAdjustmentTypes.go b/service/autoscaling/api_op_DescribeAdjustmentTypes.go index 56603e5dfd7..3a1a01fbc22 100644 --- a/service/autoscaling/api_op_DescribeAdjustmentTypes.go +++ b/service/autoscaling/api_op_DescribeAdjustmentTypes.go @@ -16,12 +16,12 @@ import ( // policies; they do not apply to target tracking scaling policies. The following // adjustment types are supported: // -// * ChangeInCapacity +// * ChangeInCapacity // -// * ExactCapacity +// * ExactCapacity // -// -// * PercentChangeInCapacity +// * +// PercentChangeInCapacity func (c *Client) DescribeAdjustmentTypes(ctx context.Context, params *DescribeAdjustmentTypesInput, optFns ...func(*Options)) (*DescribeAdjustmentTypesOutput, error) { if params == nil { params = &DescribeAdjustmentTypesInput{} diff --git a/service/autoscaling/api_op_DescribeInstanceRefreshes.go b/service/autoscaling/api_op_DescribeInstanceRefreshes.go index 55793fcbf4a..df6216e69cb 100644 --- a/service/autoscaling/api_op_DescribeInstanceRefreshes.go +++ b/service/autoscaling/api_op_DescribeInstanceRefreshes.go @@ -15,27 +15,27 @@ import ( // request by looking at the Status parameter. The following are the possible // statuses: // -// * Pending - The request was created, but the operation has not +// * Pending - The request was created, but the operation has not // started. // -// * InProgress - The operation is in progress. +// * InProgress - The operation is in progress. // -// * Successful - -// The operation completed successfully. +// * Successful - The +// operation completed successfully. // -// * Failed - The operation failed to -// complete. You can troubleshoot using the status reason and the scaling -// activities. +// * Failed - The operation failed to complete. +// You can troubleshoot using the status reason and the scaling activities. 
// -// * Cancelling - An ongoing operation is being cancelled. -// Cancellation does not roll back any replacements that have already been -// completed, but it prevents new replacements from being started. +// * +// Cancelling - An ongoing operation is being cancelled. Cancellation does not roll +// back any replacements that have already been completed, but it prevents new +// replacements from being started. // -// * Cancelled -// - The operation is cancelled. +// * Cancelled - The operation is cancelled. // -// For more information, see Replacing Auto Scaling -// Instances Based on an Instance Refresh +// For +// more information, see Replacing Auto Scaling Instances Based on an Instance +// Refresh // (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-instance-refresh.html). func (c *Client) DescribeInstanceRefreshes(ctx context.Context, params *DescribeInstanceRefreshesInput, optFns ...func(*Options)) (*DescribeInstanceRefreshesOutput, error) { if params == nil { diff --git a/service/autoscaling/api_op_DescribeLifecycleHookTypes.go b/service/autoscaling/api_op_DescribeLifecycleHookTypes.go index 5ad0611ca2f..759db72aedb 100644 --- a/service/autoscaling/api_op_DescribeLifecycleHookTypes.go +++ b/service/autoscaling/api_op_DescribeLifecycleHookTypes.go @@ -13,9 +13,9 @@ import ( // Describes the available types of lifecycle hooks. The following hook types are // supported: // -// * autoscaling:EC2_INSTANCE_LAUNCHING +// * autoscaling:EC2_INSTANCE_LAUNCHING // -// * +// * // autoscaling:EC2_INSTANCE_TERMINATING func (c *Client) DescribeLifecycleHookTypes(ctx context.Context, params *DescribeLifecycleHookTypesInput, optFns ...func(*Options)) (*DescribeLifecycleHookTypesOutput, error) { if params == nil { diff --git a/service/autoscaling/api_op_DisableMetricsCollection.go b/service/autoscaling/api_op_DisableMetricsCollection.go index 09e98d5fc06..31dd29e4c87 100644 --- a/service/autoscaling/api_op_DisableMetricsCollection.go +++ b/service/autoscaling/api_op_DisableMetricsCollection.go @@ -35,39 +35,38 @@ type DisableMetricsCollectionInput struct { // Specifies one or more of the following metrics: // - // * GroupMinSize + // * GroupMinSize // - // * + // * // GroupMaxSize // - // * GroupDesiredCapacity + // * GroupDesiredCapacity // - // * GroupInServiceInstances + // * GroupInServiceInstances // - // * + // * // GroupPendingInstances // - // * GroupStandbyInstances + // * GroupStandbyInstances // - // * - // GroupTerminatingInstances + // * GroupTerminatingInstances // - // * GroupTotalInstances + // * + // GroupTotalInstances // - // * - // GroupInServiceCapacity + // * GroupInServiceCapacity // - // * GroupPendingCapacity - // - // * GroupStandbyCapacity + // * GroupPendingCapacity // + // * + // GroupStandbyCapacity // // * GroupTerminatingCapacity // - // * GroupTotalCapacity + // * GroupTotalCapacity // - // If you omit this - // parameter, all metrics are disabled. + // If you + // omit this parameter, all metrics are disabled. Metrics []*string } diff --git a/service/autoscaling/api_op_EnableMetricsCollection.go b/service/autoscaling/api_op_EnableMetricsCollection.go index 21c5efa007d..f1b72700fd7 100644 --- a/service/autoscaling/api_op_EnableMetricsCollection.go +++ b/service/autoscaling/api_op_EnableMetricsCollection.go @@ -45,42 +45,41 @@ type EnableMetricsCollectionInput struct { // Specifies which group-level metrics to start collecting. 
You can specify one or // more of the following metrics: // - // * GroupMinSize + // * GroupMinSize // - // * GroupMaxSize + // * GroupMaxSize // - // * + // * // GroupDesiredCapacity // - // * GroupInServiceInstances + // * GroupInServiceInstances // - // * - // GroupPendingInstances + // * GroupPendingInstances // - // * GroupStandbyInstances + // * + // GroupStandbyInstances // - // * - // GroupTerminatingInstances + // * GroupTerminatingInstances // - // * GroupTotalInstances + // * GroupTotalInstances // - // The instance weighting - // feature supports the following additional metrics: + // The + // instance weighting feature supports the following additional metrics: // - // * + // * // GroupInServiceCapacity // - // * GroupPendingCapacity + // * GroupPendingCapacity // - // * GroupStandbyCapacity + // * GroupStandbyCapacity // + // * + // GroupTerminatingCapacity // - // * GroupTerminatingCapacity + // * GroupTotalCapacity // - // * GroupTotalCapacity - // - // If you omit this - // parameter, all metrics are enabled. + // If you omit this parameter, all + // metrics are enabled. Metrics []*string } diff --git a/service/autoscaling/api_op_PutLifecycleHook.go b/service/autoscaling/api_op_PutLifecycleHook.go index ae0474d4d82..b1e4e75b21d 100644 --- a/service/autoscaling/api_op_PutLifecycleHook.go +++ b/service/autoscaling/api_op_PutLifecycleHook.go @@ -16,24 +16,24 @@ import ( // terminates (before it is fully terminated). This step is a part of the procedure // for adding a lifecycle hook to an Auto Scaling group: // -// * (Optional) Create a +// * (Optional) Create a // Lambda function and a rule that allows CloudWatch Events to invoke your Lambda // function when Amazon EC2 Auto Scaling launches or terminates instances. // -// * +// * // (Optional) Create a notification target and an IAM role. The target can be // either an Amazon SQS queue or an Amazon SNS topic. The role allows Amazon EC2 // Auto Scaling to publish lifecycle notifications to the target. // -// * Create the +// * Create the // lifecycle hook. Specify whether the hook is used when the instances launch or // terminate. // -// * If you need more time, record the lifecycle action heartbeat -// to keep the instance in a pending state using the RecordLifecycleActionHeartbeat +// * If you need more time, record the lifecycle action heartbeat to +// keep the instance in a pending state using the RecordLifecycleActionHeartbeat // API call. // -// * If you finish before the timeout period ends, complete the +// * If you finish before the timeout period ends, complete the // lifecycle action using the CompleteLifecycleAction API call. // // For more @@ -87,9 +87,9 @@ type PutLifecycleHookInput struct { // The instance state to which you want to attach the lifecycle hook. 
The valid // values are: // - // * autoscaling:EC2_INSTANCE_LAUNCHING + // * autoscaling:EC2_INSTANCE_LAUNCHING // - // * + // * // autoscaling:EC2_INSTANCE_TERMINATING // // Required for new lifecycle hooks, but diff --git a/service/autoscaling/api_op_PutScalingPolicy.go b/service/autoscaling/api_op_PutScalingPolicy.go index 9b79a0832de..ca9d70eec29 100644 --- a/service/autoscaling/api_op_PutScalingPolicy.go +++ b/service/autoscaling/api_op_PutScalingPolicy.go @@ -98,12 +98,12 @@ type PutScalingPolicyInput struct { // One of the following policy types: // - // * TargetTrackingScaling + // * TargetTrackingScaling // - // * - // StepScaling + // * StepScaling // - // * SimpleScaling (default) + // * + // SimpleScaling (default) PolicyType *string // The amount by which to scale, based on the specified adjustment type. A positive @@ -121,19 +121,19 @@ type PutScalingPolicyInput struct { // A target tracking scaling policy. Includes support for predefined or customized // metrics. The following predefined metrics are available: // - // * + // * // ASGAverageCPUUtilization // - // * ASGAverageNetworkIn + // * ASGAverageNetworkIn // - // * - // ASGAverageNetworkOut + // * ASGAverageNetworkOut // - // * ALBRequestCountPerTarget + // * + // ALBRequestCountPerTarget // - // If you specify - // ALBRequestCountPerTarget for the metric, you must specify the ResourceLabel - // parameter with the PredefinedMetricSpecification. For more information, see + // If you specify ALBRequestCountPerTarget for the + // metric, you must specify the ResourceLabel parameter with the + // PredefinedMetricSpecification. For more information, see // TargetTrackingConfiguration // (https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_TargetTrackingConfiguration.html) // in the Amazon EC2 Auto Scaling API Reference. Required if the policy type is diff --git a/service/autoscaling/api_op_RecordLifecycleActionHeartbeat.go b/service/autoscaling/api_op_RecordLifecycleActionHeartbeat.go index c497ee724a1..1df0fa34aa5 100644 --- a/service/autoscaling/api_op_RecordLifecycleActionHeartbeat.go +++ b/service/autoscaling/api_op_RecordLifecycleActionHeartbeat.go @@ -15,27 +15,26 @@ import ( // PutLifecycleHook API call. This step is a part of the procedure for adding a // lifecycle hook to an Auto Scaling group: // -// * (Optional) Create a Lambda -// function and a rule that allows CloudWatch Events to invoke your Lambda function -// when Amazon EC2 Auto Scaling launches or terminates instances. +// * (Optional) Create a Lambda function +// and a rule that allows CloudWatch Events to invoke your Lambda function when +// Amazon EC2 Auto Scaling launches or terminates instances. // -// * (Optional) -// Create a notification target and an IAM role. The target can be either an Amazon -// SQS queue or an Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to -// publish lifecycle notifications to the target. +// * (Optional) Create a +// notification target and an IAM role. The target can be either an Amazon SQS +// queue or an Amazon SNS topic. The role allows Amazon EC2 Auto Scaling to publish +// lifecycle notifications to the target. // -// * Create the lifecycle hook. -// Specify whether the hook is used when the instances launch or terminate. +// * Create the lifecycle hook. Specify +// whether the hook is used when the instances launch or terminate. // -// * -// If you need more time, record the lifecycle action heartbeat to keep the -// instance in a pending state. 
+// * If you need +// more time, record the lifecycle action heartbeat to keep the instance in a +// pending state. // -// * If you finish before the timeout period -// ends, complete the lifecycle action. +// * If you finish before the timeout period ends, complete the +// lifecycle action. // -// For more information, see Auto Scaling -// Lifecycle +// For more information, see Auto Scaling Lifecycle // (https://docs.aws.amazon.com/autoscaling/ec2/userguide/AutoScalingGroupLifecycle.html) // in the Amazon EC2 Auto Scaling User Guide. func (c *Client) RecordLifecycleActionHeartbeat(ctx context.Context, params *RecordLifecycleActionHeartbeatInput, optFns ...func(*Options)) (*RecordLifecycleActionHeartbeatOutput, error) { diff --git a/service/autoscaling/api_op_ResumeProcesses.go b/service/autoscaling/api_op_ResumeProcesses.go index ba3e291e7a6..4bc4f22f44e 100644 --- a/service/autoscaling/api_op_ResumeProcesses.go +++ b/service/autoscaling/api_op_ResumeProcesses.go @@ -39,28 +39,28 @@ type ResumeProcessesInput struct { // One or more of the following processes: // - // * Launch + // * Launch // - // * Terminate + // * Terminate // - // * + // * // AddToLoadBalancer // - // * AlarmNotification + // * AlarmNotification // - // * AZRebalance + // * AZRebalance // - // * - // HealthCheck + // * HealthCheck // - // * InstanceRefresh + // * + // InstanceRefresh // - // * ReplaceUnhealthy + // * ReplaceUnhealthy // - // * - // ScheduledActions + // * ScheduledActions // - // If you omit this parameter, all processes are specified. + // If you omit this + // parameter, all processes are specified. ScalingProcesses []*string } diff --git a/service/autoscaling/api_op_SuspendProcesses.go b/service/autoscaling/api_op_SuspendProcesses.go index 162405c6adc..7f7ff8cda0b 100644 --- a/service/autoscaling/api_op_SuspendProcesses.go +++ b/service/autoscaling/api_op_SuspendProcesses.go @@ -41,28 +41,28 @@ type SuspendProcessesInput struct { // One or more of the following processes: // - // * Launch + // * Launch // - // * Terminate + // * Terminate // - // * + // * // AddToLoadBalancer // - // * AlarmNotification + // * AlarmNotification // - // * AZRebalance + // * AZRebalance // - // * - // HealthCheck + // * HealthCheck // - // * InstanceRefresh + // * + // InstanceRefresh // - // * ReplaceUnhealthy + // * ReplaceUnhealthy // - // * - // ScheduledActions + // * ScheduledActions // - // If you omit this parameter, all processes are specified. + // If you omit this + // parameter, all processes are specified. ScalingProcesses []*string } diff --git a/service/autoscaling/api_op_UpdateAutoScalingGroup.go b/service/autoscaling/api_op_UpdateAutoScalingGroup.go index 25bb2c27883..0f66ff9ef3d 100644 --- a/service/autoscaling/api_op_UpdateAutoScalingGroup.go +++ b/service/autoscaling/api_op_UpdateAutoScalingGroup.go @@ -29,25 +29,24 @@ import ( // of your application. Note the following about changing DesiredCapacity, MaxSize, // or MinSize: // -// * If a scale-in activity occurs as a result of a new +// * If a scale-in activity occurs as a result of a new // DesiredCapacity value that is lower than the current size of the group, the Auto // Scaling group uses its termination policy to determine which instances to // terminate. // -// * If you specify a new value for MinSize without specifying a -// value for DesiredCapacity, and the new MinSize is larger than the current size -// of the group, this sets the group's DesiredCapacity to the new MinSize value. 
+// * If you specify a new value for MinSize without specifying a value +// for DesiredCapacity, and the new MinSize is larger than the current size of the +// group, this sets the group's DesiredCapacity to the new MinSize value. // +// * If you +// specify a new value for MaxSize without specifying a value for DesiredCapacity, +// and the new MaxSize is smaller than the current size of the group, this sets the +// group's DesiredCapacity to the new MaxSize value. // -// * If you specify a new value for MaxSize without specifying a value for -// DesiredCapacity, and the new MaxSize is smaller than the current size of the -// group, this sets the group's DesiredCapacity to the new MaxSize value. -// -// To see -// which parameters have been set, call the DescribeAutoScalingGroups API. To view -// the scaling policies for an Auto Scaling group, call the DescribePolicies API. -// If the group has scaling policies, you can update them by calling the -// PutScalingPolicy API. +// To see which parameters have +// been set, call the DescribeAutoScalingGroups API. To view the scaling policies +// for an Auto Scaling group, call the DescribePolicies API. If the group has +// scaling policies, you can update them by calling the PutScalingPolicy API. func (c *Client) UpdateAutoScalingGroup(ctx context.Context, params *UpdateAutoScalingGroupInput, optFns ...func(*Options)) (*UpdateAutoScalingGroupOutput, error) { if params == nil { params = &UpdateAutoScalingGroupInput{} diff --git a/service/autoscaling/types/enums.go b/service/autoscaling/types/enums.go index 3a5e0d3b967..e072605dc17 100644 --- a/service/autoscaling/types/enums.go +++ b/service/autoscaling/types/enums.go @@ -70,19 +70,19 @@ type LifecycleState string // Enum values for LifecycleState const ( - LifecycleStatePending LifecycleState = "Pending" - LifecycleStatePending_wait LifecycleState = "Pending:Wait" - LifecycleStatePending_proceed LifecycleState = "Pending:Proceed" - LifecycleStateQuarantined LifecycleState = "Quarantined" - LifecycleStateIn_service LifecycleState = "InService" - LifecycleStateTerminating LifecycleState = "Terminating" - LifecycleStateTerminating_wait LifecycleState = "Terminating:Wait" - LifecycleStateTerminating_proceed LifecycleState = "Terminating:Proceed" - LifecycleStateTerminated LifecycleState = "Terminated" - LifecycleStateDetaching LifecycleState = "Detaching" - LifecycleStateDetached LifecycleState = "Detached" - LifecycleStateEntering_standby LifecycleState = "EnteringStandby" - LifecycleStateStandby LifecycleState = "Standby" + LifecycleStatePending LifecycleState = "Pending" + LifecycleStatePendingWait LifecycleState = "Pending:Wait" + LifecycleStatePendingProceed LifecycleState = "Pending:Proceed" + LifecycleStateQuarantined LifecycleState = "Quarantined" + LifecycleStateInService LifecycleState = "InService" + LifecycleStateTerminating LifecycleState = "Terminating" + LifecycleStateTerminatingWait LifecycleState = "Terminating:Wait" + LifecycleStateTerminatingProceed LifecycleState = "Terminating:Proceed" + LifecycleStateTerminated LifecycleState = "Terminated" + LifecycleStateDetaching LifecycleState = "Detaching" + LifecycleStateDetached LifecycleState = "Detached" + LifecycleStateEnteringStandby LifecycleState = "EnteringStandby" + LifecycleStateStandby LifecycleState = "Standby" ) // Values returns all known values for LifecycleState. 
Note that this can be diff --git a/service/autoscaling/types/types.go b/service/autoscaling/types/types.go index 8091bbe565c..7b22ecd67a1 100644 --- a/service/autoscaling/types/types.go +++ b/service/autoscaling/types/types.go @@ -258,14 +258,14 @@ type BlockDeviceMapping struct { // policy to use with Amazon EC2 Auto Scaling. To create your customized metric // specification: // -// * Add values for each required parameter from CloudWatch. -// You can use an existing metric, or a new metric that you create. To use your own +// * Add values for each required parameter from CloudWatch. You +// can use an existing metric, or a new metric that you create. To use your own // metric, you must first publish the metric to CloudWatch. For more information, // see Publish Custom Metrics // (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/publishingMetrics.html) // in the Amazon CloudWatch User Guide. // -// * Choose a metric that changes +// * Choose a metric that changes // proportionally with capacity. The value of the metric should increase or // decrease in inverse proportion to the number of capacity units. That is, the // value of the metric should decrease when capacity increases. @@ -369,36 +369,35 @@ type EnabledMetric struct { // One of the following metrics: // - // * GroupMinSize + // * GroupMinSize // - // * GroupMaxSize + // * GroupMaxSize // - // * + // * // GroupDesiredCapacity // - // * GroupInServiceInstances + // * GroupInServiceInstances // - // * - // GroupPendingInstances + // * GroupPendingInstances // - // * GroupStandbyInstances + // * + // GroupStandbyInstances // - // * - // GroupTerminatingInstances + // * GroupTerminatingInstances // - // * GroupTotalInstances + // * GroupTotalInstances // - // * + // * // GroupInServiceCapacity // - // * GroupPendingCapacity + // * GroupPendingCapacity // - // * GroupStandbyCapacity + // * GroupStandbyCapacity // + // * + // GroupTerminatingCapacity // - // * GroupTerminatingCapacity - // - // * GroupTotalCapacity + // * GroupTotalCapacity Metric *string } @@ -542,24 +541,24 @@ type InstanceRefresh struct { // The current status for the instance refresh operation: // - // * Pending - The - // request was created, but the operation has not started. + // * Pending - The request + // was created, but the operation has not started. // - // * InProgress - The - // operation is in progress. + // * InProgress - The operation is + // in progress. // - // * Successful - The operation completed - // successfully. + // * Successful - The operation completed successfully. // - // * Failed - The operation failed to complete. You can - // troubleshoot using the status reason and the scaling activities. + // * Failed - + // The operation failed to complete. You can troubleshoot using the status reason + // and the scaling activities. // - // * - // Cancelling - An ongoing operation is being cancelled. Cancellation does not roll - // back any replacements that have already been completed, but it prevents new - // replacements from being started. + // * Cancelling - An ongoing operation is being + // cancelled. Cancellation does not roll back any replacements that have already + // been completed, but it prevents new replacements from being started. // - // * Cancelled - The operation is cancelled. + // * + // Cancelled - The operation is cancelled. Status InstanceRefreshStatus // Provides more details about the current status of the instance refresh. 
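// Illustrative sketch (not part of the generated diff): inspecting the instance
// refresh statuses documented above. The DescribeInstanceRefreshes signature and
// the InstanceRefresh.Status field appear in this patch; the AutoScalingGroupName
// input field and the InstanceRefreshes output field are assumed from the usual
// shape of the generated package.
package examples

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/autoscaling"
)

func printRefreshStatuses(ctx context.Context, client *autoscaling.Client, groupName string) error {
	out, err := client.DescribeInstanceRefreshes(ctx, &autoscaling.DescribeInstanceRefreshesInput{
		AutoScalingGroupName: aws.String(groupName), // assumed field name
	})
	if err != nil {
		return err
	}
	for _, refresh := range out.InstanceRefreshes { // assumed field name
		// Status is one of Pending, InProgress, Successful, Failed,
		// Cancelling, or Cancelled, as documented above.
		fmt.Printf("instance refresh status: %s\n", refresh.Status)
	}
	return nil
}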
@@ -878,9 +877,9 @@ type LifecycleHook struct { // The state of the EC2 instance to which to attach the lifecycle hook. The // following are possible values: // - // * autoscaling:EC2_INSTANCE_LAUNCHING + // * autoscaling:EC2_INSTANCE_LAUNCHING // - // * + // * // autoscaling:EC2_INSTANCE_TERMINATING LifecycleTransition *string @@ -904,27 +903,27 @@ type LifecycleHook struct { // instance terminates (before it is fully terminated). This step is a part of the // procedure for creating a lifecycle hook for an Auto Scaling group: // -// * -// (Optional) Create a Lambda function and a rule that allows CloudWatch Events to -// invoke your Lambda function when Amazon EC2 Auto Scaling launches or terminates +// * (Optional) +// Create a Lambda function and a rule that allows CloudWatch Events to invoke your +// Lambda function when Amazon EC2 Auto Scaling launches or terminates // instances. // -// * (Optional) Create a notification target and an IAM role. The +// * (Optional) Create a notification target and an IAM role. The // target can be either an Amazon SQS queue or an Amazon SNS topic. The role allows // Amazon EC2 Auto Scaling to publish lifecycle notifications to the target. // -// * +// * // Create the lifecycle hook. Specify whether the hook is used when the instances // launch or terminate. // -// * If you need more time, record the lifecycle action +// * If you need more time, record the lifecycle action // heartbeat to keep the instance in a pending state. // -// * If you finish before -// the timeout period ends, complete the lifecycle action. +// * If you finish before the +// timeout period ends, complete the lifecycle action. // -// For more information, -// see Amazon EC2 Auto Scaling Lifecycle Hooks +// For more information, see +// Amazon EC2 Auto Scaling Lifecycle Hooks // (https://docs.aws.amazon.com/autoscaling/ec2/userguide/lifecycle-hooks.html) in // the Amazon EC2 Auto Scaling User Guide. type LifecycleHookSpecification struct { @@ -937,9 +936,9 @@ type LifecycleHookSpecification struct { // The state of the EC2 instance to which you want to attach the lifecycle hook. // The valid values are: // - // * autoscaling:EC2_INSTANCE_LAUNCHING + // * autoscaling:EC2_INSTANCE_LAUNCHING // - // * + // * // autoscaling:EC2_INSTANCE_TERMINATING // // This member is required. @@ -987,22 +986,22 @@ type LoadBalancerState struct { // One of the following load balancer states: // - // * Adding - The instances in the + // * Adding - The instances in the // group are being registered with the load balancer. // - // * Added - All instances - // in the group are registered with the load balancer. + // * Added - All instances in + // the group are registered with the load balancer. // - // * InService - At least - // one instance in the group passed an ELB health check. + // * InService - At least one + // instance in the group passed an ELB health check. // - // * Removing - The - // instances in the group are being deregistered from the load balancer. If - // connection draining is enabled, Elastic Load Balancing waits for in-flight - // requests to complete before deregistering the instances. + // * Removing - The instances in + // the group are being deregistered from the load balancer. If connection draining + // is enabled, Elastic Load Balancing waits for in-flight requests to complete + // before deregistering the instances. // - // * Removed - All - // instances in the group are deregistered from the load balancer. 
+ // * Removed - All instances in the group are + // deregistered from the load balancer. State *string } @@ -1020,22 +1019,22 @@ type LoadBalancerTargetGroupState struct { // The state of the target group. // - // * Adding - The Auto Scaling instances are - // being registered with the target group. + // * Adding - The Auto Scaling instances are being + // registered with the target group. // - // * Added - All Auto Scaling - // instances are registered with the target group. + // * Added - All Auto Scaling instances are + // registered with the target group. // - // * InService - At least one - // Auto Scaling instance passed an ELB health check. + // * InService - At least one Auto Scaling + // instance passed an ELB health check. // - // * Removing - The Auto - // Scaling instances are being deregistered from the target group. If connection - // draining is enabled, Elastic Load Balancing waits for in-flight requests to - // complete before deregistering the instances. + // * Removing - The Auto Scaling instances + // are being deregistered from the target group. If connection draining is enabled, + // Elastic Load Balancing waits for in-flight requests to complete before + // deregistering the instances. // - // * Removed - All Auto Scaling - // instances are deregistered from the target group. + // * Removed - All Auto Scaling instances are + // deregistered from the target group. State *string } @@ -1044,36 +1043,35 @@ type MetricCollectionType struct { // One of the following metrics: // - // * GroupMinSize + // * GroupMinSize // - // * GroupMaxSize + // * GroupMaxSize // - // * + // * // GroupDesiredCapacity // - // * GroupInServiceInstances + // * GroupInServiceInstances // - // * - // GroupPendingInstances + // * GroupPendingInstances // - // * GroupStandbyInstances + // * + // GroupStandbyInstances // - // * - // GroupTerminatingInstances + // * GroupTerminatingInstances // - // * GroupTotalInstances + // * GroupTotalInstances // - // * + // * // GroupInServiceCapacity // - // * GroupPendingCapacity - // - // * GroupStandbyCapacity + // * GroupPendingCapacity // + // * GroupStandbyCapacity // - // * GroupTerminatingCapacity + // * + // GroupTerminatingCapacity // - // * GroupTotalCapacity + // * GroupTotalCapacity Metric *string } @@ -1128,18 +1126,18 @@ type NotificationConfiguration struct { // One of the following event notification types: // - // * + // * // autoscaling:EC2_INSTANCE_LAUNCH // - // * autoscaling:EC2_INSTANCE_LAUNCH_ERROR - // + // * autoscaling:EC2_INSTANCE_LAUNCH_ERROR // - // * autoscaling:EC2_INSTANCE_TERMINATE + // * + // autoscaling:EC2_INSTANCE_TERMINATE // - // * + // * // autoscaling:EC2_INSTANCE_TERMINATE_ERROR // - // * autoscaling:TEST_NOTIFICATION + // * autoscaling:TEST_NOTIFICATION NotificationType *string // The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (Amazon @@ -1153,18 +1151,18 @@ type PredefinedMetricSpecification struct { // The metric type. The following predefined metrics are available: // - // * + // * // ASGAverageCPUUtilization - Average CPU utilization of the Auto Scaling group. // + // * + // ASGAverageNetworkIn - Average number of bytes received on all network interfaces + // by the Auto Scaling group. // - // * ASGAverageNetworkIn - Average number of bytes received on all network - // interfaces by the Auto Scaling group. - // - // * ASGAverageNetworkOut - Average - // number of bytes sent out on all network interfaces by the Auto Scaling group. 
- // + // * ASGAverageNetworkOut - Average number of bytes + // sent out on all network interfaces by the Auto Scaling group. // - // * ALBRequestCountPerTarget - Number of requests completed per target in an + // * + // ALBRequestCountPerTarget - Number of requests completed per target in an // Application Load Balancer target group. // // This member is required. @@ -1178,14 +1176,14 @@ type PredefinedMetricSpecification struct { // app/load-balancer-name/load-balancer-id/targetgroup/target-group-name/target-group-id // , where // - // * app/load-balancer-name/load-balancer-id is the final portion of - // the load balancer ARN, and + // * app/load-balancer-name/load-balancer-id is the final portion of the + // load balancer ARN, and // - // * targetgroup/target-group-name/target-group-id - // is the final portion of the target group ARN. + // * targetgroup/target-group-name/target-group-id is the + // final portion of the target group ARN. // - // To find the ARN for an - // Application Load Balancer, use the DescribeLoadBalancers + // To find the ARN for an Application Load + // Balancer, use the DescribeLoadBalancers // (https://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/API_DescribeLoadBalancers.html) // API operation. To find the ARN for the target group, use the // DescribeTargetGroups @@ -1201,26 +1199,25 @@ type ProcessType struct { // One of the following processes: // - // * Launch + // * Launch // - // * Terminate + // * Terminate // - // * - // AddToLoadBalancer + // * AddToLoadBalancer // - // * AlarmNotification + // * + // AlarmNotification // - // * AZRebalance + // * AZRebalance // - // * - // HealthCheck + // * HealthCheck // - // * InstanceRefresh + // * InstanceRefresh // - // * ReplaceUnhealthy + // * + // ReplaceUnhealthy // - // * - // ScheduledActions + // * ScheduledActions // // This member is required. ProcessName *string @@ -1285,15 +1282,15 @@ type ScalingPolicy struct { // One of the following policy types: // - // * TargetTrackingScaling + // * TargetTrackingScaling // - // * - // StepScaling + // * StepScaling // - // * SimpleScaling (default) + // * + // SimpleScaling (default) // - // For more information, see Target - // Tracking Scaling Policies + // For more information, see Target Tracking Scaling + // Policies // (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-scaling-target-tracking.html) // and Step and Simple Scaling Policies // (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-scaling-simple-step.html) @@ -1397,30 +1394,30 @@ type ScheduledUpdateGroupActionRequest struct { // policy. For the following examples, suppose that you have an alarm with a breach // threshold of 50: // -// * To trigger the adjustment when the metric is greater -// than or equal to 50 and less than 60, specify a lower bound of 0 and an upper -// bound of 10. +// * To trigger the adjustment when the metric is greater than or +// equal to 50 and less than 60, specify a lower bound of 0 and an upper bound of +// 10. // -// * To trigger the adjustment when the metric is greater than 40 -// and less than or equal to 50, specify a lower bound of -10 and an upper bound of +// * To trigger the adjustment when the metric is greater than 40 and less +// than or equal to 50, specify a lower bound of -10 and an upper bound of // 0. // // There are a few rules for the step adjustments for your step policy: // -// * -// The ranges of your step adjustments can't overlap or have a gap. 
+// * The +// ranges of your step adjustments can't overlap or have a gap. // -// * At most, -// one step adjustment can have a null lower bound. If one step adjustment has a +// * At most, one +// step adjustment can have a null lower bound. If one step adjustment has a // negative lower bound, then there must be a step adjustment with a null lower // bound. // -// * At most, one step adjustment can have a null upper bound. If one -// step adjustment has a positive upper bound, then there must be a step adjustment -// with a null upper bound. +// * At most, one step adjustment can have a null upper bound. If one step +// adjustment has a positive upper bound, then there must be a step adjustment with +// a null upper bound. // -// * The upper and lower bound can't be null in the -// same step adjustment. +// * The upper and lower bound can't be null in the same step +// adjustment. // // For more information, see Step Adjustments // (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-scaling-simple-step.html#as-scaling-steps) diff --git a/service/autoscalingplans/api_op_GetScalingPlanResourceForecastData.go b/service/autoscalingplans/api_op_GetScalingPlanResourceForecastData.go index ef0d61110cf..95edfb86ea8 100644 --- a/service/autoscalingplans/api_op_GetScalingPlanResourceForecastData.go +++ b/service/autoscalingplans/api_op_GetScalingPlanResourceForecastData.go @@ -44,20 +44,19 @@ type GetScalingPlanResourceForecastDataInput struct { // The type of forecast data to get. // - // * LoadForecast: The load metric - // forecast. + // * LoadForecast: The load metric forecast. // - // * CapacityForecast: The capacity forecast. + // * + // CapacityForecast: The capacity forecast. // - // * - // ScheduledActionMinCapacity: The minimum capacity for each scheduled scaling - // action. This data is calculated as the larger of two values: the capacity - // forecast or the minimum capacity in the scaling instruction. + // * ScheduledActionMinCapacity: The + // minimum capacity for each scheduled scaling action. This data is calculated as + // the larger of two values: the capacity forecast or the minimum capacity in the + // scaling instruction. // - // * - // ScheduledActionMaxCapacity: The maximum capacity for each scheduled scaling - // action. The calculation used is determined by the predictive scaling maximum - // capacity behavior setting in the scaling instruction. + // * ScheduledActionMaxCapacity: The maximum capacity for + // each scheduled scaling action. The calculation used is determined by the + // predictive scaling maximum capacity behavior setting in the scaling instruction. // // This member is required. ForecastDataType types.ForecastDataType @@ -65,28 +64,28 @@ type GetScalingPlanResourceForecastDataInput struct { // The ID of the resource. This string consists of the resource type and unique // identifier. // - // * Auto Scaling group - The resource type is autoScalingGroup - // and the unique identifier is the name of the Auto Scaling group. Example: + // * Auto Scaling group - The resource type is autoScalingGroup and + // the unique identifier is the name of the Auto Scaling group. Example: // autoScalingGroup/my-asg. // - // * ECS service - The resource type is service and - // the unique identifier is the cluster name and service name. Example: + // * ECS service - The resource type is service and the + // unique identifier is the cluster name and service name. Example: // service/default/sample-webapp. 
// - // * Spot Fleet request - The resource type is + // * Spot Fleet request - The resource type is // spot-fleet-request and the unique identifier is the Spot Fleet request ID. // Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. // - // * + // * // DynamoDB table - The resource type is table and the unique identifier is the // resource ID. Example: table/my-table. // - // * DynamoDB global secondary index - - // The resource type is index and the unique identifier is the resource ID. - // Example: table/my-table/index/my-table-index. + // * DynamoDB global secondary index - The + // resource type is index and the unique identifier is the resource ID. Example: + // table/my-table/index/my-table-index. // - // * Aurora DB cluster - The - // resource type is cluster and the unique identifier is the cluster name. Example: + // * Aurora DB cluster - The resource type is + // cluster and the unique identifier is the cluster name. Example: // cluster:my-db-cluster. // // This member is required. diff --git a/service/autoscalingplans/types/types.go b/service/autoscalingplans/types/types.go index 60ea6019811..adb459770f2 100644 --- a/service/autoscalingplans/types/types.go +++ b/service/autoscalingplans/types/types.go @@ -63,14 +63,14 @@ type CustomizedLoadMetricSpecification struct { // scaling as part of a target tracking scaling policy. To create your customized // scaling metric specification: // -// * Add values for each required parameter from +// * Add values for each required parameter from // CloudWatch. You can use an existing metric, or a new metric that you create. To // use your own metric, you must first publish the metric to CloudWatch. For more // information, see Publish Custom Metrics // (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/publishingMetrics.html) // in the Amazon CloudWatch User Guide. // -// * Choose a metric that changes +// * Choose a metric that changes // proportionally with capacity. The value of the metric should increase or // decrease in inverse proportion to the number of capacity units. That is, the // value of the metric should decrease when capacity increases. @@ -141,11 +141,11 @@ type PredefinedLoadMetricSpecification struct { // target group for an Application Load Balancer attached to the Auto Scaling // group. The format is app///targetgroup//, where: // - // * app// is the final - // portion of the load balancer ARN. + // * app// is the final portion + // of the load balancer ARN. // - // * targetgroup// is the final portion of - // the target group ARN. + // * targetgroup// is the final portion of the target + // group ARN. ResourceLabel *string } @@ -165,10 +165,10 @@ type PredefinedScalingMetricSpecification struct { // group, Spot Fleet request, or ECS service. The format is app///targetgroup//, // where: // - // * app// is the final portion of the load balancer ARN. + // * app// is the final portion of the load balancer ARN. // - // * - // targetgroup// is the final portion of the target group ARN. + // * targetgroup// + // is the final portion of the target group ARN. ResourceLabel *string } @@ -208,28 +208,28 @@ type ScalingInstruction struct { // The ID of the resource. This string consists of the resource type and unique // identifier. // - // * Auto Scaling group - The resource type is autoScalingGroup - // and the unique identifier is the name of the Auto Scaling group. 
Example: + // * Auto Scaling group - The resource type is autoScalingGroup and + // the unique identifier is the name of the Auto Scaling group. Example: // autoScalingGroup/my-asg. // - // * ECS service - The resource type is service and - // the unique identifier is the cluster name and service name. Example: + // * ECS service - The resource type is service and the + // unique identifier is the cluster name and service name. Example: // service/default/sample-webapp. // - // * Spot Fleet request - The resource type is + // * Spot Fleet request - The resource type is // spot-fleet-request and the unique identifier is the Spot Fleet request ID. // Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. // - // * + // * // DynamoDB table - The resource type is table and the unique identifier is the // resource ID. Example: table/my-table. // - // * DynamoDB global secondary index - - // The resource type is index and the unique identifier is the resource ID. - // Example: table/my-table/index/my-table-index. + // * DynamoDB global secondary index - The + // resource type is index and the unique identifier is the resource ID. Example: + // table/my-table/index/my-table-index. // - // * Aurora DB cluster - The - // resource type is cluster and the unique identifier is the cluster name. Example: + // * Aurora DB cluster - The resource type is + // cluster and the unique identifier is the cluster name. Example: // cluster:my-db-cluster. // // This member is required. @@ -237,33 +237,33 @@ type ScalingInstruction struct { // The scalable dimension associated with the resource. // - // * + // * // autoscaling:autoScalingGroup:DesiredCapacity - The desired capacity of an Auto // Scaling group. // - // * ecs:service:DesiredCount - The desired task count of an - // ECS service. + // * ecs:service:DesiredCount - The desired task count of an ECS + // service. // - // * ec2:spot-fleet-request:TargetCapacity - The target capacity - // of a Spot Fleet request. + // * ec2:spot-fleet-request:TargetCapacity - The target capacity of a + // Spot Fleet request. // - // * dynamodb:table:ReadCapacityUnits - The - // provisioned read capacity for a DynamoDB table. + // * dynamodb:table:ReadCapacityUnits - The provisioned read + // capacity for a DynamoDB table. // - // * - // dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a - // DynamoDB table. + // * dynamodb:table:WriteCapacityUnits - The + // provisioned write capacity for a DynamoDB table. // - // * dynamodb:index:ReadCapacityUnits - The provisioned read - // capacity for a DynamoDB global secondary index. + // * + // dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB + // global secondary index. // - // * - // dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a - // DynamoDB global secondary index. + // * dynamodb:index:WriteCapacityUnits - The provisioned + // write capacity for a DynamoDB global secondary index. // - // * rds:cluster:ReadReplicaCount - The count - // of Aurora Replicas in an Aurora DB cluster. Available for Aurora - // MySQL-compatible edition and Aurora PostgreSQL-compatible edition. + // * + // rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB + // cluster. Available for Aurora MySQL-compatible edition and Aurora + // PostgreSQL-compatible edition. // // This member is required. 
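// Illustrative sketch (not part of the generated diff): the ResourceId values
// documented above are plain strings of the form "<resource type>/<identifier>"
// (Aurora uses "cluster:<name>"). These helper functions are hypothetical and
// exist only to show the documented formats.
package main

import "fmt"

func asgResourceID(group string) string { return "autoScalingGroup/" + group }

func ecsServiceResourceID(cluster, service string) string {
	return fmt.Sprintf("service/%s/%s", cluster, service)
}

func dynamoDBIndexResourceID(table, index string) string {
	return fmt.Sprintf("table/%s/index/%s", table, index)
}

func auroraClusterResourceID(cluster string) string { return "cluster:" + cluster }

func main() {
	fmt.Println(asgResourceID("my-asg"))                          // autoScalingGroup/my-asg
	fmt.Println(ecsServiceResourceID("default", "sample-webapp")) // service/default/sample-webapp
	fmt.Println(dynamoDBIndexResourceID("my-table", "my-table-index"))
	fmt.Println(auroraClusterResourceID("my-db-cluster")) // cluster:my-db-cluster
}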
ScalableDimension ScalableDimension @@ -304,18 +304,18 @@ type ScalingInstruction struct { // or exceeds the maximum capacity specified for the resource. The default value is // SetForecastCapacityToMaxCapacity. The following are possible values: // - // * + // * // SetForecastCapacityToMaxCapacity - AWS Auto Scaling cannot scale resource // capacity higher than the maximum capacity. The maximum capacity is enforced as a // hard limit. // - // * SetMaxCapacityToForecastCapacity - AWS Auto Scaling may scale + // * SetMaxCapacityToForecastCapacity - AWS Auto Scaling may scale // resource capacity higher than the maximum capacity to equal but not exceed // forecast capacity. // - // * SetMaxCapacityAboveForecastCapacity - AWS Auto Scaling - // may scale resource capacity higher than the maximum capacity by a specified - // buffer value. The intention is to give the target tracking scaling policy extra + // * SetMaxCapacityAboveForecastCapacity - AWS Auto Scaling may + // scale resource capacity higher than the maximum capacity by a specified buffer + // value. The intention is to give the target tracking scaling policy extra // capacity if unexpected traffic occurs. // // Only valid when configuring predictive @@ -385,28 +385,28 @@ type ScalingPlan struct { // The status of the scaling plan. // - // * Active - The scaling plan is active. + // * Active - The scaling plan is active. // - // - // * ActiveWithProblems - The scaling plan is active, but the scaling configuration + // * + // ActiveWithProblems - The scaling plan is active, but the scaling configuration // for one or more resources could not be applied. // - // * CreationInProgress - The + // * CreationInProgress - The // scaling plan is being created. // - // * CreationFailed - The scaling plan could - // not be created. - // - // * DeletionInProgress - The scaling plan is being deleted. + // * CreationFailed - The scaling plan could not be + // created. // + // * DeletionInProgress - The scaling plan is being deleted. // - // * DeletionFailed - The scaling plan could not be deleted. + // * + // DeletionFailed - The scaling plan could not be deleted. // - // * - // UpdateInProgress - The scaling plan is being updated. + // * UpdateInProgress - + // The scaling plan is being updated. // - // * UpdateFailed - The - // scaling plan could not be updated. + // * UpdateFailed - The scaling plan could not + // be updated. // // This member is required. StatusCode ScalingPlanStatusCode @@ -427,28 +427,28 @@ type ScalingPlanResource struct { // The ID of the resource. This string consists of the resource type and unique // identifier. // - // * Auto Scaling group - The resource type is autoScalingGroup - // and the unique identifier is the name of the Auto Scaling group. Example: + // * Auto Scaling group - The resource type is autoScalingGroup and + // the unique identifier is the name of the Auto Scaling group. Example: // autoScalingGroup/my-asg. // - // * ECS service - The resource type is service and - // the unique identifier is the cluster name and service name. Example: + // * ECS service - The resource type is service and the + // unique identifier is the cluster name and service name. Example: // service/default/sample-webapp. // - // * Spot Fleet request - The resource type is + // * Spot Fleet request - The resource type is // spot-fleet-request and the unique identifier is the Spot Fleet request ID. // Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. 
// - // * + // * // DynamoDB table - The resource type is table and the unique identifier is the // resource ID. Example: table/my-table. // - // * DynamoDB global secondary index - - // The resource type is index and the unique identifier is the resource ID. - // Example: table/my-table/index/my-table-index. + // * DynamoDB global secondary index - The + // resource type is index and the unique identifier is the resource ID. Example: + // table/my-table/index/my-table-index. // - // * Aurora DB cluster - The - // resource type is cluster and the unique identifier is the cluster name. Example: + // * Aurora DB cluster - The resource type is + // cluster and the unique identifier is the cluster name. Example: // cluster:my-db-cluster. // // This member is required. @@ -456,33 +456,33 @@ type ScalingPlanResource struct { // The scalable dimension for the resource. // - // * + // * // autoscaling:autoScalingGroup:DesiredCapacity - The desired capacity of an Auto // Scaling group. // - // * ecs:service:DesiredCount - The desired task count of an - // ECS service. + // * ecs:service:DesiredCount - The desired task count of an ECS + // service. // - // * ec2:spot-fleet-request:TargetCapacity - The target capacity - // of a Spot Fleet request. + // * ec2:spot-fleet-request:TargetCapacity - The target capacity of a + // Spot Fleet request. // - // * dynamodb:table:ReadCapacityUnits - The - // provisioned read capacity for a DynamoDB table. + // * dynamodb:table:ReadCapacityUnits - The provisioned read + // capacity for a DynamoDB table. // - // * - // dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a - // DynamoDB table. + // * dynamodb:table:WriteCapacityUnits - The + // provisioned write capacity for a DynamoDB table. // - // * dynamodb:index:ReadCapacityUnits - The provisioned read - // capacity for a DynamoDB global secondary index. + // * + // dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB + // global secondary index. // - // * - // dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a - // DynamoDB global secondary index. + // * dynamodb:index:WriteCapacityUnits - The provisioned + // write capacity for a DynamoDB global secondary index. // - // * rds:cluster:ReadReplicaCount - The count - // of Aurora Replicas in an Aurora DB cluster. Available for Aurora - // MySQL-compatible edition and Aurora PostgreSQL-compatible edition. + // * + // rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB + // cluster. Available for Aurora MySQL-compatible edition and Aurora + // PostgreSQL-compatible edition. // // This member is required. ScalableDimension ScalableDimension @@ -499,17 +499,17 @@ type ScalingPlanResource struct { // The scaling status of the resource. // - // * Active - The scaling configuration is + // * Active - The scaling configuration is // active. // - // * Inactive - The scaling configuration is not active because the + // * Inactive - The scaling configuration is not active because the // scaling plan is being created or the scaling configuration could not be applied. // Check the status message for more information. // - // * PartiallyActive - The - // scaling configuration is partially active because the scaling plan is being - // created or deleted or the scaling configuration could not be fully applied. - // Check the status message for more information. 
+ // * PartiallyActive - The scaling + // configuration is partially active because the scaling plan is being created or + // deleted or the scaling configuration could not be fully applied. Check the + // status message for more information. // // This member is required. ScalingStatusCode ScalingStatusCode diff --git a/service/backup/api_op_CreateBackupSelection.go b/service/backup/api_op_CreateBackupSelection.go index dfe375ad4d5..438d8c22853 100644 --- a/service/backup/api_op_CreateBackupSelection.go +++ b/service/backup/api_op_CreateBackupSelection.go @@ -16,13 +16,12 @@ import ( // plan. Resources can be included by specifying patterns for a ListOfTags and // selected Resources. For example, consider the following patterns: // -// * -// Resources: "arn:aws:ec2:region:account-id:volume/volume-id" +// * Resources: +// "arn:aws:ec2:region:account-id:volume/volume-id" // -// * +// * // ConditionKey:"department"ConditionValue:"finance"ConditionType:"StringEquals" // -// // * // ConditionKey:"importance"ConditionValue:"critical"ConditionType:"StringEquals" // diff --git a/service/backup/api_op_GetSupportedResourceTypes.go b/service/backup/api_op_GetSupportedResourceTypes.go index 912c6b5b300..31eb7573225 100644 --- a/service/backup/api_op_GetSupportedResourceTypes.go +++ b/service/backup/api_op_GetSupportedResourceTypes.go @@ -33,21 +33,20 @@ type GetSupportedResourceTypesOutput struct { // Contains a string with the supported AWS resource types: // - // * DynamoDB for - // Amazon DynamoDB + // * DynamoDB for Amazon + // DynamoDB // - // * EBS for Amazon Elastic Block Store + // * EBS for Amazon Elastic Block Store // - // * EC2 for Amazon - // Elastic Compute Cloud + // * EC2 for Amazon Elastic Compute + // Cloud // - // * EFS for Amazon Elastic File System + // * EFS for Amazon Elastic File System // - // * RDS for - // Amazon Relational Database Service + // * RDS for Amazon Relational + // Database Service // - // * Storage Gateway for AWS Storage - // Gateway + // * Storage Gateway for AWS Storage Gateway ResourceTypes []*string // Metadata pertaining to the operation's result. diff --git a/service/backup/api_op_ListBackupJobs.go b/service/backup/api_op_ListBackupJobs.go index 592ab34b889..e5f2b9564e1 100644 --- a/service/backup/api_op_ListBackupJobs.go +++ b/service/backup/api_op_ListBackupJobs.go @@ -52,20 +52,20 @@ type ListBackupJobsInput struct { // Returns only backup jobs for the specified resources: // - // * DynamoDB for Amazon + // * DynamoDB for Amazon // DynamoDB // - // * EBS for Amazon Elastic Block Store + // * EBS for Amazon Elastic Block Store // - // * EC2 for Amazon Elastic - // Compute Cloud + // * EC2 for Amazon Elastic Compute + // Cloud // - // * EFS for Amazon Elastic File System + // * EFS for Amazon Elastic File System // - // * RDS for Amazon - // Relational Database Service + // * RDS for Amazon Relational + // Database Service // - // * Storage Gateway for AWS Storage Gateway + // * Storage Gateway for AWS Storage Gateway ByResourceType *string // Returns only backup jobs that are in the specified state. 
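// Illustrative sketch (not part of the generated diff): filtering backup jobs by
// the resource-type strings documented above. The ByResourceType field comes from
// this patch; the client call and the BackupJobs output field are assumed from
// the usual shape of the generated package.
package examples

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/backup"
)

func listEC2BackupJobs(ctx context.Context, client *backup.Client) error {
	out, err := client.ListBackupJobs(ctx, &backup.ListBackupJobsInput{
		// One of "DynamoDB", "EBS", "EC2", "EFS", "RDS", or "Storage Gateway".
		ByResourceType: aws.String("EC2"),
	})
	if err != nil {
		return err
	}
	fmt.Printf("found %d backup jobs\n", len(out.BackupJobs)) // assumed field name
	return nil
}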
diff --git a/service/backup/api_op_ListCopyJobs.go b/service/backup/api_op_ListCopyJobs.go index 9bb04e1e128..b277edd3734 100644 --- a/service/backup/api_op_ListCopyJobs.go +++ b/service/backup/api_op_ListCopyJobs.go @@ -51,20 +51,20 @@ type ListCopyJobsInput struct { // Returns only backup jobs for the specified resources: // - // * DynamoDB for Amazon + // * DynamoDB for Amazon // DynamoDB // - // * EBS for Amazon Elastic Block Store + // * EBS for Amazon Elastic Block Store // - // * EC2 for Amazon Elastic - // Compute Cloud + // * EC2 for Amazon Elastic Compute + // Cloud // - // * EFS for Amazon Elastic File System + // * EFS for Amazon Elastic File System // - // * RDS for Amazon - // Relational Database Service + // * RDS for Amazon Relational + // Database Service // - // * Storage Gateway for AWS Storage Gateway + // * Storage Gateway for AWS Storage Gateway ByResourceType *string // Returns only copy jobs that are in the specified state. diff --git a/service/backup/api_op_StartRestoreJob.go b/service/backup/api_op_StartRestoreJob.go index 9c12cefe9d9..3cea587a60c 100644 --- a/service/backup/api_op_StartRestoreJob.go +++ b/service/backup/api_op_StartRestoreJob.go @@ -43,29 +43,29 @@ type StartRestoreJobInput struct { // exists. You need to specify specific metadata to restore an Amazon Elastic File // System (Amazon EFS) instance: // - // * file-system-id: The ID of the Amazon EFS - // file system that is backed up by AWS Backup. Returned in + // * file-system-id: The ID of the Amazon EFS file + // system that is backed up by AWS Backup. Returned in // GetRecoveryPointRestoreMetadata. // - // * Encrypted: A Boolean value that, if - // true, specifies that the file system is encrypted. If KmsKeyId is specified, - // Encrypted must be set to true. + // * Encrypted: A Boolean value that, if true, + // specifies that the file system is encrypted. If KmsKeyId is specified, Encrypted + // must be set to true. // - // * KmsKeyId: Specifies the AWS KMS key that - // is used to encrypt the restored file system. You can specify a key from another - // AWS account provided that key it is properly shared with your account via AWS - // KMS. + // * KmsKeyId: Specifies the AWS KMS key that is used to + // encrypt the restored file system. You can specify a key from another AWS account + // provided that key it is properly shared with your account via AWS KMS. // - // * PerformanceMode: Specifies the throughput mode of the file system. + // * + // PerformanceMode: Specifies the throughput mode of the file system. // - // - // * CreationToken: A user-supplied value that ensures the uniqueness (idempotency) + // * + // CreationToken: A user-supplied value that ensures the uniqueness (idempotency) // of the request. // - // * newFileSystem: A Boolean value that, if true, specifies - // that the recovery point is restored to a new Amazon EFS file system. + // * newFileSystem: A Boolean value that, if true, specifies that + // the recovery point is restored to a new Amazon EFS file system. // - // * + // * // ItemsToRestore : A serialized list of up to five strings where each string is a // file path. Use ItemsToRestore to restore specific files or directories rather // than the entire file system. This parameter is optional. 
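The restore metadata keys listed above translate directly into the Metadata map passed to StartRestoreJob. A minimal sketch follows; the RecoveryPointArn, IamRoleArn and Metadata member names are assumed from the Backup API shape and do not appear in this patch, and real metadata values normally come from GetRecoveryPointRestoreMetadata.

package examples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/backup"
)

// restoreEFSToNewFileSystem starts a restore job that creates a new Amazon EFS
// file system from a recovery point, using the metadata keys documented above.
func restoreEFSToNewFileSystem(ctx context.Context, cfg aws.Config, recoveryPointArn, roleArn string) error {
	client := backup.NewFromConfig(cfg)
	metadata := map[string]*string{
		"newFileSystem":   aws.String("true"),            // restore into a new file system
		"Encrypted":       aws.String("false"),           // set to "true" and add "KmsKeyId" for an encrypted target
		"PerformanceMode": aws.String("generalPurpose"),  // throughput mode of the new file system
		"CreationToken":   aws.String("restore-example"), // idempotency token for the request
	}
	_, err := client.StartRestoreJob(ctx, &backup.StartRestoreJobInput{
		RecoveryPointArn: aws.String(recoveryPointArn),
		IamRoleArn:       aws.String(roleArn),
		Metadata:         metadata,
		ResourceType:     aws.String("EFS"),
	})
	return err
}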
@@ -85,21 +85,20 @@ type StartRestoreJobInput struct { // Starts a job to restore a recovery point for one of the following resources: // + // * + // DynamoDB for Amazon DynamoDB // - // * DynamoDB for Amazon DynamoDB - // - // * EBS for Amazon Elastic Block Store - // - // * - // EC2 for Amazon Elastic Compute Cloud + // * EBS for Amazon Elastic Block Store // - // * EFS for Amazon Elastic File System + // * EC2 for + // Amazon Elastic Compute Cloud // + // * EFS for Amazon Elastic File System // - // * RDS for Amazon Relational Database Service + // * RDS for + // Amazon Relational Database Service // - // * Storage Gateway for AWS - // Storage Gateway + // * Storage Gateway for AWS Storage Gateway ResourceType *string } diff --git a/service/backup/types/enums.go b/service/backup/types/enums.go index 4ead983a329..cbafee9720e 100644 --- a/service/backup/types/enums.go +++ b/service/backup/types/enums.go @@ -36,21 +36,21 @@ type BackupVaultEvent string // Enum values for BackupVaultEvent const ( - BackupVaultEventBackup_job_started BackupVaultEvent = "BACKUP_JOB_STARTED" - BackupVaultEventBackup_job_completed BackupVaultEvent = "BACKUP_JOB_COMPLETED" - BackupVaultEventBackup_job_successful BackupVaultEvent = "BACKUP_JOB_SUCCESSFUL" - BackupVaultEventBackup_job_failed BackupVaultEvent = "BACKUP_JOB_FAILED" - BackupVaultEventBackup_job_expired BackupVaultEvent = "BACKUP_JOB_EXPIRED" - BackupVaultEventRestore_job_started BackupVaultEvent = "RESTORE_JOB_STARTED" - BackupVaultEventRestore_job_completed BackupVaultEvent = "RESTORE_JOB_COMPLETED" - BackupVaultEventRestore_job_successful BackupVaultEvent = "RESTORE_JOB_SUCCESSFUL" - BackupVaultEventRestore_job_failed BackupVaultEvent = "RESTORE_JOB_FAILED" - BackupVaultEventCopy_job_started BackupVaultEvent = "COPY_JOB_STARTED" - BackupVaultEventCopy_job_successful BackupVaultEvent = "COPY_JOB_SUCCESSFUL" - BackupVaultEventCopy_job_failed BackupVaultEvent = "COPY_JOB_FAILED" - BackupVaultEventRecovery_point_modified BackupVaultEvent = "RECOVERY_POINT_MODIFIED" - BackupVaultEventBackup_plan_created BackupVaultEvent = "BACKUP_PLAN_CREATED" - BackupVaultEventBackup_plan_modified BackupVaultEvent = "BACKUP_PLAN_MODIFIED" + BackupVaultEventBackupJobStarted BackupVaultEvent = "BACKUP_JOB_STARTED" + BackupVaultEventBackupJobCompleted BackupVaultEvent = "BACKUP_JOB_COMPLETED" + BackupVaultEventBackupJobSuccessful BackupVaultEvent = "BACKUP_JOB_SUCCESSFUL" + BackupVaultEventBackupJobFailed BackupVaultEvent = "BACKUP_JOB_FAILED" + BackupVaultEventBackupJobExpired BackupVaultEvent = "BACKUP_JOB_EXPIRED" + BackupVaultEventRestoreJobStarted BackupVaultEvent = "RESTORE_JOB_STARTED" + BackupVaultEventRestoreJobCompleted BackupVaultEvent = "RESTORE_JOB_COMPLETED" + BackupVaultEventRestoreJobSuccessful BackupVaultEvent = "RESTORE_JOB_SUCCESSFUL" + BackupVaultEventRestoreJobFailed BackupVaultEvent = "RESTORE_JOB_FAILED" + BackupVaultEventCopyJobStarted BackupVaultEvent = "COPY_JOB_STARTED" + BackupVaultEventCopyJobSuccessful BackupVaultEvent = "COPY_JOB_SUCCESSFUL" + BackupVaultEventCopyJobFailed BackupVaultEvent = "COPY_JOB_FAILED" + BackupVaultEventRecoveryPointModified BackupVaultEvent = "RECOVERY_POINT_MODIFIED" + BackupVaultEventBackupPlanCreated BackupVaultEvent = "BACKUP_PLAN_CREATED" + BackupVaultEventBackupPlanModified BackupVaultEvent = "BACKUP_PLAN_MODIFIED" ) // Values returns all known values for BackupVaultEvent. 
Note that this can be diff --git a/service/batch/api_op_CreateComputeEnvironment.go b/service/batch/api_op_CreateComputeEnvironment.go index c86372ed126..2be532108b5 100644 --- a/service/batch/api_op_CreateComputeEnvironment.go +++ b/service/batch/api_op_CreateComputeEnvironment.go @@ -41,15 +41,15 @@ import ( // you install on the compute resources. To use a new AMI for your AWS Batch // jobs: // -// * Create a new compute environment with the new AMI. +// * Create a new compute environment with the new AMI. // -// * Add the -// compute environment to an existing job queue. +// * Add the compute +// environment to an existing job queue. // -// * Remove the old compute -// environment from your job queue. +// * Remove the old compute environment from +// your job queue. // -// * Delete the old compute environment. +// * Delete the old compute environment. func (c *Client) CreateComputeEnvironment(ctx context.Context, params *CreateComputeEnvironmentInput, optFns ...func(*Options)) (*CreateComputeEnvironmentOutput, error) { if params == nil { params = &CreateComputeEnvironmentInput{} diff --git a/service/batch/api_op_ListJobs.go b/service/batch/api_op_ListJobs.go index 1126bde9b3c..5cc8d90b60a 100644 --- a/service/batch/api_op_ListJobs.go +++ b/service/batch/api_op_ListJobs.go @@ -13,13 +13,13 @@ import ( // Returns a list of AWS Batch jobs. You must specify only one of the following: // +// * +// a job queue ID to return a list of jobs in that job queue // -// * a job queue ID to return a list of jobs in that job queue -// -// * a multi-node +// * a multi-node // parallel job ID to return a list of that job's nodes // -// * an array job ID to +// * an array job ID to // return a list of that job's children // // You can filter the results by job status diff --git a/service/batch/types/enums.go b/service/batch/types/enums.go index a3f7b6a1886..fb9eb06d3b3 100644 --- a/service/batch/types/enums.go +++ b/service/batch/types/enums.go @@ -6,7 +6,7 @@ type ArrayJobDependency string // Enum values for ArrayJobDependency const ( - ArrayJobDependencyN_to_n ArrayJobDependency = "N_TO_N" + ArrayJobDependencyNToN ArrayJobDependency = "N_TO_N" ArrayJobDependencySequential ArrayJobDependency = "SEQUENTIAL" ) @@ -86,9 +86,9 @@ type CRAllocationStrategy string // Enum values for CRAllocationStrategy const ( - CRAllocationStrategyBest_fit CRAllocationStrategy = "BEST_FIT" - CRAllocationStrategyBest_fit_progressive CRAllocationStrategy = "BEST_FIT_PROGRESSIVE" - CRAllocationStrategySpot_capacity_optimized CRAllocationStrategy = "SPOT_CAPACITY_OPTIMIZED" + CRAllocationStrategyBestFit CRAllocationStrategy = "BEST_FIT" + CRAllocationStrategyBestFitProgressive CRAllocationStrategy = "BEST_FIT_PROGRESSIVE" + CRAllocationStrategySpotCapacityOptimized CRAllocationStrategy = "SPOT_CAPACITY_OPTIMIZED" ) // Values returns all known values for CRAllocationStrategy. 
Note that this can be @@ -234,13 +234,13 @@ type LogDriver string // Enum values for LogDriver const ( - LogDriverJson_file LogDriver = "json-file" - LogDriverSyslog LogDriver = "syslog" - LogDriverJournald LogDriver = "journald" - LogDriverGelf LogDriver = "gelf" - LogDriverFluentd LogDriver = "fluentd" - LogDriverAwslogs LogDriver = "awslogs" - LogDriverSplunk LogDriver = "splunk" + LogDriverJsonFile LogDriver = "json-file" + LogDriverSyslog LogDriver = "syslog" + LogDriverJournald LogDriver = "journald" + LogDriverGelf LogDriver = "gelf" + LogDriverFluentd LogDriver = "fluentd" + LogDriverAwslogs LogDriver = "awslogs" + LogDriverSplunk LogDriver = "splunk" ) // Values returns all known values for LogDriver. Note that this can be expanded in diff --git a/service/batch/types/types.go b/service/batch/types/types.go index 861a44a95bf..e2c70e3ac39 100644 --- a/service/batch/types/types.go +++ b/service/batch/types/types.go @@ -485,18 +485,18 @@ type ContainerProperties struct { // Docker Remote API (https://docs.docker.com/engine/api/v1.23/) and the IMAGE // parameter of docker run (https://docs.docker.com/engine/reference/run/). // - // * + // * // Images in Amazon ECR repositories use the full registry and repository URI (for // example, 012345678910.dkr.ecr..amazonaws.com/). // - // * Images in official + // * Images in official // repositories on Docker Hub use a single name (for example, ubuntu or mongo). // - // - // * Images in other repositories on Docker Hub are qualified with an organization + // * + // Images in other repositories on Docker Hub are qualified with an organization // name (for example, amazon/amazon-ecs-agent). // - // * Images in other online + // * Images in other online // repositories are qualified further by a domain name (for example, // quay.io/assemblyline/ubuntu). Image *string @@ -1145,14 +1145,14 @@ type NodeOverrides struct { // the number of nodes that are specified in the job definition. To use this // override: // - // * There must be at least one node range in your job definition - // that has an open upper boundary (such as : or n:). + // * There must be at least one node range in your job definition that + // has an open upper boundary (such as : or n:). // - // * The lower boundary of - // the node range specified in the job definition must be fewer than the number of - // nodes specified in the override. + // * The lower boundary of the node + // range specified in the job definition must be fewer than the number of nodes + // specified in the override. // - // * The main node index specified in the job + // * The main node index specified in the job // definition must be fewer than the number of nodes specified in the override. NumNodes *int32 } @@ -1266,15 +1266,15 @@ type RetryStrategy struct { // An object representing the secret to expose to your container. Secrets can be // exposed to a container in the following ways: // -// * To inject sensitive data -// into your containers as environment variables, use the secrets container -// definition parameter. -// -// * To reference sensitive information in the log -// configuration of a container, use the secretOptions container definition +// * To inject sensitive data into +// your containers as environment variables, use the secrets container definition // parameter. // -// For more information, see Specifying Sensitive Data +// * To reference sensitive information in the log configuration of a +// container, use the secretOptions container definition parameter. 
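The two ways of exposing a secret described above map onto the Secrets and SecretOptions members of a Batch container definition. The sketch below assumes the Name and ValueFrom members of types.Secret and the LogConfiguration shape, which are not shown in this patch; LogDriverSplunk is the renamed constant from the enums.go hunk above.

package examples

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/batch/types"
)

// containerSecrets exposes the same secret to a container both as an
// environment variable (Secrets) and to the log driver (SecretOptions).
func containerSecrets(secretArn string) *types.ContainerProperties {
	secret := &types.Secret{
		Name:      aws.String("DB_PASSWORD"),
		ValueFrom: aws.String(secretArn), // Secrets Manager secret or SSM parameter ARN
	}
	return &types.ContainerProperties{
		Secrets: []*types.Secret{secret},
		LogConfiguration: &types.LogConfiguration{
			LogDriver:     types.LogDriverSplunk,
			SecretOptions: []*types.Secret{secret},
		},
	}
}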
+// +// For more +// information, see Specifying Sensitive Data // (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/specifying-sensitive-data.html) // in the Amazon Elastic Container Service Developer Guide. type Secret struct { diff --git a/service/budgets/doc.go b/service/budgets/doc.go index a3ec08e97b0..5ba53fdb5af 100644 --- a/service/budgets/doc.go +++ b/service/budgets/doc.go @@ -9,44 +9,44 @@ // for AWS Budgets. Budgets provide you with a way to see the following // information: // -// * How close your plan is to your budgeted amount or to the -// free tier limits +// * How close your plan is to your budgeted amount or to the free +// tier limits // -// * Your usage-to-date, including how much you've used of -// your Reserved Instances (RIs) +// * Your usage-to-date, including how much you've used of your +// Reserved Instances (RIs) // -// * Your current estimated charges from AWS, -// and how much your predicted usage will accrue in charges by the end of the -// month +// * Your current estimated charges from AWS, and how +// much your predicted usage will accrue in charges by the end of the month // -// * How much of your budget has been used +// * How +// much of your budget has been used // -// AWS updates your budget -// status several times a day. Budgets track your unblended costs, subscriptions, -// refunds, and RIs. You can create the following types of budgets: +// AWS updates your budget status several times +// a day. Budgets track your unblended costs, subscriptions, refunds, and RIs. You +// can create the following types of budgets: // -// * Cost -// budgets - Plan how much you want to spend on a service. +// * Cost budgets - Plan how much you +// want to spend on a service. // -// * Usage budgets - -// Plan how much you want to use one or more services. +// * Usage budgets - Plan how much you want to use one +// or more services. // -// * RI utilization -// budgets - Define a utilization threshold, and receive alerts when your RI usage -// falls below that threshold. This lets you see if your RIs are unused or -// under-utilized. +// * RI utilization budgets - Define a utilization threshold, +// and receive alerts when your RI usage falls below that threshold. This lets you +// see if your RIs are unused or under-utilized. // -// * RI coverage budgets - Define a coverage threshold, and -// receive alerts when the number of your instance hours that are covered by RIs -// fall below that threshold. This lets you see how much of your instance usage is -// covered by a reservation. +// * RI coverage budgets - Define a +// coverage threshold, and receive alerts when the number of your instance hours +// that are covered by RIs fall below that threshold. This lets you see how much of +// your instance usage is covered by a reservation. // -// Service Endpoint The AWS Budgets API provides the -// following endpoint: +// Service Endpoint The AWS +// Budgets API provides the following endpoint: // -// * https://budgets.amazonaws.com +// * +// https://budgets.amazonaws.com // -// For information about -// costs that are associated with the AWS Budgets API, see AWS Cost Management -// Pricing (https://aws.amazon.com/aws-cost-management/pricing/). +// For information about costs that are associated +// with the AWS Budgets API, see AWS Cost Management Pricing +// (https://aws.amazon.com/aws-cost-management/pricing/). 
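The alert thresholds described above are expressed through the Notification type that appears later in this patch. A minimal sketch of the "notify at 80% of actual spend" case follows; the member names and the NotificationTypeActual constant are assumed from the Budgets API, while ComparisonOperatorGreaterThan and ThresholdTypePercentage are the renamed constants from the enums.go hunk below.

package examples

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/budgets/types"
)

// overspendAlert describes a notification that fires when actual spend
// exceeds 80 percent of the budgeted amount.
func overspendAlert() *types.Notification {
	return &types.Notification{
		NotificationType:   types.NotificationTypeActual,
		ComparisonOperator: types.ComparisonOperatorGreaterThan,
		Threshold:          aws.Float64(80),
		ThresholdType:      types.ThresholdTypePercentage,
	}
}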
package budgets diff --git a/service/budgets/types/enums.go b/service/budgets/types/enums.go index a1b4b1db365..709b46194d1 100644 --- a/service/budgets/types/enums.go +++ b/service/budgets/types/enums.go @@ -6,16 +6,16 @@ type ActionStatus string // Enum values for ActionStatus const ( - ActionStatusStandby ActionStatus = "STANDBY" - ActionStatusPending ActionStatus = "PENDING" - ActionStatusExecution_in_progress ActionStatus = "EXECUTION_IN_PROGRESS" - ActionStatusExecution_success ActionStatus = "EXECUTION_SUCCESS" - ActionStatusExecution_failure ActionStatus = "EXECUTION_FAILURE" - ActionStatusReverse_in_progress ActionStatus = "REVERSE_IN_PROGRESS" - ActionStatusReverse_success ActionStatus = "REVERSE_SUCCESS" - ActionStatusReverse_failure ActionStatus = "REVERSE_FAILURE" - ActionStatusReset_in_progress ActionStatus = "RESET_IN_PROGRESS" - ActionStatusReset_failure ActionStatus = "RESET_FAILURE" + ActionStatusStandby ActionStatus = "STANDBY" + ActionStatusPending ActionStatus = "PENDING" + ActionStatusExecutionInProgress ActionStatus = "EXECUTION_IN_PROGRESS" + ActionStatusExecutionSuccess ActionStatus = "EXECUTION_SUCCESS" + ActionStatusExecutionFailure ActionStatus = "EXECUTION_FAILURE" + ActionStatusReverseInProgress ActionStatus = "REVERSE_IN_PROGRESS" + ActionStatusReverseSuccess ActionStatus = "REVERSE_SUCCESS" + ActionStatusReverseFailure ActionStatus = "REVERSE_FAILURE" + ActionStatusResetInProgress ActionStatus = "RESET_IN_PROGRESS" + ActionStatusResetFailure ActionStatus = "RESET_FAILURE" ) // Values returns all known values for ActionStatus. Note that this can be expanded @@ -40,8 +40,8 @@ type ActionSubType string // Enum values for ActionSubType const ( - ActionSubTypeStop_ec2 ActionSubType = "STOP_EC2_INSTANCES" - ActionSubTypeStop_rds ActionSubType = "STOP_RDS_INSTANCES" + ActionSubTypeStopEc2 ActionSubType = "STOP_EC2_INSTANCES" + ActionSubTypeStopRds ActionSubType = "STOP_RDS_INSTANCES" ) // Values returns all known values for ActionSubType. Note that this can be @@ -122,9 +122,9 @@ type ComparisonOperator string // Enum values for ComparisonOperator const ( - ComparisonOperatorGreater_than ComparisonOperator = "GREATER_THAN" - ComparisonOperatorLess_than ComparisonOperator = "LESS_THAN" - ComparisonOperatorEqual_to ComparisonOperator = "EQUAL_TO" + ComparisonOperatorGreaterThan ComparisonOperator = "GREATER_THAN" + ComparisonOperatorLessThan ComparisonOperator = "LESS_THAN" + ComparisonOperatorEqualTo ComparisonOperator = "EQUAL_TO" ) // Values returns all known values for ComparisonOperator. Note that this can be @@ -242,8 +242,8 @@ type ThresholdType string // Enum values for ThresholdType const ( - ThresholdTypePercentage ThresholdType = "PERCENTAGE" - ThresholdTypeAbsolute_value ThresholdType = "ABSOLUTE_VALUE" + ThresholdTypePercentage ThresholdType = "PERCENTAGE" + ThresholdTypeAbsoluteValue ThresholdType = "ABSOLUTE_VALUE" ) // Values returns all known values for ThresholdType. Note that this can be diff --git a/service/budgets/types/types.go b/service/budgets/types/types.go index ea62f0cd12b..2bb704f2acd 100644 --- a/service/budgets/types/types.go +++ b/service/budgets/types/types.go @@ -154,18 +154,17 @@ type Budget struct { // The cost filters, such as service or tag, that are applied to a budget. 
AWS // Budgets supports the following services as a filter for RI budgets: // - // * - // Amazon Elastic Compute Cloud - Compute + // * Amazon + // Elastic Compute Cloud - Compute // - // * Amazon Redshift + // * Amazon Redshift // - // * Amazon - // Relational Database Service + // * Amazon Relational Database + // Service // - // * Amazon ElastiCache + // * Amazon ElastiCache // - // * Amazon - // Elasticsearch Service + // * Amazon Elasticsearch Service CostFilters map[string][]*string // The types of costs that are included in this COST budget. USAGE, RI_UTILIZATION, @@ -352,15 +351,15 @@ type IamActionDefinition struct { // you want to be notified when you go over 160 dollars, create a notification with // the following parameters: // -// * A notificationType of ACTUAL +// * A notificationType of ACTUAL // -// * A -// thresholdType of PERCENTAGE +// * A thresholdType of +// PERCENTAGE // -// * A comparisonOperator of GREATER_THAN +// * A comparisonOperator of GREATER_THAN // -// * A -// notification threshold of 80 +// * A notification threshold +// of 80 type Notification struct { // The comparison that is used for this notification. @@ -427,10 +426,10 @@ type ScpActionDefinition struct { // The amount of cost or usage that is measured for a budget. For example, a Spend // for 3 GB of S3 usage would have the following parameters: // -// * An Amount of -// 3 +// * An Amount of 3 // -// * A unit of GB +// * A +// unit of GB type Spend struct { // The cost or usage amount that is associated with a budget forecast, actual @@ -469,10 +468,10 @@ type SsmActionDefinition struct { // subscription type and either an Amazon SNS topic or an email address. For // example, an email subscriber would have the following parameters: // -// * A +// * A // subscriptionType of EMAIL // -// * An address of example@example.com +// * An address of example@example.com type Subscriber struct { // The address that AWS sends budget notifications to, either an SNS topic or an diff --git a/service/chime/types/enums.go b/service/chime/types/enums.go index c10e92658e0..7535b54b994 100644 --- a/service/chime/types/enums.go +++ b/service/chime/types/enums.go @@ -490,8 +490,8 @@ type VoiceConnectorAwsRegion string // Enum values for VoiceConnectorAwsRegion const ( - VoiceConnectorAwsRegionUs_east_1 VoiceConnectorAwsRegion = "us-east-1" - VoiceConnectorAwsRegionUs_west_2 VoiceConnectorAwsRegion = "us-west-2" + VoiceConnectorAwsRegionUsEast1 VoiceConnectorAwsRegion = "us-east-1" + VoiceConnectorAwsRegionUsWest2 VoiceConnectorAwsRegion = "us-west-2" ) // Values returns all known values for VoiceConnectorAwsRegion. Note that this can diff --git a/service/cloud9/api_op_CreateEnvironmentMembership.go b/service/cloud9/api_op_CreateEnvironmentMembership.go index 04641be5565..243d4f1eb3e 100644 --- a/service/cloud9/api_op_CreateEnvironmentMembership.go +++ b/service/cloud9/api_op_CreateEnvironmentMembership.go @@ -37,11 +37,10 @@ type CreateEnvironmentMembershipInput struct { // The type of environment member permissions you want to associate with this // environment member. Available values include: // - // * read-only: Has read-only - // access to the environment. + // * read-only: Has read-only access + // to the environment. // - // * read-write: Has read-write access to the - // environment. + // * read-write: Has read-write access to the environment. // // This member is required. 
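A usage sketch of the Permissions member documented above, granting a user read-only access to an environment. EnvironmentId and UserArn are assumed member names from the Cloud9 API; MemberPermissionsReadOnly is the renamed constant that appears in the enums.go hunk later in this patch.

package examples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/cloud9"
	"github.com/aws/aws-sdk-go-v2/service/cloud9/types"
)

// addReadOnlyMember adds an environment member with read-only access.
func addReadOnlyMember(ctx context.Context, cfg aws.Config, envID, userArn string) error {
	client := cloud9.NewFromConfig(cfg)
	_, err := client.CreateEnvironmentMembership(ctx, &cloud9.CreateEnvironmentMembershipInput{
		EnvironmentId: aws.String(envID),
		UserArn:       aws.String(userArn),
		Permissions:   types.MemberPermissionsReadOnly,
	})
	return err
}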
Permissions types.MemberPermissions diff --git a/service/cloud9/api_op_DescribeEnvironmentMemberships.go b/service/cloud9/api_op_DescribeEnvironmentMemberships.go index 24b55798b9a..e935445bc7d 100644 --- a/service/cloud9/api_op_DescribeEnvironmentMemberships.go +++ b/service/cloud9/api_op_DescribeEnvironmentMemberships.go @@ -47,13 +47,13 @@ type DescribeEnvironmentMembershipsInput struct { // The type of environment member permissions to get information about. Available // values include: // - // * owner: Owns the environment. + // * owner: Owns the environment. // - // * read-only: Has - // read-only access to the environment. + // * read-only: Has read-only + // access to the environment. // - // * read-write: Has read-write access to - // the environment. + // * read-write: Has read-write access to the + // environment. // // If no value is specified, information about all environment // members are returned. diff --git a/service/cloud9/api_op_DescribeEnvironmentStatus.go b/service/cloud9/api_op_DescribeEnvironmentStatus.go index 52d93baa1eb..0c035d7a3ef 100644 --- a/service/cloud9/api_op_DescribeEnvironmentStatus.go +++ b/service/cloud9/api_op_DescribeEnvironmentStatus.go @@ -42,23 +42,23 @@ type DescribeEnvironmentStatusOutput struct { // The status of the environment. Available values include: // - // * connecting: The + // * connecting: The // environment is connecting. // - // * creating: The environment is being created. + // * creating: The environment is being created. // + // * + // deleting: The environment is being deleted. // - // * deleting: The environment is being deleted. + // * error: The environment is in an + // error state. // - // * error: The environment is - // in an error state. + // * ready: The environment is ready. // - // * ready: The environment is ready. + // * stopped: The environment is + // stopped. // - // * stopped: The - // environment is stopped. - // - // * stopping: The environment is stopping. + // * stopping: The environment is stopping. Status types.EnvironmentStatus // Metadata pertaining to the operation's result. diff --git a/service/cloud9/api_op_UpdateEnvironmentMembership.go b/service/cloud9/api_op_UpdateEnvironmentMembership.go index d82cabdd2b9..3cd061906c4 100644 --- a/service/cloud9/api_op_UpdateEnvironmentMembership.go +++ b/service/cloud9/api_op_UpdateEnvironmentMembership.go @@ -39,11 +39,11 @@ type UpdateEnvironmentMembershipInput struct { // The replacement type of environment member permissions you want to associate // with this environment member. Available values include: // - // * read-only: Has + // * read-only: Has // read-only access to the environment. // - // * read-write: Has read-write access to - // the environment. + // * read-write: Has read-write access to the + // environment. // // This member is required. Permissions types.MemberPermissions diff --git a/service/cloud9/doc.go b/service/cloud9/doc.go index d07c2462f0e..82197001e4e 100644 --- a/service/cloud9/doc.go +++ b/service/cloud9/doc.go @@ -9,44 +9,43 @@ // (https://docs.aws.amazon.com/cloud9/latest/user-guide). AWS Cloud9 supports // these operations: // -// * CreateEnvironmentEC2: Creates an AWS Cloud9 development +// * CreateEnvironmentEC2: Creates an AWS Cloud9 development // environment, launches an Amazon EC2 instance, and then connects from the // instance to the environment. // -// * CreateEnvironmentMembership: Adds an -// environment member to an environment. +// * CreateEnvironmentMembership: Adds an environment +// member to an environment. 
// -// * DeleteEnvironment: Deletes an -// environment. If an Amazon EC2 instance is connected to the environment, also -// terminates the instance. +// * DeleteEnvironment: Deletes an environment. If an +// Amazon EC2 instance is connected to the environment, also terminates the +// instance. // -// * DeleteEnvironmentMembership: Deletes an -// environment member from an environment. +// * DeleteEnvironmentMembership: Deletes an environment member from an +// environment. // -// * DescribeEnvironmentMemberships: -// Gets information about environment members for an environment. +// * DescribeEnvironmentMemberships: Gets information about +// environment members for an environment. // -// * -// DescribeEnvironments: Gets information about environments. +// * DescribeEnvironments: Gets +// information about environments. // -// * -// DescribeEnvironmentStatus: Gets status information for an environment. +// * DescribeEnvironmentStatus: Gets status +// information for an environment. // -// * -// ListEnvironments: Gets a list of environment identifiers. +// * ListEnvironments: Gets a list of environment +// identifiers. // -// * -// ListTagsForResource: Gets the tags for an environment. +// * ListTagsForResource: Gets the tags for an environment. // -// * TagResource: Adds -// tags to an environment. +// * +// TagResource: Adds tags to an environment. // -// * UntagResource: Removes tags from an +// * UntagResource: Removes tags from an // environment. // -// * UpdateEnvironment: Changes the settings of an existing +// * UpdateEnvironment: Changes the settings of an existing // environment. // -// * UpdateEnvironmentMembership: Changes the settings of an -// existing environment member for an environment. +// * UpdateEnvironmentMembership: Changes the settings of an existing +// environment member for an environment. package cloud9 diff --git a/service/cloud9/types/enums.go b/service/cloud9/types/enums.go index 20f7730a4ea..bb073330162 100644 --- a/service/cloud9/types/enums.go +++ b/service/cloud9/types/enums.go @@ -6,8 +6,8 @@ type ConnectionType string // Enum values for ConnectionType const ( - ConnectionTypeConnect_ssh ConnectionType = "CONNECT_SSH" - ConnectionTypeConnect_ssm ConnectionType = "CONNECT_SSM" + ConnectionTypeConnectSsh ConnectionType = "CONNECT_SSH" + ConnectionTypeConnectSsm ConnectionType = "CONNECT_SSM" ) // Values returns all known values for ConnectionType. Note that this can be @@ -24,11 +24,11 @@ type EnvironmentLifecycleStatus string // Enum values for EnvironmentLifecycleStatus const ( - EnvironmentLifecycleStatusCreating EnvironmentLifecycleStatus = "CREATING" - EnvironmentLifecycleStatusCreated EnvironmentLifecycleStatus = "CREATED" - EnvironmentLifecycleStatusCreate_failed EnvironmentLifecycleStatus = "CREATE_FAILED" - EnvironmentLifecycleStatusDeleting EnvironmentLifecycleStatus = "DELETING" - EnvironmentLifecycleStatusDelete_failed EnvironmentLifecycleStatus = "DELETE_FAILED" + EnvironmentLifecycleStatusCreating EnvironmentLifecycleStatus = "CREATING" + EnvironmentLifecycleStatusCreated EnvironmentLifecycleStatus = "CREATED" + EnvironmentLifecycleStatusCreateFailed EnvironmentLifecycleStatus = "CREATE_FAILED" + EnvironmentLifecycleStatusDeleting EnvironmentLifecycleStatus = "DELETING" + EnvironmentLifecycleStatusDeleteFailed EnvironmentLifecycleStatus = "DELETE_FAILED" ) // Values returns all known values for EnvironmentLifecycleStatus. 
Note that this @@ -94,8 +94,8 @@ type MemberPermissions string // Enum values for MemberPermissions const ( - MemberPermissionsRead_write MemberPermissions = "read-write" - MemberPermissionsRead_only MemberPermissions = "read-only" + MemberPermissionsReadWrite MemberPermissions = "read-write" + MemberPermissionsReadOnly MemberPermissions = "read-only" ) // Values returns all known values for MemberPermissions. Note that this can be @@ -112,9 +112,9 @@ type Permissions string // Enum values for Permissions const ( - PermissionsOwner Permissions = "owner" - PermissionsRead_write Permissions = "read-write" - PermissionsRead_only Permissions = "read-only" + PermissionsOwner Permissions = "owner" + PermissionsReadWrite Permissions = "read-write" + PermissionsReadOnly Permissions = "read-only" ) // Values returns all known values for Permissions. Note that this can be expanded diff --git a/service/cloud9/types/types.go b/service/cloud9/types/types.go index fbdf63a539c..6dad19fe795 100644 --- a/service/cloud9/types/types.go +++ b/service/cloud9/types/types.go @@ -32,11 +32,11 @@ type Environment struct { // The type of environment. Valid values include the following: // - // * ec2: An - // Amazon Elastic Compute Cloud (Amazon EC2) instance connects to the - // environment. + // * ec2: An Amazon + // Elastic Compute Cloud (Amazon EC2) instance connects to the environment. // - // * ssh: Your own server connects to the environment. + // * ssh: + // Your own server connects to the environment. Type EnvironmentType } @@ -53,19 +53,19 @@ type EnvironmentLifecycle struct { // The current creation or deletion lifecycle state of the environment. // - // * + // * // CREATING: The environment is in the process of being created. // - // * CREATED: - // The environment was successfully created. + // * CREATED: The + // environment was successfully created. // - // * CREATE_FAILED: The environment - // failed to be created. + // * CREATE_FAILED: The environment failed + // to be created. // - // * DELETING: The environment is in the process of - // being deleted. + // * DELETING: The environment is in the process of being + // deleted. // - // * DELETE_FAILED: The environment failed to delete. + // * DELETE_FAILED: The environment failed to delete. Status EnvironmentLifecycleStatus } @@ -83,13 +83,13 @@ type EnvironmentMember struct { // The type of environment member permissions associated with this environment // member. Available values include: // - // * owner: Owns the environment. + // * owner: Owns the environment. // - // * - // read-only: Has read-only access to the environment. + // * read-only: + // Has read-only access to the environment. // - // * read-write: Has - // read-write access to the environment. + // * read-write: Has read-write access to + // the environment. Permissions Permissions // The Amazon Resource Name (ARN) of the environment member. diff --git a/service/clouddirectory/api_op_AttachObject.go b/service/clouddirectory/api_op_AttachObject.go index a44e9d27e93..2c0f18667f0 100644 --- a/service/clouddirectory/api_op_AttachObject.go +++ b/service/clouddirectory/api_op_AttachObject.go @@ -14,9 +14,9 @@ import ( // Attaches an existing object to another object. 
An object can be accessed in two // ways: // -// * Using the path +// * Using the path // -// * Using ObjectIdentifier +// * Using ObjectIdentifier func (c *Client) AttachObject(ctx context.Context, params *AttachObjectInput, optFns ...func(*Options)) (*AttachObjectOutput, error) { if params == nil { params = &AttachObjectInput{} diff --git a/service/clouddirectory/api_op_CreateFacet.go b/service/clouddirectory/api_op_CreateFacet.go index a5fe0617a35..dbbaa30c009 100644 --- a/service/clouddirectory/api_op_CreateFacet.go +++ b/service/clouddirectory/api_op_CreateFacet.go @@ -52,18 +52,18 @@ type CreateFacetInput struct { // Specifies whether a given object created from this facet is of type node, leaf // node, policy or index. // - // * Node: Can have multiple children but one parent. + // * Node: Can have multiple children but one parent. // + // * + // Leaf node: Cannot have children but can have multiple parents. // - // * Leaf node: Cannot have children but can have multiple parents. - // - // * Policy: - // Allows you to store a policy document and policy type. For more information, see + // * Policy: Allows + // you to store a policy document and policy type. For more information, see // Policies // (https://docs.aws.amazon.com/clouddirectory/latest/developerguide/key_concepts_directory.html#key_concepts_policies). // - // - // * Index: Can be created with the Index API. + // * + // Index: Can be created with the Index API. ObjectType types.ObjectType } diff --git a/service/clouddirectory/api_op_CreateSchema.go b/service/clouddirectory/api_op_CreateSchema.go index 75329e04414..25a55b870dc 100644 --- a/service/clouddirectory/api_op_CreateSchema.go +++ b/service/clouddirectory/api_op_CreateSchema.go @@ -13,14 +13,14 @@ import ( // Creates a new schema in a development state. A schema can exist in three // phases: // -// * Development: This is a mutable phase of the schema. All new -// schemas are in the development phase. Once the schema is finalized, it can be +// * Development: This is a mutable phase of the schema. All new schemas +// are in the development phase. Once the schema is finalized, it can be // published. // -// * Published: Published schemas are immutable and have a version +// * Published: Published schemas are immutable and have a version // associated with them. // -// * Applied: Applied schemas are mutable in a way that +// * Applied: Applied schemas are mutable in a way that // allows you to add new schema facets. You can also add new, nonrequired // attributes to existing schema facets. You can apply only published schemas to // directories. diff --git a/service/clouddirectory/api_op_UpdateFacet.go b/service/clouddirectory/api_op_UpdateFacet.go index 35b5c10eb76..0545025a83f 100644 --- a/service/clouddirectory/api_op_UpdateFacet.go +++ b/service/clouddirectory/api_op_UpdateFacet.go @@ -13,13 +13,13 @@ import ( // Does the following: // -// * Adds new Attributes, Rules, or ObjectTypes. +// * Adds new Attributes, Rules, or ObjectTypes. // -// * -// Updates existing Attributes, Rules, or ObjectTypes. +// * Updates +// existing Attributes, Rules, or ObjectTypes. // -// * Deletes existing -// Attributes, Rules, or ObjectTypes. +// * Deletes existing Attributes, +// Rules, or ObjectTypes. 
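The ObjectType member documented above selects which of the four object kinds a facet produces. A minimal sketch of creating a leaf-node facet follows; SchemaArn and Name are assumed member names from the Cloud Directory API, and ObjectTypeLeafNode is the renamed constant from the enums.go hunk below.

package examples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/clouddirectory"
	"github.com/aws/aws-sdk-go-v2/service/clouddirectory/types"
)

// createLeafNodeFacet creates a facet whose objects are leaf nodes: they can
// have multiple parents but no children.
func createLeafNodeFacet(ctx context.Context, cfg aws.Config, schemaArn string) error {
	client := clouddirectory.NewFromConfig(cfg)
	_, err := client.CreateFacet(ctx, &clouddirectory.CreateFacetInput{
		SchemaArn:  aws.String(schemaArn), // a development schema, per CreateSchema above
		Name:       aws.String("Device"),
		ObjectType: types.ObjectTypeLeafNode,
	})
	return err
}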
func (c *Client) UpdateFacet(ctx context.Context, params *UpdateFacetInput, optFns ...func(*Options)) (*UpdateFacetOutput, error) { if params == nil { params = &UpdateFacetInput{} diff --git a/service/clouddirectory/types/enums.go b/service/clouddirectory/types/enums.go index 25f9ef17f40..b34e007f601 100644 --- a/service/clouddirectory/types/enums.go +++ b/service/clouddirectory/types/enums.go @@ -178,10 +178,10 @@ type ObjectType string // Enum values for ObjectType const ( - ObjectTypeNode ObjectType = "NODE" - ObjectTypeLeaf_node ObjectType = "LEAF_NODE" - ObjectTypePolicy ObjectType = "POLICY" - ObjectTypeIndex ObjectType = "INDEX" + ObjectTypeNode ObjectType = "NODE" + ObjectTypeLeafNode ObjectType = "LEAF_NODE" + ObjectTypePolicy ObjectType = "POLICY" + ObjectTypeIndex ObjectType = "INDEX" ) // Values returns all known values for ObjectType. Note that this can be expanded @@ -200,11 +200,11 @@ type RangeMode string // Enum values for RangeMode const ( - RangeModeFirst RangeMode = "FIRST" - RangeModeLast RangeMode = "LAST" - RangeModeLast_before_missing_values RangeMode = "LAST_BEFORE_MISSING_VALUES" - RangeModeInclusive RangeMode = "INCLUSIVE" - RangeModeExclusive RangeMode = "EXCLUSIVE" + RangeModeFirst RangeMode = "FIRST" + RangeModeLast RangeMode = "LAST" + RangeModeLastBeforeMissingValues RangeMode = "LAST_BEFORE_MISSING_VALUES" + RangeModeInclusive RangeMode = "INCLUSIVE" + RangeModeExclusive RangeMode = "EXCLUSIVE" ) // Values returns all known values for RangeMode. Note that this can be expanded in @@ -224,8 +224,8 @@ type RequiredAttributeBehavior string // Enum values for RequiredAttributeBehavior const ( - RequiredAttributeBehaviorRequired_always RequiredAttributeBehavior = "REQUIRED_ALWAYS" - RequiredAttributeBehaviorNot_required RequiredAttributeBehavior = "NOT_REQUIRED" + RequiredAttributeBehaviorRequiredAlways RequiredAttributeBehavior = "REQUIRED_ALWAYS" + RequiredAttributeBehaviorNotRequired RequiredAttributeBehavior = "NOT_REQUIRED" ) // Values returns all known values for RequiredAttributeBehavior. Note that this @@ -242,10 +242,10 @@ type RuleType string // Enum values for RuleType const ( - RuleTypeBinary_length RuleType = "BINARY_LENGTH" - RuleTypeNumber_comparison RuleType = "NUMBER_COMPARISON" - RuleTypeString_from_set RuleType = "STRING_FROM_SET" - RuleTypeString_length RuleType = "STRING_LENGTH" + RuleTypeBinaryLength RuleType = "BINARY_LENGTH" + RuleTypeNumberComparison RuleType = "NUMBER_COMPARISON" + RuleTypeStringFromSet RuleType = "STRING_FROM_SET" + RuleTypeStringLength RuleType = "STRING_LENGTH" ) // Values returns all known values for RuleType. Note that this can be expanded in @@ -264,8 +264,8 @@ type UpdateActionType string // Enum values for UpdateActionType const ( - UpdateActionTypeCreate_or_update UpdateActionType = "CREATE_OR_UPDATE" - UpdateActionTypeDelete UpdateActionType = "DELETE" + UpdateActionTypeCreateOrUpdate UpdateActionType = "CREATE_OR_UPDATE" + UpdateActionTypeDelete UpdateActionType = "DELETE" ) // Values returns all known values for UpdateActionType. Note that this can be diff --git a/service/clouddirectory/types/types.go b/service/clouddirectory/types/types.go index 744e00cff14..2eae59fad1b 100644 --- a/service/clouddirectory/types/types.go +++ b/service/clouddirectory/types/types.go @@ -1239,17 +1239,16 @@ type ObjectReference struct { // (https://docs.aws.amazon.com/clouddirectory/latest/developerguide/directory_objects_access_objects.html). 
// You can identify an object in one of the following ways: // - // * - // $ObjectIdentifier - An object identifier is an opaque string provided by Amazon - // Cloud Directory. When creating objects, the system will provide you with the - // identifier of the created object. An object’s identifier is immutable and no two - // objects will ever share the same object identifier + // * $ObjectIdentifier - + // An object identifier is an opaque string provided by Amazon Cloud Directory. + // When creating objects, the system will provide you with the identifier of the + // created object. An object’s identifier is immutable and no two objects will ever + // share the same object identifier // - // * /some/path - - // Identifies the object based on path + // * /some/path - Identifies the object based on + // path // - // * #SomeBatchReference - Identifies the - // object in a batch call + // * #SomeBatchReference - Identifies the object in a batch call Selector *string } diff --git a/service/cloudformation/api_op_CreateChangeSet.go b/service/cloudformation/api_op_CreateChangeSet.go index 55790d66535..fbc48b3e213 100644 --- a/service/cloudformation/api_op_CreateChangeSet.go +++ b/service/cloudformation/api_op_CreateChangeSet.go @@ -66,7 +66,7 @@ type CreateChangeSetInput struct { // In some cases, you must explicitly acknowledge that your stack template contains // certain capabilities in order for AWS CloudFormation to create the stack. // - // * + // * // CAPABILITY_IAM and CAPABILITY_NAMED_IAM Some stack templates might include // resources that can affect permissions in your AWS account; for example, by // creating new AWS Identity and Access Management (IAM) users. For those stacks, @@ -74,54 +74,54 @@ type CreateChangeSetInput struct { // The following IAM resources require you to specify either the CAPABILITY_IAM or // CAPABILITY_NAMED_IAM capability. // - // * If you have IAM resources, you can - // specify either capability. + // * If you have IAM resources, you can specify + // either capability. // - // * If you have IAM resources with custom - // names, you must specify CAPABILITY_NAMED_IAM. + // * If you have IAM resources with custom names, you must + // specify CAPABILITY_NAMED_IAM. // - // * If you don't specify - // either of these capabilities, AWS CloudFormation returns an - // InsufficientCapabilities error. + // * If you don't specify either of these + // capabilities, AWS CloudFormation returns an InsufficientCapabilities error. // - // If your stack template contains these - // resources, we recommend that you review all permissions associated with them and - // edit their permissions if necessary. + // If + // your stack template contains these resources, we recommend that you review all + // permissions associated with them and edit their permissions if necessary. 
// - // * AWS::IAM::AccessKey + // * + // AWS::IAM::AccessKey // (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-accesskey.html) // - // - // * AWS::IAM::Group + // * + // AWS::IAM::Group // (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-group.html) // - // - // * AWS::IAM::InstanceProfile + // * + // AWS::IAM::InstanceProfile // (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-instanceprofile.html) // - // - // * AWS::IAM::Policy + // * + // AWS::IAM::Policy // (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-policy.html) // - // - // * AWS::IAM::Role + // * + // AWS::IAM::Role // (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-role.html) // - // - // * AWS::IAM::User + // * + // AWS::IAM::User // (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-user.html) // - // - // * AWS::IAM::UserToGroupAddition + // * + // AWS::IAM::UserToGroupAddition // (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-addusertogroup.html) // - // - // For more information, see Acknowledging IAM Resources in AWS CloudFormation + // For + // more information, see Acknowledging IAM Resources in AWS CloudFormation // Templates // (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-iam-template.html#capabilities). // - // - // * CAPABILITY_AUTO_EXPAND Some template contain macros. Macros perform custom + // * + // CAPABILITY_AUTO_EXPAND Some template contain macros. Macros perform custom // processing on templates; this can include simple actions like find-and-replace // operations, all the way to extensive transformations of entire templates. // Because of this, users typically create a change set from the processed diff --git a/service/cloudformation/api_op_CreateStack.go b/service/cloudformation/api_op_CreateStack.go index 0c88183a82c..4cae0615618 100644 --- a/service/cloudformation/api_op_CreateStack.go +++ b/service/cloudformation/api_op_CreateStack.go @@ -43,7 +43,7 @@ type CreateStackInput struct { // In some cases, you must explicitly acknowledge that your stack template contains // certain capabilities in order for AWS CloudFormation to create the stack. // - // * + // * // CAPABILITY_IAM and CAPABILITY_NAMED_IAM Some stack templates might include // resources that can affect permissions in your AWS account; for example, by // creating new AWS Identity and Access Management (IAM) users. For those stacks, @@ -51,54 +51,54 @@ type CreateStackInput struct { // The following IAM resources require you to specify either the CAPABILITY_IAM or // CAPABILITY_NAMED_IAM capability. // - // * If you have IAM resources, you can - // specify either capability. + // * If you have IAM resources, you can specify + // either capability. // - // * If you have IAM resources with custom - // names, you must specify CAPABILITY_NAMED_IAM. + // * If you have IAM resources with custom names, you must + // specify CAPABILITY_NAMED_IAM. // - // * If you don't specify - // either of these capabilities, AWS CloudFormation returns an - // InsufficientCapabilities error. + // * If you don't specify either of these + // capabilities, AWS CloudFormation returns an InsufficientCapabilities error. // - // If your stack template contains these - // resources, we recommend that you review all permissions associated with them and - // edit their permissions if necessary. 
+ // If + // your stack template contains these resources, we recommend that you review all + // permissions associated with them and edit their permissions if necessary. // - // * AWS::IAM::AccessKey + // * + // AWS::IAM::AccessKey // (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-accesskey.html) // - // - // * AWS::IAM::Group + // * + // AWS::IAM::Group // (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-group.html) // - // - // * AWS::IAM::InstanceProfile + // * + // AWS::IAM::InstanceProfile // (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-instanceprofile.html) // - // - // * AWS::IAM::Policy + // * + // AWS::IAM::Policy // (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-policy.html) // - // - // * AWS::IAM::Role + // * + // AWS::IAM::Role // (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-role.html) // - // - // * AWS::IAM::User + // * + // AWS::IAM::User // (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-user.html) // - // - // * AWS::IAM::UserToGroupAddition + // * + // AWS::IAM::UserToGroupAddition // (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-addusertogroup.html) // - // - // For more information, see Acknowledging IAM Resources in AWS CloudFormation + // For + // more information, see Acknowledging IAM Resources in AWS CloudFormation // Templates // (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-iam-template.html#capabilities). // - // - // * CAPABILITY_AUTO_EXPAND Some template contain macros. Macros perform custom + // * + // CAPABILITY_AUTO_EXPAND Some template contain macros. Macros perform custom // processing on templates; this can include simple actions like find-and-replace // operations, all the way to extensive transformations of entire templates. // Because of this, users typically create a change set from the processed diff --git a/service/cloudformation/api_op_CreateStackInstances.go b/service/cloudformation/api_op_CreateStackInstances.go index bd3a99371ce..19ddce17561 100644 --- a/service/cloudformation/api_op_CreateStackInstances.go +++ b/service/cloudformation/api_op_CreateStackInstances.go @@ -73,30 +73,30 @@ type CreateStackInstancesInput struct { // their values, be aware of how AWS CloudFormation sets parameter values during // stack instance operations: // - // * To override the current value for a parameter, + // * To override the current value for a parameter, // include the parameter and specify its value. // - // * To leave a parameter set to - // its present value, you can do one of the following: + // * To leave a parameter set to its + // present value, you can do one of the following: // - // * Do not include - // the parameter in the list. + // * Do not include the parameter + // in the list. // - // * Include the parameter and specify - // UsePreviousValue as true. (You cannot specify both a value and set - // UsePreviousValue to true.) + // * Include the parameter and specify UsePreviousValue as true. (You + // cannot specify both a value and set UsePreviousValue to true.) // - // * To set all overridden parameter back to the - // values specified in the stack set, specify a parameter list but do not include - // any parameters. + // * To set all + // overridden parameter back to the values specified in the stack set, specify a + // parameter list but do not include any parameters. 
// - // * To leave all parameters set to their present values, do - // not specify this property at all. + // * To leave all parameters set + // to their present values, do not specify this property at all. // - // During stack set updates, any parameter - // values overridden for a stack instance are not updated, but retain their - // overridden value. You can only override the parameter values that are specified - // in the stack set; to add or delete a parameter itself, use UpdateStackSet + // During stack set + // updates, any parameter values overridden for a stack instance are not updated, + // but retain their overridden value. You can only override the parameter values + // that are specified in the stack set; to add or delete a parameter itself, use + // UpdateStackSet // (https://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_UpdateStackSet.html) // to update the stack set template. ParameterOverrides []*types.Parameter diff --git a/service/cloudformation/api_op_CreateStackSet.go b/service/cloudformation/api_op_CreateStackSet.go index 0a7cec98762..de3189e7f94 100644 --- a/service/cloudformation/api_op_CreateStackSet.go +++ b/service/cloudformation/api_op_CreateStackSet.go @@ -56,7 +56,7 @@ type CreateStackSetInput struct { // contains certain capabilities in order for AWS CloudFormation to create the // stack set and related stack instances. // - // * CAPABILITY_IAM and + // * CAPABILITY_IAM and // CAPABILITY_NAMED_IAM Some stack templates might include resources that can // affect permissions in your AWS account; for example, by creating new AWS // Identity and Access Management (IAM) users. For those stack sets, you must @@ -64,54 +64,54 @@ type CreateStackSetInput struct { // following IAM resources require you to specify either the CAPABILITY_IAM or // CAPABILITY_NAMED_IAM capability. // - // * If you have IAM resources, you can - // specify either capability. + // * If you have IAM resources, you can specify + // either capability. // - // * If you have IAM resources with custom - // names, you must specify CAPABILITY_NAMED_IAM. + // * If you have IAM resources with custom names, you must + // specify CAPABILITY_NAMED_IAM. // - // * If you don't specify - // either of these capabilities, AWS CloudFormation returns an - // InsufficientCapabilities error. + // * If you don't specify either of these + // capabilities, AWS CloudFormation returns an InsufficientCapabilities error. // - // If your stack template contains these - // resources, we recommend that you review all permissions associated with them and - // edit their permissions if necessary. + // If + // your stack template contains these resources, we recommend that you review all + // permissions associated with them and edit their permissions if necessary. 
// - // * AWS::IAM::AccessKey + // * + // AWS::IAM::AccessKey // (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-accesskey.html) // - // - // * AWS::IAM::Group + // * + // AWS::IAM::Group // (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-group.html) // - // - // * AWS::IAM::InstanceProfile + // * + // AWS::IAM::InstanceProfile // (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-instanceprofile.html) // - // - // * AWS::IAM::Policy + // * + // AWS::IAM::Policy // (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-policy.html) // - // - // * AWS::IAM::Role + // * + // AWS::IAM::Role // (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-role.html) // - // - // * AWS::IAM::User + // * + // AWS::IAM::User // (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-user.html) // - // - // * AWS::IAM::UserToGroupAddition + // * + // AWS::IAM::UserToGroupAddition // (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-addusertogroup.html) // - // - // For more information, see Acknowledging IAM Resources in AWS CloudFormation + // For + // more information, see Acknowledging IAM Resources in AWS CloudFormation // Templates // (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-iam-template.html#capabilities). // - // - // * CAPABILITY_AUTO_EXPAND Some templates contain macros. If your stack template + // * + // CAPABILITY_AUTO_EXPAND Some templates contain macros. If your stack template // contains one or more macros, and you choose to create a stack directly from the // processed template, without first reviewing the resulting changes in a change // set, you must acknowledge this capability. For more information, see Using AWS @@ -151,14 +151,14 @@ type CreateStackSetInput struct { // Describes how the IAM roles required for stack set operations are created. By // default, SELF-MANAGED is specified. // - // * With self-managed permissions, you - // must create the administrator and execution roles required to deploy to target + // * With self-managed permissions, you must + // create the administrator and execution roles required to deploy to target // accounts. For more information, see Grant Self-Managed Stack Set Permissions // (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/stacksets-prereqs-self-managed.html). // - // - // * With service-managed permissions, StackSets automatically creates the IAM - // roles required to deploy to accounts managed by AWS Organizations. For more + // * + // With service-managed permissions, StackSets automatically creates the IAM roles + // required to deploy to accounts managed by AWS Organizations. For more // information, see Grant Service-Managed Stack Set Permissions // (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/stacksets-prereqs-service-managed.html). PermissionModel types.PermissionModels diff --git a/service/cloudformation/api_op_DescribeStackDriftDetectionStatus.go b/service/cloudformation/api_op_DescribeStackDriftDetectionStatus.go index 138fd20803f..1842e2733f0 100644 --- a/service/cloudformation/api_op_DescribeStackDriftDetectionStatus.go +++ b/service/cloudformation/api_op_DescribeStackDriftDetectionStatus.go @@ -55,21 +55,20 @@ type DescribeStackDriftDetectionStatusOutput struct { // The status of the stack drift detection operation. 
// - // * DETECTION_COMPLETE: - // The stack drift detection operation has successfully completed for all resources - // in the stack that support drift detection. (Resources that do not currently - // support stack detection remain unchecked.) If you specified logical resource IDs - // for AWS CloudFormation to use as a filter for the stack drift detection - // operation, only the resources with those logical IDs are checked for drift. + // * DETECTION_COMPLETE: The + // stack drift detection operation has successfully completed for all resources in + // the stack that support drift detection. (Resources that do not currently support + // stack detection remain unchecked.) If you specified logical resource IDs for AWS + // CloudFormation to use as a filter for the stack drift detection operation, only + // the resources with those logical IDs are checked for drift. // + // * DETECTION_FAILED: + // The stack drift detection operation has failed for at least one resource in the + // stack. Results will be available for resources on which AWS CloudFormation + // successfully completed drift detection. // - // * DETECTION_FAILED: The stack drift detection operation has failed for at least - // one resource in the stack. Results will be available for resources on which AWS - // CloudFormation successfully completed drift detection. - // - // * - // DETECTION_IN_PROGRESS: The stack drift detection operation is currently in - // progress. + // * DETECTION_IN_PROGRESS: The stack + // drift detection operation is currently in progress. // // This member is required. DetectionStatus types.StackDriftDetectionStatus @@ -103,18 +102,18 @@ type DescribeStackDriftDetectionStatusOutput struct { // Status of the stack's actual configuration compared to its expected // configuration. // - // * DRIFTED: The stack differs from its expected template + // * DRIFTED: The stack differs from its expected template // configuration. A stack is considered to have drifted if one or more of its // resources have drifted. // - // * NOT_CHECKED: AWS CloudFormation has not checked - // if the stack differs from its expected template configuration. - // - // * IN_SYNC: - // The stack's actual configuration matches its expected template configuration. + // * NOT_CHECKED: AWS CloudFormation has not checked if + // the stack differs from its expected template configuration. // + // * IN_SYNC: The + // stack's actual configuration matches its expected template configuration. // - // * UNKNOWN: This value is reserved for future use. + // * + // UNKNOWN: This value is reserved for future use. StackDriftStatus types.StackDriftStatus // Metadata pertaining to the operation's result. diff --git a/service/cloudformation/api_op_DescribeStackEvents.go b/service/cloudformation/api_op_DescribeStackEvents.go index 309a791bae1..811575f14f4 100644 --- a/service/cloudformation/api_op_DescribeStackEvents.go +++ b/service/cloudformation/api_op_DescribeStackEvents.go @@ -41,11 +41,11 @@ type DescribeStackEventsInput struct { // The name or the unique stack ID that is associated with the stack, which are not // always interchangeable: // - // * Running stacks: You can specify either the - // stack's name or its unique stack ID. + // * Running stacks: You can specify either the stack's + // name or its unique stack ID. // - // * Deleted stacks: You must specify the - // unique stack ID. + // * Deleted stacks: You must specify the unique + // stack ID. // // Default: There is no default value. 
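A usage sketch of the StackName member documented above, for a running stack identified by name (a deleted stack would need its unique stack ID instead). The StackEvents result field is an assumption, since only the input member appears in this patch.

package examples

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/cloudformation"
)

// recentStackEvents fetches the event history for a running stack by name.
func recentStackEvents(ctx context.Context, cfg aws.Config, stackName string) error {
	client := cloudformation.NewFromConfig(cfg)
	out, err := client.DescribeStackEvents(ctx, &cloudformation.DescribeStackEventsInput{
		StackName: aws.String(stackName),
	})
	if err != nil {
		return err
	}
	fmt.Printf("stack %s has %d recorded events\n", stackName, len(out.StackEvents))
	return nil
}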
StackName *string diff --git a/service/cloudformation/api_op_DescribeStackResource.go b/service/cloudformation/api_op_DescribeStackResource.go index 316b34fc07d..9ea52616287 100644 --- a/service/cloudformation/api_op_DescribeStackResource.go +++ b/service/cloudformation/api_op_DescribeStackResource.go @@ -41,11 +41,11 @@ type DescribeStackResourceInput struct { // The name or the unique stack ID that is associated with the stack, which are not // always interchangeable: // - // * Running stacks: You can specify either the - // stack's name or its unique stack ID. + // * Running stacks: You can specify either the stack's + // name or its unique stack ID. // - // * Deleted stacks: You must specify the - // unique stack ID. + // * Deleted stacks: You must specify the unique + // stack ID. // // Default: There is no default value. // diff --git a/service/cloudformation/api_op_DescribeStackResourceDrifts.go b/service/cloudformation/api_op_DescribeStackResourceDrifts.go index 60b9dc3f13c..2c2b00c0378 100644 --- a/service/cloudformation/api_op_DescribeStackResourceDrifts.go +++ b/service/cloudformation/api_op_DescribeStackResourceDrifts.go @@ -56,18 +56,17 @@ type DescribeStackResourceDriftsInput struct { // The resource drift status values to use as filters for the resource drift // results returned. // - // * DELETED: The resource differs from its expected - // template configuration in that the resource has been deleted. + // * DELETED: The resource differs from its expected template + // configuration in that the resource has been deleted. // - // * MODIFIED: - // One or more resource properties differ from their expected template values. + // * MODIFIED: One or more + // resource properties differ from their expected template values. // + // * IN_SYNC: The + // resources's actual configuration matches its expected template configuration. // - // * IN_SYNC: The resources's actual configuration matches its expected template - // configuration. - // - // * NOT_CHECKED: AWS CloudFormation does not currently return - // this value. + // * + // NOT_CHECKED: AWS CloudFormation does not currently return this value. StackResourceDriftStatusFilters []types.StackResourceDriftStatus } diff --git a/service/cloudformation/api_op_DescribeStackResources.go b/service/cloudformation/api_op_DescribeStackResources.go index e4ce713b063..f8c4d1c2d33 100644 --- a/service/cloudformation/api_op_DescribeStackResources.go +++ b/service/cloudformation/api_op_DescribeStackResources.go @@ -59,14 +59,14 @@ type DescribeStackResourcesInput struct { // The name or the unique stack ID that is associated with the stack, which are not // always interchangeable: // - // * Running stacks: You can specify either the - // stack's name or its unique stack ID. + // * Running stacks: You can specify either the stack's + // name or its unique stack ID. // - // * Deleted stacks: You must specify the - // unique stack ID. + // * Deleted stacks: You must specify the unique + // stack ID. // - // Default: There is no default value. Required: Conditional. If - // you do not specify StackName, you must specify PhysicalResourceId. + // Default: There is no default value. Required: Conditional. If you do + // not specify StackName, you must specify PhysicalResourceId. 
StackName *string } diff --git a/service/cloudformation/api_op_DescribeStacks.go b/service/cloudformation/api_op_DescribeStacks.go index e1826547444..47b37cb9fe2 100644 --- a/service/cloudformation/api_op_DescribeStacks.go +++ b/service/cloudformation/api_op_DescribeStacks.go @@ -38,11 +38,11 @@ type DescribeStacksInput struct { // The name or the unique stack ID that is associated with the stack, which are not // always interchangeable: // - // * Running stacks: You can specify either the - // stack's name or its unique stack ID. + // * Running stacks: You can specify either the stack's + // name or its unique stack ID. // - // * Deleted stacks: You must specify the - // unique stack ID. + // * Deleted stacks: You must specify the unique + // stack ID. // // Default: There is no default value. StackName *string diff --git a/service/cloudformation/api_op_DescribeType.go b/service/cloudformation/api_op_DescribeType.go index 1c64c5e3904..388d85648f1 100644 --- a/service/cloudformation/api_op_DescribeType.go +++ b/service/cloudformation/api_op_DescribeType.go @@ -64,11 +64,11 @@ type DescribeTypeOutput struct { // The deprecation status of the type. Valid values include: // - // * LIVE: The type - // is registered and can be used in CloudFormation operations, dependent on its + // * LIVE: The type is + // registered and can be used in CloudFormation operations, dependent on its // provisioning behavior and visibility scope. // - // * DEPRECATED: The type has been + // * DEPRECATED: The type has been // deregistered and can no longer be used in CloudFormation operations. DeprecatedStatus types.DeprecatedStatus @@ -100,23 +100,23 @@ type DescribeTypeOutput struct { // provisioning type during registration, based on the types of handlers in the // schema handler package submitted. Valid values include: // - // * FULLY_MUTABLE: - // The type includes an update handler to process updates to the type during stack + // * FULLY_MUTABLE: The + // type includes an update handler to process updates to the type during stack // update operations. // - // * IMMUTABLE: The type does not include an update - // handler, so the type cannot be updated and must instead be replaced during stack - // update operations. + // * IMMUTABLE: The type does not include an update handler, so + // the type cannot be updated and must instead be replaced during stack update + // operations. // - // * NON_PROVISIONABLE: The type does not include all of - // the following handlers, and therefore cannot actually be provisioned. + // * NON_PROVISIONABLE: The type does not include all of the following + // handlers, and therefore cannot actually be provisioned. // - // * - // create + // * create // - // * read + // * read // - // * delete + // * + // delete ProvisioningType types.ProvisioningType // The schema that defines the type. For more information on type schemas, see @@ -140,12 +140,12 @@ type DescribeTypeOutput struct { // The scope at which the type is visible and usable in CloudFormation operations. // Valid values include: // - // * PRIVATE: The type is only visible and usable within - // the account in which it is registered. Currently, AWS CloudFormation marks any - // types you register as PRIVATE. + // * PRIVATE: The type is only visible and usable within the + // account in which it is registered. Currently, AWS CloudFormation marks any types + // you register as PRIVATE. // - // * PUBLIC: The type is publically visible and - // usable within any Amazon account. 
+ // * PUBLIC: The type is publically visible and usable + // within any Amazon account. Visibility types.Visibility // Metadata pertaining to the operation's result. diff --git a/service/cloudformation/api_op_DetectStackSetDrift.go b/service/cloudformation/api_op_DetectStackSetDrift.go index e12befdbda2..2f0e2e7013f 100644 --- a/service/cloudformation/api_op_DetectStackSetDrift.go +++ b/service/cloudformation/api_op_DetectStackSetDrift.go @@ -25,18 +25,18 @@ import ( // operation has completed, use the following actions to return drift // information: // -// * Use DescribeStackSet to return detailed informaiton about -// the stack set, including detailed information about the last completed drift +// * Use DescribeStackSet to return detailed informaiton about the +// stack set, including detailed information about the last completed drift // operation performed on the stack set. (Information about drift operations that // are in progress is not included.) // -// * Use ListStackInstances to return a list -// of stack instances belonging to the stack set, including the drift status and -// last drift time checked of each instance. +// * Use ListStackInstances to return a list of +// stack instances belonging to the stack set, including the drift status and last +// drift time checked of each instance. // -// * Use DescribeStackInstance to -// return detailed information about a specific stack instance, including its drift -// status and last drift time checked. +// * Use DescribeStackInstance to return +// detailed information about a specific stack instance, including its drift status +// and last drift time checked. // // For more information on performing a drift // detection operation on a stack set, see Detecting Unmanaged Changes in Stack diff --git a/service/cloudformation/api_op_GetTemplate.go b/service/cloudformation/api_op_GetTemplate.go index bb196ac50d3..9a5c6806d2a 100644 --- a/service/cloudformation/api_op_GetTemplate.go +++ b/service/cloudformation/api_op_GetTemplate.go @@ -41,11 +41,11 @@ type GetTemplateInput struct { // The name or the unique stack ID that is associated with the stack, which are not // always interchangeable: // - // * Running stacks: You can specify either the - // stack's name or its unique stack ID. + // * Running stacks: You can specify either the stack's + // name or its unique stack ID. // - // * Deleted stacks: You must specify the - // unique stack ID. + // * Deleted stacks: You must specify the unique + // stack ID. // // Default: There is no default value. StackName *string diff --git a/service/cloudformation/api_op_ListStackResources.go b/service/cloudformation/api_op_ListStackResources.go index 4e2ab5c9197..fee0117358f 100644 --- a/service/cloudformation/api_op_ListStackResources.go +++ b/service/cloudformation/api_op_ListStackResources.go @@ -35,11 +35,11 @@ type ListStackResourcesInput struct { // The name or the unique stack ID that is associated with the stack, which are not // always interchangeable: // - // * Running stacks: You can specify either the - // stack's name or its unique stack ID. + // * Running stacks: You can specify either the stack's + // name or its unique stack ID. // - // * Deleted stacks: You must specify the - // unique stack ID. + // * Deleted stacks: You must specify the unique + // stack ID. // // Default: There is no default value. 
// diff --git a/service/cloudformation/api_op_ListTypeVersions.go b/service/cloudformation/api_op_ListTypeVersions.go index 39061c7e553..acd3831c9b3 100644 --- a/service/cloudformation/api_op_ListTypeVersions.go +++ b/service/cloudformation/api_op_ListTypeVersions.go @@ -36,15 +36,15 @@ type ListTypeVersionsInput struct { // The deprecation status of the type versions that you want to get summary // information about. Valid values include: // - // * LIVE: The type version is - // registered and can be used in CloudFormation operations, dependent on its - // provisioning behavior and visibility scope. + // * LIVE: The type version is registered + // and can be used in CloudFormation operations, dependent on its provisioning + // behavior and visibility scope. // - // * DEPRECATED: The type version - // has been deregistered and can no longer be used in CloudFormation - // operations. + // * DEPRECATED: The type version has been + // deregistered and can no longer be used in CloudFormation operations. // - // The default is LIVE. + // The + // default is LIVE. DeprecatedStatus types.DeprecatedStatus // The maximum number of results to be returned with a single call. If the number diff --git a/service/cloudformation/api_op_ListTypes.go b/service/cloudformation/api_op_ListTypes.go index 333b25aeba9..bba9cb28e11 100644 --- a/service/cloudformation/api_op_ListTypes.go +++ b/service/cloudformation/api_op_ListTypes.go @@ -33,11 +33,11 @@ type ListTypesInput struct { // The deprecation status of the types that you want to get summary information // about. Valid values include: // - // * LIVE: The type is registered for use in + // * LIVE: The type is registered for use in // CloudFormation operations. // - // * DEPRECATED: The type has been deregistered and - // can no longer be used in CloudFormation operations. + // * DEPRECATED: The type has been deregistered and can + // no longer be used in CloudFormation operations. DeprecatedStatus types.DeprecatedStatus // The maximum number of results to be returned with a single call. If the number @@ -57,27 +57,27 @@ type ListTypesInput struct { // provisioning type during registration, based on the types of handlers in the // schema handler package submitted. Valid values include: // - // * FULLY_MUTABLE: - // The type includes an update handler to process updates to the type during stack + // * FULLY_MUTABLE: The + // type includes an update handler to process updates to the type during stack // update operations. // - // * IMMUTABLE: The type does not include an update - // handler, so the type cannot be updated and must instead be replaced during stack - // update operations. + // * IMMUTABLE: The type does not include an update handler, so + // the type cannot be updated and must instead be replaced during stack update + // operations. // - // * NON_PROVISIONABLE: The type does not include create, - // read, and delete handlers, and therefore cannot actually be provisioned. + // * NON_PROVISIONABLE: The type does not include create, read, and + // delete handlers, and therefore cannot actually be provisioned. ProvisioningType types.ProvisioningType // The scope at which the type is visible and usable in CloudFormation operations. // Valid values include: // - // * PRIVATE: The type is only visible and usable within - // the account in which it is registered. Currently, AWS CloudFormation marks any - // types you create as PRIVATE. + // * PRIVATE: The type is only visible and usable within the + // account in which it is registered. 
Currently, AWS CloudFormation marks any types + // you create as PRIVATE. // - // * PUBLIC: The type is publically visible and - // usable within any Amazon account. + // * PUBLIC: The type is publically visible and usable + // within any Amazon account. // // The default is PRIVATE. Visibility types.Visibility diff --git a/service/cloudformation/api_op_RegisterType.go b/service/cloudformation/api_op_RegisterType.go index c910a7ae85f..12a96e1c4fd 100644 --- a/service/cloudformation/api_op_RegisterType.go +++ b/service/cloudformation/api_op_RegisterType.go @@ -15,16 +15,16 @@ import ( // available for use in CloudFormation templates in your AWS account, and // includes: // -// * Validating the resource schema +// * Validating the resource schema // -// * Determining which -// handlers have been specified for the resource +// * Determining which handlers have +// been specified for the resource // -// * Making the resource type -// available for use in your account +// * Making the resource type available for use in +// your account // -// For more information on how to develop types -// and ready them for registeration, see Creating Resource Providers +// For more information on how to develop types and ready them for +// registeration, see Creating Resource Providers // (https://docs.aws.amazon.com/cloudformation-cli/latest/userguide/resource-types.html) // in the CloudFormation CLI User Guide. You can have a maximum of 50 resource type // versions registered at a time. This maximum is per account and per region. Use @@ -68,18 +68,17 @@ type RegisterTypeInput struct { // organization namespaces are reserved and cannot be used in your resource type // names: // - // * Alexa + // * Alexa // - // * AMZN + // * AMZN // - // * Amazon + // * Amazon // - // * AWS + // * AWS // - // * Custom + // * Custom // - // * - // Dev + // * Dev // // This member is required. TypeName *string diff --git a/service/cloudformation/api_op_UpdateStack.go b/service/cloudformation/api_op_UpdateStack.go index 49e44d05239..d7c2cf683cd 100644 --- a/service/cloudformation/api_op_UpdateStack.go +++ b/service/cloudformation/api_op_UpdateStack.go @@ -44,7 +44,7 @@ type UpdateStackInput struct { // In some cases, you must explicitly acknowledge that your stack template contains // certain capabilities in order for AWS CloudFormation to update the stack. // - // * + // * // CAPABILITY_IAM and CAPABILITY_NAMED_IAM Some stack templates might include // resources that can affect permissions in your AWS account; for example, by // creating new AWS Identity and Access Management (IAM) users. For those stacks, @@ -52,54 +52,54 @@ type UpdateStackInput struct { // The following IAM resources require you to specify either the CAPABILITY_IAM or // CAPABILITY_NAMED_IAM capability. // - // * If you have IAM resources, you can - // specify either capability. + // * If you have IAM resources, you can specify + // either capability. // - // * If you have IAM resources with custom - // names, you must specify CAPABILITY_NAMED_IAM. + // * If you have IAM resources with custom names, you must + // specify CAPABILITY_NAMED_IAM. // - // * If you don't specify - // either of these capabilities, AWS CloudFormation returns an - // InsufficientCapabilities error. + // * If you don't specify either of these + // capabilities, AWS CloudFormation returns an InsufficientCapabilities error. 
// - // If your stack template contains these - // resources, we recommend that you review all permissions associated with them and - // edit their permissions if necessary. + // If + // your stack template contains these resources, we recommend that you review all + // permissions associated with them and edit their permissions if necessary. // - // * AWS::IAM::AccessKey + // * + // AWS::IAM::AccessKey // (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-accesskey.html) // - // - // * AWS::IAM::Group + // * + // AWS::IAM::Group // (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-group.html) // - // - // * AWS::IAM::InstanceProfile + // * + // AWS::IAM::InstanceProfile // (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-instanceprofile.html) // - // - // * AWS::IAM::Policy + // * + // AWS::IAM::Policy // (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-policy.html) // - // - // * AWS::IAM::Role + // * + // AWS::IAM::Role // (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-role.html) // - // - // * AWS::IAM::User + // * + // AWS::IAM::User // (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-user.html) // - // - // * AWS::IAM::UserToGroupAddition + // * + // AWS::IAM::UserToGroupAddition // (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-addusertogroup.html) // - // - // For more information, see Acknowledging IAM Resources in AWS CloudFormation + // For + // more information, see Acknowledging IAM Resources in AWS CloudFormation // Templates // (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-iam-template.html#capabilities). // - // - // * CAPABILITY_AUTO_EXPAND Some template contain macros. Macros perform custom + // * + // CAPABILITY_AUTO_EXPAND Some template contain macros. Macros perform custom // processing on templates; this can include simple actions like find-and-replace // operations, all the way to extensive transformations of entire templates. // Because of this, users typically create a change set from the processed diff --git a/service/cloudformation/api_op_UpdateStackInstances.go b/service/cloudformation/api_op_UpdateStackInstances.go index c77f670386b..d0e541df58e 100644 --- a/service/cloudformation/api_op_UpdateStackInstances.go +++ b/service/cloudformation/api_op_UpdateStackInstances.go @@ -90,33 +90,32 @@ type UpdateStackInstancesInput struct { // their values, be aware of how AWS CloudFormation sets parameter values during // stack instance update operations: // - // * To override the current value for a + // * To override the current value for a // parameter, include the parameter and specify its value. // - // * To leave a - // parameter set to its present value, you can do one of the following: + // * To leave a parameter + // set to its present value, you can do one of the following: // - // * - // Do not include the parameter in the list. + // * Do not include the + // parameter in the list. // - // * Include the parameter and - // specify UsePreviousValue as true. (You cannot specify both a value and set - // UsePreviousValue to true.) + // * Include the parameter and specify UsePreviousValue as + // true. (You cannot specify both a value and set UsePreviousValue to true.) 
// - // * To set all overridden parameter back to the - // values specified in the stack set, specify a parameter list but do not include - // any parameters. + // * To + // set all overridden parameter back to the values specified in the stack set, + // specify a parameter list but do not include any parameters. // - // * To leave all parameters set to their present values, do - // not specify this property at all. + // * To leave all + // parameters set to their present values, do not specify this property at + // all. // - // During stack set updates, any parameter - // values overridden for a stack instance are not updated, but retain their - // overridden value. You can only override the parameter values that are specified - // in the stack set; to add or delete a parameter itself, use UpdateStackSet to - // update the stack set template. If you add a parameter to a template, before you - // can override the parameter value specified in the stack set you must first use - // UpdateStackSet + // During stack set updates, any parameter values overridden for a stack + // instance are not updated, but retain their overridden value. You can only + // override the parameter values that are specified in the stack set; to add or + // delete a parameter itself, use UpdateStackSet to update the stack set template. + // If you add a parameter to a template, before you can override the parameter + // value specified in the stack set you must first use UpdateStackSet // (https://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_UpdateStackSet.html) // to update all stack instances with the updated template and parameter value // specified in the stack set. Once a stack instance has been updated with the new diff --git a/service/cloudformation/api_op_UpdateStackSet.go b/service/cloudformation/api_op_UpdateStackSet.go index 99594650561..b905302cb5e 100644 --- a/service/cloudformation/api_op_UpdateStackSet.go +++ b/service/cloudformation/api_op_UpdateStackSet.go @@ -75,61 +75,61 @@ type UpdateStackSetInput struct { // certain capabilities in order for AWS CloudFormation to update the stack set and // its associated stack instances. // - // * CAPABILITY_IAM and CAPABILITY_NAMED_IAM - // Some stack templates might include resources that can affect permissions in your - // AWS account; for example, by creating new AWS Identity and Access Management - // (IAM) users. For those stacks sets, you must explicitly acknowledge this by - // specifying one of these capabilities. The following IAM resources require you to - // specify either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability. + // * CAPABILITY_IAM and CAPABILITY_NAMED_IAM Some + // stack templates might include resources that can affect permissions in your AWS + // account; for example, by creating new AWS Identity and Access Management (IAM) + // users. For those stacks sets, you must explicitly acknowledge this by specifying + // one of these capabilities. The following IAM resources require you to specify + // either the CAPABILITY_IAM or CAPABILITY_NAMED_IAM capability. // - // * - // If you have IAM resources, you can specify either capability. + // * If you have IAM + // resources, you can specify either capability. // - // * If you - // have IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM. + // * If you have IAM resources with + // custom names, you must specify CAPABILITY_NAMED_IAM. 
// + // * If you don't specify + // either of these capabilities, AWS CloudFormation returns an + // InsufficientCapabilities error. // - // * If you don't specify either of these capabilities, AWS CloudFormation returns - // an InsufficientCapabilities error. - // - // If your stack template contains these + // If your stack template contains these // resources, we recommend that you review all permissions associated with them and // edit their permissions if necessary. // - // * AWS::IAM::AccessKey + // * AWS::IAM::AccessKey // (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-accesskey.html) // - // - // * AWS::IAM::Group + // * + // AWS::IAM::Group // (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-group.html) // - // - // * AWS::IAM::InstanceProfile + // * + // AWS::IAM::InstanceProfile // (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-instanceprofile.html) // - // - // * AWS::IAM::Policy + // * + // AWS::IAM::Policy // (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-policy.html) // - // - // * AWS::IAM::Role + // * + // AWS::IAM::Role // (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-role.html) // - // - // * AWS::IAM::User + // * + // AWS::IAM::User // (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-user.html) // - // - // * AWS::IAM::UserToGroupAddition + // * + // AWS::IAM::UserToGroupAddition // (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-addusertogroup.html) // - // - // For more information, see Acknowledging IAM Resources in AWS CloudFormation + // For + // more information, see Acknowledging IAM Resources in AWS CloudFormation // Templates // (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-iam-template.html#capabilities). // - // - // * CAPABILITY_AUTO_EXPAND Some templates contain macros. If your stack template + // * + // CAPABILITY_AUTO_EXPAND Some templates contain macros. If your stack template // contains one or more macros, and you choose to update a stack directly from the // processed template, without first reviewing the resulting changes in a change // set, you must acknowledge this capability. For more information, see Using AWS @@ -190,14 +190,14 @@ type UpdateStackSetInput struct { // cannot modify PermissionModel if there are stack instances associated with your // stack set. // - // * With self-managed permissions, you must create the - // administrator and execution roles required to deploy to target accounts. For - // more information, see Grant Self-Managed Stack Set Permissions + // * With self-managed permissions, you must create the administrator + // and execution roles required to deploy to target accounts. For more information, + // see Grant Self-Managed Stack Set Permissions // (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/stacksets-prereqs-self-managed.html). // - // - // * With service-managed permissions, StackSets automatically creates the IAM - // roles required to deploy to accounts managed by AWS Organizations. For more + // * + // With service-managed permissions, StackSets automatically creates the IAM roles + // required to deploy to accounts managed by AWS Organizations. For more // information, see Grant Service-Managed Stack Set Permissions // (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/stacksets-prereqs-service-managed.html). 
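A minimal sketch (not part of the generated code) of wiring the PermissionModel and Capabilities values documented above into an UpdateStackSet call, using the renamed enum constants introduced later in this patch. The stack set name and the helper function are placeholders, and the call remains subject to the PermissionModel restrictions described above.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/cloudformation"
	"github.com/aws/aws-sdk-go-v2/service/cloudformation/types"
)

// switchToServiceManaged updates the stack set's permission model while keeping
// the previously submitted template.
func switchToServiceManaged(ctx context.Context, client *cloudformation.Client) error {
	_, err := client.UpdateStackSet(ctx, &cloudformation.UpdateStackSetInput{
		StackSetName:        aws.String("example-stack-set"), // placeholder
		UsePreviousTemplate: aws.Bool(true),
		// With SERVICE_MANAGED, StackSets creates the IAM roles needed to deploy
		// to accounts managed by AWS Organizations.
		PermissionModel: types.PermissionModelsServiceManaged,
		// Acknowledge IAM resources with custom names, per the capability notes above.
		Capabilities: []types.Capability{types.CapabilityCapabilityNamedIam},
	})
	return err
}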
PermissionModel types.PermissionModels @@ -221,28 +221,28 @@ type UpdateStackSetInput struct { // specify tags for this parameter, those tags replace any list of tags that are // currently associated with this stack set. This means: // - // * If you don't - // specify this parameter, AWS CloudFormation doesn't modify the stack's tags. - // - // - // * If you specify any tags using this parameter, you must specify all the tags - // that you want associated with this stack set, even tags you've specifed before - // (for example, when creating the stack set or during a previous update of the - // stack set.). Any tags that you don't include in the updated list of tags are - // removed from the stack set, and therefore from the stacks and resources as - // well. - // - // * If you specify an empty value, AWS CloudFormation removes all - // currently associated tags. - // - // If you specify new tags as part of an UpdateStackSet - // action, AWS CloudFormation checks to see if you have the required IAM permission - // to tag resources. If you omit tags that are currently associated with the stack - // set from the list of tags you specify, AWS CloudFormation assumes that you want - // to remove those tags from the stack set, and checks to see if you have - // permission to untag resources. If you don't have the necessary permission(s), - // the entire UpdateStackSet action fails with an access denied error, and the - // stack set is not updated. + // * If you don't specify + // this parameter, AWS CloudFormation doesn't modify the stack's tags. + // + // * If you + // specify any tags using this parameter, you must specify all the tags that you + // want associated with this stack set, even tags you've specifed before (for + // example, when creating the stack set or during a previous update of the stack + // set.). Any tags that you don't include in the updated list of tags are removed + // from the stack set, and therefore from the stacks and resources as well. + // + // * If + // you specify an empty value, AWS CloudFormation removes all currently associated + // tags. + // + // If you specify new tags as part of an UpdateStackSet action, AWS + // CloudFormation checks to see if you have the required IAM permission to tag + // resources. If you omit tags that are currently associated with the stack set + // from the list of tags you specify, AWS CloudFormation assumes that you want to + // remove those tags from the stack set, and checks to see if you have permission + // to untag resources. If you don't have the necessary permission(s), the entire + // UpdateStackSet action fails with an access denied error, and the stack set is + // not updated. Tags []*types.Tag // The structure that contains the template body, with a minimum length of 1 byte diff --git a/service/cloudformation/types/enums.go b/service/cloudformation/types/enums.go index 3451fe58fc7..cd9f3bab8ed 100644 --- a/service/cloudformation/types/enums.go +++ b/service/cloudformation/types/enums.go @@ -26,9 +26,9 @@ type Capability string // Enum values for Capability const ( - CapabilityCapability_iam Capability = "CAPABILITY_IAM" - CapabilityCapability_named_iam Capability = "CAPABILITY_NAMED_IAM" - CapabilityCapability_auto_expand Capability = "CAPABILITY_AUTO_EXPAND" + CapabilityCapabilityIam Capability = "CAPABILITY_IAM" + CapabilityCapabilityNamedIam Capability = "CAPABILITY_NAMED_IAM" + CapabilityCapabilityAutoExpand Capability = "CAPABILITY_AUTO_EXPAND" ) // Values returns all known values for Capability. 
Note that this can be expanded @@ -68,11 +68,11 @@ type ChangeSetStatus string // Enum values for ChangeSetStatus const ( - ChangeSetStatusCreate_pending ChangeSetStatus = "CREATE_PENDING" - ChangeSetStatusCreate_in_progress ChangeSetStatus = "CREATE_IN_PROGRESS" - ChangeSetStatusCreate_complete ChangeSetStatus = "CREATE_COMPLETE" - ChangeSetStatusDelete_complete ChangeSetStatus = "DELETE_COMPLETE" - ChangeSetStatusFailed ChangeSetStatus = "FAILED" + ChangeSetStatusCreatePending ChangeSetStatus = "CREATE_PENDING" + ChangeSetStatusCreateInProgress ChangeSetStatus = "CREATE_IN_PROGRESS" + ChangeSetStatusCreateComplete ChangeSetStatus = "CREATE_COMPLETE" + ChangeSetStatusDeleteComplete ChangeSetStatus = "DELETE_COMPLETE" + ChangeSetStatusFailed ChangeSetStatus = "FAILED" ) // Values returns all known values for ChangeSetStatus. Note that this can be @@ -170,9 +170,9 @@ type DifferenceType string // Enum values for DifferenceType const ( - DifferenceTypeAdd DifferenceType = "ADD" - DifferenceTypeRemove DifferenceType = "REMOVE" - DifferenceTypeNot_equal DifferenceType = "NOT_EQUAL" + DifferenceTypeAdd DifferenceType = "ADD" + DifferenceTypeRemove DifferenceType = "REMOVE" + DifferenceTypeNotEqual DifferenceType = "NOT_EQUAL" ) // Values returns all known values for DifferenceType. Note that this can be @@ -208,12 +208,12 @@ type ExecutionStatus string // Enum values for ExecutionStatus const ( - ExecutionStatusUnavailable ExecutionStatus = "UNAVAILABLE" - ExecutionStatusAvailable ExecutionStatus = "AVAILABLE" - ExecutionStatusExecute_in_progress ExecutionStatus = "EXECUTE_IN_PROGRESS" - ExecutionStatusExecute_complete ExecutionStatus = "EXECUTE_COMPLETE" - ExecutionStatusExecute_failed ExecutionStatus = "EXECUTE_FAILED" - ExecutionStatusObsolete ExecutionStatus = "OBSOLETE" + ExecutionStatusUnavailable ExecutionStatus = "UNAVAILABLE" + ExecutionStatusAvailable ExecutionStatus = "AVAILABLE" + ExecutionStatusExecuteInProgress ExecutionStatus = "EXECUTE_IN_PROGRESS" + ExecutionStatusExecuteComplete ExecutionStatus = "EXECUTE_COMPLETE" + ExecutionStatusExecuteFailed ExecutionStatus = "EXECUTE_FAILED" + ExecutionStatusObsolete ExecutionStatus = "OBSOLETE" ) // Values returns all known values for ExecutionStatus. Note that this can be @@ -276,9 +276,9 @@ type OnFailure string // Enum values for OnFailure const ( - OnFailureDo_nothing OnFailure = "DO_NOTHING" - OnFailureRollback OnFailure = "ROLLBACK" - OnFailureDelete OnFailure = "DELETE" + OnFailureDoNothing OnFailure = "DO_NOTHING" + OnFailureRollback OnFailure = "ROLLBACK" + OnFailureDelete OnFailure = "DELETE" ) // Values returns all known values for OnFailure. Note that this can be expanded in @@ -296,10 +296,10 @@ type OperationStatus string // Enum values for OperationStatus const ( - OperationStatusPending OperationStatus = "PENDING" - OperationStatusIn_progress OperationStatus = "IN_PROGRESS" - OperationStatusSuccess OperationStatus = "SUCCESS" - OperationStatusFailed OperationStatus = "FAILED" + OperationStatusPending OperationStatus = "PENDING" + OperationStatusInProgress OperationStatus = "IN_PROGRESS" + OperationStatusSuccess OperationStatus = "SUCCESS" + OperationStatusFailed OperationStatus = "FAILED" ) // Values returns all known values for OperationStatus. 
Note that this can be @@ -318,8 +318,8 @@ type PermissionModels string // Enum values for PermissionModels const ( - PermissionModelsService_managed PermissionModels = "SERVICE_MANAGED" - PermissionModelsSelf_managed PermissionModels = "SELF_MANAGED" + PermissionModelsServiceManaged PermissionModels = "SERVICE_MANAGED" + PermissionModelsSelfManaged PermissionModels = "SELF_MANAGED" ) // Values returns all known values for PermissionModels. Note that this can be @@ -336,9 +336,9 @@ type ProvisioningType string // Enum values for ProvisioningType const ( - ProvisioningTypeNon_provisionable ProvisioningType = "NON_PROVISIONABLE" - ProvisioningTypeImmutable ProvisioningType = "IMMUTABLE" - ProvisioningTypeFully_mutable ProvisioningType = "FULLY_MUTABLE" + ProvisioningTypeNonProvisionable ProvisioningType = "NON_PROVISIONABLE" + ProvisioningTypeImmutable ProvisioningType = "IMMUTABLE" + ProvisioningTypeFullyMutable ProvisioningType = "FULLY_MUTABLE" ) // Values returns all known values for ProvisioningType. Note that this can be @@ -356,9 +356,9 @@ type RegistrationStatus string // Enum values for RegistrationStatus const ( - RegistrationStatusComplete RegistrationStatus = "COMPLETE" - RegistrationStatusIn_progress RegistrationStatus = "IN_PROGRESS" - RegistrationStatusFailed RegistrationStatus = "FAILED" + RegistrationStatusComplete RegistrationStatus = "COMPLETE" + RegistrationStatusInProgress RegistrationStatus = "IN_PROGRESS" + RegistrationStatusFailed RegistrationStatus = "FAILED" ) // Values returns all known values for RegistrationStatus. Note that this can be @@ -476,22 +476,22 @@ type ResourceStatus string // Enum values for ResourceStatus const ( - ResourceStatusCreate_in_progress ResourceStatus = "CREATE_IN_PROGRESS" - ResourceStatusCreate_failed ResourceStatus = "CREATE_FAILED" - ResourceStatusCreate_complete ResourceStatus = "CREATE_COMPLETE" - ResourceStatusDelete_in_progress ResourceStatus = "DELETE_IN_PROGRESS" - ResourceStatusDelete_failed ResourceStatus = "DELETE_FAILED" - ResourceStatusDelete_complete ResourceStatus = "DELETE_COMPLETE" - ResourceStatusDelete_skipped ResourceStatus = "DELETE_SKIPPED" - ResourceStatusUpdate_in_progress ResourceStatus = "UPDATE_IN_PROGRESS" - ResourceStatusUpdate_failed ResourceStatus = "UPDATE_FAILED" - ResourceStatusUpdate_complete ResourceStatus = "UPDATE_COMPLETE" - ResourceStatusImport_failed ResourceStatus = "IMPORT_FAILED" - ResourceStatusImport_complete ResourceStatus = "IMPORT_COMPLETE" - ResourceStatusImport_in_progress ResourceStatus = "IMPORT_IN_PROGRESS" - ResourceStatusImport_rollback_in_progress ResourceStatus = "IMPORT_ROLLBACK_IN_PROGRESS" - ResourceStatusImport_rollback_failed ResourceStatus = "IMPORT_ROLLBACK_FAILED" - ResourceStatusImport_rollback_complete ResourceStatus = "IMPORT_ROLLBACK_COMPLETE" + ResourceStatusCreateInProgress ResourceStatus = "CREATE_IN_PROGRESS" + ResourceStatusCreateFailed ResourceStatus = "CREATE_FAILED" + ResourceStatusCreateComplete ResourceStatus = "CREATE_COMPLETE" + ResourceStatusDeleteInProgress ResourceStatus = "DELETE_IN_PROGRESS" + ResourceStatusDeleteFailed ResourceStatus = "DELETE_FAILED" + ResourceStatusDeleteComplete ResourceStatus = "DELETE_COMPLETE" + ResourceStatusDeleteSkipped ResourceStatus = "DELETE_SKIPPED" + ResourceStatusUpdateInProgress ResourceStatus = "UPDATE_IN_PROGRESS" + ResourceStatusUpdateFailed ResourceStatus = "UPDATE_FAILED" + ResourceStatusUpdateComplete ResourceStatus = "UPDATE_COMPLETE" + ResourceStatusImportFailed ResourceStatus = "IMPORT_FAILED" + 
ResourceStatusImportComplete ResourceStatus = "IMPORT_COMPLETE" + ResourceStatusImportInProgress ResourceStatus = "IMPORT_IN_PROGRESS" + ResourceStatusImportRollbackInProgress ResourceStatus = "IMPORT_ROLLBACK_IN_PROGRESS" + ResourceStatusImportRollbackFailed ResourceStatus = "IMPORT_ROLLBACK_FAILED" + ResourceStatusImportRollbackComplete ResourceStatus = "IMPORT_ROLLBACK_COMPLETE" ) // Values returns all known values for ResourceStatus. Note that this can be @@ -522,9 +522,9 @@ type StackDriftDetectionStatus string // Enum values for StackDriftDetectionStatus const ( - StackDriftDetectionStatusDetection_in_progress StackDriftDetectionStatus = "DETECTION_IN_PROGRESS" - StackDriftDetectionStatusDetection_failed StackDriftDetectionStatus = "DETECTION_FAILED" - StackDriftDetectionStatusDetection_complete StackDriftDetectionStatus = "DETECTION_COMPLETE" + StackDriftDetectionStatusDetectionInProgress StackDriftDetectionStatus = "DETECTION_IN_PROGRESS" + StackDriftDetectionStatusDetectionFailed StackDriftDetectionStatus = "DETECTION_FAILED" + StackDriftDetectionStatusDetectionComplete StackDriftDetectionStatus = "DETECTION_COMPLETE" ) // Values returns all known values for StackDriftDetectionStatus. Note that this @@ -542,10 +542,10 @@ type StackDriftStatus string // Enum values for StackDriftStatus const ( - StackDriftStatusDrifted StackDriftStatus = "DRIFTED" - StackDriftStatusIn_sync StackDriftStatus = "IN_SYNC" - StackDriftStatusUnknown StackDriftStatus = "UNKNOWN" - StackDriftStatusNot_checked StackDriftStatus = "NOT_CHECKED" + StackDriftStatusDrifted StackDriftStatus = "DRIFTED" + StackDriftStatusInSync StackDriftStatus = "IN_SYNC" + StackDriftStatusUnknown StackDriftStatus = "UNKNOWN" + StackDriftStatusNotChecked StackDriftStatus = "NOT_CHECKED" ) // Values returns all known values for StackDriftStatus. Note that this can be @@ -590,7 +590,7 @@ type StackInstanceFilterName string // Enum values for StackInstanceFilterName const ( - StackInstanceFilterNameDetailed_status StackInstanceFilterName = "DETAILED_STATUS" + StackInstanceFilterNameDetailedStatus StackInstanceFilterName = "DETAILED_STATUS" ) // Values returns all known values for StackInstanceFilterName. Note that this can @@ -626,10 +626,10 @@ type StackResourceDriftStatus string // Enum values for StackResourceDriftStatus const ( - StackResourceDriftStatusIn_sync StackResourceDriftStatus = "IN_SYNC" - StackResourceDriftStatusModified StackResourceDriftStatus = "MODIFIED" - StackResourceDriftStatusDeleted StackResourceDriftStatus = "DELETED" - StackResourceDriftStatusNot_checked StackResourceDriftStatus = "NOT_CHECKED" + StackResourceDriftStatusInSync StackResourceDriftStatus = "IN_SYNC" + StackResourceDriftStatusModified StackResourceDriftStatus = "MODIFIED" + StackResourceDriftStatusDeleted StackResourceDriftStatus = "DELETED" + StackResourceDriftStatusNotChecked StackResourceDriftStatus = "NOT_CHECKED" ) // Values returns all known values for StackResourceDriftStatus. 
Note that this can @@ -648,11 +648,11 @@ type StackSetDriftDetectionStatus string // Enum values for StackSetDriftDetectionStatus const ( - StackSetDriftDetectionStatusCompleted StackSetDriftDetectionStatus = "COMPLETED" - StackSetDriftDetectionStatusFailed StackSetDriftDetectionStatus = "FAILED" - StackSetDriftDetectionStatusPartial_success StackSetDriftDetectionStatus = "PARTIAL_SUCCESS" - StackSetDriftDetectionStatusIn_progress StackSetDriftDetectionStatus = "IN_PROGRESS" - StackSetDriftDetectionStatusStopped StackSetDriftDetectionStatus = "STOPPED" + StackSetDriftDetectionStatusCompleted StackSetDriftDetectionStatus = "COMPLETED" + StackSetDriftDetectionStatusFailed StackSetDriftDetectionStatus = "FAILED" + StackSetDriftDetectionStatusPartialSuccess StackSetDriftDetectionStatus = "PARTIAL_SUCCESS" + StackSetDriftDetectionStatusInProgress StackSetDriftDetectionStatus = "IN_PROGRESS" + StackSetDriftDetectionStatusStopped StackSetDriftDetectionStatus = "STOPPED" ) // Values returns all known values for StackSetDriftDetectionStatus. Note that this @@ -672,9 +672,9 @@ type StackSetDriftStatus string // Enum values for StackSetDriftStatus const ( - StackSetDriftStatusDrifted StackSetDriftStatus = "DRIFTED" - StackSetDriftStatusIn_sync StackSetDriftStatus = "IN_SYNC" - StackSetDriftStatusNot_checked StackSetDriftStatus = "NOT_CHECKED" + StackSetDriftStatusDrifted StackSetDriftStatus = "DRIFTED" + StackSetDriftStatusInSync StackSetDriftStatus = "IN_SYNC" + StackSetDriftStatusNotChecked StackSetDriftStatus = "NOT_CHECKED" ) // Values returns all known values for StackSetDriftStatus. Note that this can be @@ -692,10 +692,10 @@ type StackSetOperationAction string // Enum values for StackSetOperationAction const ( - StackSetOperationActionCreate StackSetOperationAction = "CREATE" - StackSetOperationActionUpdate StackSetOperationAction = "UPDATE" - StackSetOperationActionDelete StackSetOperationAction = "DELETE" - StackSetOperationActionDetect_drift StackSetOperationAction = "DETECT_DRIFT" + StackSetOperationActionCreate StackSetOperationAction = "CREATE" + StackSetOperationActionUpdate StackSetOperationAction = "UPDATE" + StackSetOperationActionDelete StackSetOperationAction = "DELETE" + StackSetOperationActionDetectDrift StackSetOperationAction = "DETECT_DRIFT" ) // Values returns all known values for StackSetOperationAction. 
Note that this can @@ -783,28 +783,28 @@ type StackStatus string // Enum values for StackStatus const ( - StackStatusCreate_in_progress StackStatus = "CREATE_IN_PROGRESS" - StackStatusCreate_failed StackStatus = "CREATE_FAILED" - StackStatusCreate_complete StackStatus = "CREATE_COMPLETE" - StackStatusRollback_in_progress StackStatus = "ROLLBACK_IN_PROGRESS" - StackStatusRollback_failed StackStatus = "ROLLBACK_FAILED" - StackStatusRollback_complete StackStatus = "ROLLBACK_COMPLETE" - StackStatusDelete_in_progress StackStatus = "DELETE_IN_PROGRESS" - StackStatusDelete_failed StackStatus = "DELETE_FAILED" - StackStatusDelete_complete StackStatus = "DELETE_COMPLETE" - StackStatusUpdate_in_progress StackStatus = "UPDATE_IN_PROGRESS" - StackStatusUpdate_complete_cleanup_in_progress StackStatus = "UPDATE_COMPLETE_CLEANUP_IN_PROGRESS" - StackStatusUpdate_complete StackStatus = "UPDATE_COMPLETE" - StackStatusUpdate_rollback_in_progress StackStatus = "UPDATE_ROLLBACK_IN_PROGRESS" - StackStatusUpdate_rollback_failed StackStatus = "UPDATE_ROLLBACK_FAILED" - StackStatusUpdate_rollback_complete_cleanup_in_progress StackStatus = "UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS" - StackStatusUpdate_rollback_complete StackStatus = "UPDATE_ROLLBACK_COMPLETE" - StackStatusReview_in_progress StackStatus = "REVIEW_IN_PROGRESS" - StackStatusImport_in_progress StackStatus = "IMPORT_IN_PROGRESS" - StackStatusImport_complete StackStatus = "IMPORT_COMPLETE" - StackStatusImport_rollback_in_progress StackStatus = "IMPORT_ROLLBACK_IN_PROGRESS" - StackStatusImport_rollback_failed StackStatus = "IMPORT_ROLLBACK_FAILED" - StackStatusImport_rollback_complete StackStatus = "IMPORT_ROLLBACK_COMPLETE" + StackStatusCreateInProgress StackStatus = "CREATE_IN_PROGRESS" + StackStatusCreateFailed StackStatus = "CREATE_FAILED" + StackStatusCreateComplete StackStatus = "CREATE_COMPLETE" + StackStatusRollbackInProgress StackStatus = "ROLLBACK_IN_PROGRESS" + StackStatusRollbackFailed StackStatus = "ROLLBACK_FAILED" + StackStatusRollbackComplete StackStatus = "ROLLBACK_COMPLETE" + StackStatusDeleteInProgress StackStatus = "DELETE_IN_PROGRESS" + StackStatusDeleteFailed StackStatus = "DELETE_FAILED" + StackStatusDeleteComplete StackStatus = "DELETE_COMPLETE" + StackStatusUpdateInProgress StackStatus = "UPDATE_IN_PROGRESS" + StackStatusUpdateCompleteCleanupInProgress StackStatus = "UPDATE_COMPLETE_CLEANUP_IN_PROGRESS" + StackStatusUpdateComplete StackStatus = "UPDATE_COMPLETE" + StackStatusUpdateRollbackInProgress StackStatus = "UPDATE_ROLLBACK_IN_PROGRESS" + StackStatusUpdateRollbackFailed StackStatus = "UPDATE_ROLLBACK_FAILED" + StackStatusUpdateRollbackCompleteCleanupInProgress StackStatus = "UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS" + StackStatusUpdateRollbackComplete StackStatus = "UPDATE_ROLLBACK_COMPLETE" + StackStatusReviewInProgress StackStatus = "REVIEW_IN_PROGRESS" + StackStatusImportInProgress StackStatus = "IMPORT_IN_PROGRESS" + StackStatusImportComplete StackStatus = "IMPORT_COMPLETE" + StackStatusImportRollbackInProgress StackStatus = "IMPORT_ROLLBACK_IN_PROGRESS" + StackStatusImportRollbackFailed StackStatus = "IMPORT_ROLLBACK_FAILED" + StackStatusImportRollbackComplete StackStatus = "IMPORT_ROLLBACK_COMPLETE" ) // Values returns all known values for StackStatus. 
Note that this can be expanded diff --git a/service/cloudformation/types/types.go b/service/cloudformation/types/types.go index 0c968a2e5df..49a1abd50d8 100644 --- a/service/cloudformation/types/types.go +++ b/service/cloudformation/types/types.go @@ -21,33 +21,33 @@ type AccountGateResult struct { // The status of the account gate function. // - // * SUCCEEDED: The account gate - // function has determined that the account and Region passes any requirements for - // a stack set operation to occur. AWS CloudFormation proceeds with the stack - // operation in that account and Region. - // - // * FAILED: The account gate function - // has determined that the account and Region does not meet the requirements for a - // stack set operation to occur. AWS CloudFormation cancels the stack set operation - // in that account and Region, and sets the stack set operation result status for - // that account and Region to FAILED. - // - // * SKIPPED: AWS CloudFormation has - // skipped calling the account gate function for this account and Region, for one - // of the following reasons: - // - // * An account gate function has not been - // specified for the account and Region. AWS CloudFormation proceeds with the stack - // set operation in this account and Region. - // - // * The - // AWSCloudFormationStackSetExecutionRole of the stack set adminstration account - // lacks permissions to invoke the function. AWS CloudFormation proceeds with the - // stack set operation in this account and Region. - // - // * Either no action is - // necessary, or no action is possible, on the stack. AWS CloudFormation skips the - // stack set operation in this account and Region. + // * SUCCEEDED: The account gate function + // has determined that the account and Region passes any requirements for a stack + // set operation to occur. AWS CloudFormation proceeds with the stack operation in + // that account and Region. + // + // * FAILED: The account gate function has determined + // that the account and Region does not meet the requirements for a stack set + // operation to occur. AWS CloudFormation cancels the stack set operation in that + // account and Region, and sets the stack set operation result status for that + // account and Region to FAILED. + // + // * SKIPPED: AWS CloudFormation has skipped calling + // the account gate function for this account and Region, for one of the following + // reasons: + // + // * An account gate function has not been specified for the account and + // Region. AWS CloudFormation proceeds with the stack set operation in this account + // and Region. + // + // * The AWSCloudFormationStackSetExecutionRole of the stack set + // adminstration account lacks permissions to invoke the function. AWS + // CloudFormation proceeds with the stack set operation in this account and + // Region. + // + // * Either no action is necessary, or no action is possible, on the + // stack. AWS CloudFormation skips the stack set operation in this account and + // Region. Status AccountGateStatus // The reason for the account gate status assigned to this account and Region for @@ -58,15 +58,15 @@ type AccountGateResult struct { // The AccountLimit data type. 
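The underscore-to-CamelCase constant renames in types/enums.go above (for example StackDriftStatusIn_sync becoming StackDriftStatusInSync) are source-breaking for existing callers. A minimal sketch, not part of the generated code, of what updated caller code looks like; the helper function is hypothetical.

package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/cloudformation/types"
)

// describeDrift maps a StackDriftStatus value to a human-readable summary using
// the renamed constants from this patch.
func describeDrift(status types.StackDriftStatus) string {
	switch status {
	case types.StackDriftStatusDrifted:
		return "stack has drifted from its expected template configuration"
	case types.StackDriftStatusInSync:
		return "stack matches its expected template configuration"
	case types.StackDriftStatusNotChecked:
		return "drift has not been checked for this stack"
	default:
		// UNKNOWN (reserved) and any values added in the future.
		return fmt.Sprintf("unrecognized drift status %q", status)
	}
}

The same pattern applies to the other renamed enums in this patch, such as ResourceStatus, StackStatus, and StackSetDriftDetectionStatus.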
CloudFormation has the following limits per // account: // -// * Number of concurrent resources +// * Number of concurrent resources // -// * Number of stacks +// * Number of stacks // -// * -// Number of stack outputs +// * Number of +// stack outputs // -// For more information about these account limits, and -// other CloudFormation limits, see AWS CloudFormation Limits +// For more information about these account limits, and other +// CloudFormation limits, see AWS CloudFormation Limits // (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cloudformation-limits.html) // in the AWS CloudFormation User Guide. type AccountLimit struct { @@ -300,15 +300,15 @@ type PropertyDifference struct { // The type of property difference. // - // * ADD: A value has been added to a - // resource property that is an array or list data type. + // * ADD: A value has been added to a resource + // property that is an array or list data type. // - // * REMOVE: The - // property has been removed from the current resource configuration. + // * REMOVE: The property has been + // removed from the current resource configuration. // - // * - // NOT_EQUAL: The current property value differs from its expected value (as - // defined in the stack template and any values specified as template parameters). + // * NOT_EQUAL: The current + // property value differs from its expected value (as defined in the stack template + // and any values specified as template parameters). // // This member is required. DifferenceType DifferenceType @@ -379,27 +379,27 @@ type ResourceChangeDetail struct { // The group to which the CausingEntity value belongs. There are five entity // groups: // - // * ResourceReference entities are Ref intrinsic functions that refer - // to resources in the template, such as { "Ref" : "MyEC2InstanceResource" }. + // * ResourceReference entities are Ref intrinsic functions that refer to + // resources in the template, such as { "Ref" : "MyEC2InstanceResource" }. // - // - // * ParameterReference entities are Ref intrinsic functions that get template + // * + // ParameterReference entities are Ref intrinsic functions that get template // parameter values, such as { "Ref" : "MyPasswordParameter" }. // - // * + // * // ResourceAttribute entities are Fn::GetAtt intrinsic functions that get resource // attribute values, such as { "Fn::GetAtt" : [ "MyEC2InstanceResource", // "PublicDnsName" ] }. // - // * DirectModification entities are changes that are - // made directly to the template. + // * DirectModification entities are changes that are made + // directly to the template. // - // * Automatic entities are - // AWS::CloudFormation::Stack resource types, which are also known as nested - // stacks. If you made no changes to the AWS::CloudFormation::Stack resource, AWS - // CloudFormation sets the ChangeSource to Automatic because the nested stack's - // template might have changed. Changes to a nested stack's template aren't visible - // to AWS CloudFormation until you run an update on the parent stack. + // * Automatic entities are AWS::CloudFormation::Stack + // resource types, which are also known as nested stacks. If you made no changes to + // the AWS::CloudFormation::Stack resource, AWS CloudFormation sets the + // ChangeSource to Automatic because the nested stack's template might have + // changed. Changes to a nested stack's template aren't visible to AWS + // CloudFormation until you run an update on the parent stack. 
ChangeSource ChangeSource // Indicates whether AWS CloudFormation can determine the target value, and whether @@ -517,20 +517,20 @@ type RollbackConfiguration struct { // triggers replace any list of triggers previously specified for the stack. This // means: // - // * To use the rollback triggers previously specified for this stack, - // if any, don't specify this parameter. + // * To use the rollback triggers previously specified for this stack, if + // any, don't specify this parameter. // - // * To specify new or updated rollback + // * To specify new or updated rollback // triggers, you must specify all the triggers that you want used for this stack, // even triggers you've specifed before (for example, when creating the stack or // during a previous stack update). Any triggers that you don't include in the // updated list of triggers are no longer applied to the stack. // - // * To remove - // all currently specified triggers, specify an empty list for this parameter. + // * To remove all + // currently specified triggers, specify an empty list for this parameter. // - // If - // a specified trigger is missing, the entire stack operation fails and is rolled + // If a + // specified trigger is missing, the entire stack operation fails and is rolled // back. RollbackTriggers []*RollbackTrigger } @@ -587,10 +587,10 @@ type Stack struct { // Boolean to enable or disable rollback on stack creation failures: // - // * true: + // * true: // disable rollback // - // * false: enable rollback + // * false: enable rollback DisableRollback *bool // Information on whether a stack's actual configuration differs, or has drifted, @@ -668,18 +668,18 @@ type StackDriftInformation struct { // Status of the stack's actual configuration compared to its expected template // configuration. // - // * DRIFTED: The stack differs from its expected template + // * DRIFTED: The stack differs from its expected template // configuration. A stack is considered to have drifted if one or more of its // resources have drifted. // - // * NOT_CHECKED: AWS CloudFormation has not checked - // if the stack differs from its expected template configuration. - // - // * IN_SYNC: - // The stack's actual configuration matches its expected template configuration. + // * NOT_CHECKED: AWS CloudFormation has not checked if + // the stack differs from its expected template configuration. // + // * IN_SYNC: The + // stack's actual configuration matches its expected template configuration. // - // * UNKNOWN: This value is reserved for future use. + // * + // UNKNOWN: This value is reserved for future use. // // This member is required. StackDriftStatus StackDriftStatus @@ -698,18 +698,18 @@ type StackDriftInformationSummary struct { // Status of the stack's actual configuration compared to its expected template // configuration. // - // * DRIFTED: The stack differs from its expected template + // * DRIFTED: The stack differs from its expected template // configuration. A stack is considered to have drifted if one or more of its // resources have drifted. // - // * NOT_CHECKED: AWS CloudFormation has not checked - // if the stack differs from its expected template configuration. - // - // * IN_SYNC: - // The stack's actual configuration matches its expected template configuration. + // * NOT_CHECKED: AWS CloudFormation has not checked if + // the stack differs from its expected template configuration. // + // * IN_SYNC: The + // stack's actual configuration matches its expected template configuration. 
// - // * UNKNOWN: This value is reserved for future use. + // * + // UNKNOWN: This value is reserved for future use. // // This member is required. StackDriftStatus StackDriftStatus @@ -793,20 +793,20 @@ type StackInstance struct { // Status of the stack instance's actual configuration compared to the expected // template and parameter configuration of the stack set to which it belongs. // - // - // * DRIFTED: The stack differs from the expected template and parameter + // * + // DRIFTED: The stack differs from the expected template and parameter // configuration of the stack set to which it belongs. A stack instance is // considered to have drifted if one or more of the resources in the associated // stack have drifted. // - // * NOT_CHECKED: AWS CloudFormation has not checked if - // the stack instance differs from its expected stack set configuration. + // * NOT_CHECKED: AWS CloudFormation has not checked if the + // stack instance differs from its expected stack set configuration. // - // * - // IN_SYNC: The stack instance's actual configuration matches its expected stack - // set configuration. + // * IN_SYNC: + // The stack instance's actual configuration matches its expected stack set + // configuration. // - // * UNKNOWN: This value is reserved for future use. + // * UNKNOWN: This value is reserved for future use. DriftStatus StackDriftStatus // Most recent time when CloudFormation performed a drift detection operation on @@ -839,24 +839,24 @@ type StackInstance struct { // The status of the stack instance, in terms of its synchronization with its // associated stack set. // - // * INOPERABLE: A DeleteStackInstances operation has - // failed and left the stack in an unstable state. Stacks in this state are - // excluded from further UpdateStackSet operations. You might need to perform a + // * INOPERABLE: A DeleteStackInstances operation has failed + // and left the stack in an unstable state. Stacks in this state are excluded from + // further UpdateStackSet operations. You might need to perform a // DeleteStackInstances operation, with RetainStacks set to true, to delete the // stack instance, and then delete the stack manually. // - // * OUTDATED: The stack - // isn't currently up to date with the stack set because: + // * OUTDATED: The stack isn't + // currently up to date with the stack set because: // - // * The associated - // stack failed during a CreateStackSet or UpdateStackSet operation. + // * The associated stack failed + // during a CreateStackSet or UpdateStackSet operation. // - // * The - // stack was part of a CreateStackSet or UpdateStackSet operation that failed or - // was stopped before the stack was created or updated. + // * The stack was part of a + // CreateStackSet or UpdateStackSet operation that failed or was stopped before the + // stack was created or updated. // - // * CURRENT: The stack - // is currently up to date with the stack set. + // * CURRENT: The stack is currently up to date with + // the stack set. Status StackInstanceStatus // The explanation for the specific status code that is assigned to this stack @@ -871,26 +871,26 @@ type StackInstanceComprehensiveStatus struct { // cancelled. This is either because a user has stopped the stack set operation, or // because the failure tolerance of the stack set operation has been exceeded. // - // - // * FAILED: The operation in the specified account and Region failed. If the stack + // * + // FAILED: The operation in the specified account and Region failed. 
If the stack // set operation fails in enough accounts within a Region, the failure tolerance // for the stack set operation as a whole might be exceeded. // - // * INOPERABLE: A + // * INOPERABLE: A // DeleteStackInstances operation has failed and left the stack in an unstable // state. Stacks in this state are excluded from further UpdateStackSet operations. // You might need to perform a DeleteStackInstances operation, with RetainStacks - // set to true, to delete the stack instance, and then delete the stack manually. - // + // set to true, to delete the stack instance, and then delete the stack + // manually. // - // * PENDING: The operation in the specified account and Region has yet to start. + // * PENDING: The operation in the specified account and Region has yet + // to start. // + // * RUNNING: The operation in the specified account and Region is + // currently in progress. // - // * RUNNING: The operation in the specified account and Region is currently in - // progress. - // - // * SUCCEEDED: The operation in the specified account and Region - // completed successfully. + // * SUCCEEDED: The operation in the specified account and + // Region completed successfully. DetailedStatus StackInstanceDetailedStatus } @@ -914,20 +914,20 @@ type StackInstanceSummary struct { // Status of the stack instance's actual configuration compared to the expected // template and parameter configuration of the stack set to which it belongs. // - // - // * DRIFTED: The stack differs from the expected template and parameter + // * + // DRIFTED: The stack differs from the expected template and parameter // configuration of the stack set to which it belongs. A stack instance is // considered to have drifted if one or more of the resources in the associated // stack have drifted. // - // * NOT_CHECKED: AWS CloudFormation has not checked if - // the stack instance differs from its expected stack set configuration. + // * NOT_CHECKED: AWS CloudFormation has not checked if the + // stack instance differs from its expected stack set configuration. // - // * - // IN_SYNC: The stack instance's actual configuration matches its expected stack - // set configuration. + // * IN_SYNC: + // The stack instance's actual configuration matches its expected stack set + // configuration. // - // * UNKNOWN: This value is reserved for future use. + // * UNKNOWN: This value is reserved for future use. DriftStatus StackDriftStatus // Most recent time when CloudFormation performed a drift detection operation on @@ -956,24 +956,24 @@ type StackInstanceSummary struct { // The status of the stack instance, in terms of its synchronization with its // associated stack set. // - // * INOPERABLE: A DeleteStackInstances operation has - // failed and left the stack in an unstable state. Stacks in this state are - // excluded from further UpdateStackSet operations. You might need to perform a + // * INOPERABLE: A DeleteStackInstances operation has failed + // and left the stack in an unstable state. Stacks in this state are excluded from + // further UpdateStackSet operations. You might need to perform a // DeleteStackInstances operation, with RetainStacks set to true, to delete the // stack instance, and then delete the stack manually. // - // * OUTDATED: The stack - // isn't currently up to date with the stack set because: + // * OUTDATED: The stack isn't + // currently up to date with the stack set because: // - // * The associated - // stack failed during a CreateStackSet or UpdateStackSet operation. 
+ // * The associated stack failed + // during a CreateStackSet or UpdateStackSet operation. // - // * The - // stack was part of a CreateStackSet or UpdateStackSet operation that failed or - // was stopped before the stack was created or updated. + // * The stack was part of a + // CreateStackSet or UpdateStackSet operation that failed or was stopped before the + // stack was created or updated. // - // * CURRENT: The stack - // is currently up to date with the stack set. + // * CURRENT: The stack is currently up to date with + // the stack set. Status StackInstanceStatus // The explanation for the specific status code assigned to this stack instance. @@ -1117,19 +1117,18 @@ type StackResourceDrift struct { // Status of the resource's actual configuration compared to its expected // configuration // - // * DELETED: The resource differs from its expected template + // * DELETED: The resource differs from its expected template // configuration because the resource has been deleted. // - // * MODIFIED: One or - // more resource properties differ from their expected values (as defined in the - // stack template and any values specified as template parameters). + // * MODIFIED: One or more + // resource properties differ from their expected values (as defined in the stack + // template and any values specified as template parameters). // - // * IN_SYNC: - // The resources's actual configuration matches its expected template - // configuration. + // * IN_SYNC: The + // resources's actual configuration matches its expected template configuration. // - // * NOT_CHECKED: AWS CloudFormation does not currently return - // this value. + // * + // NOT_CHECKED: AWS CloudFormation does not currently return this value. // // This member is required. StackResourceDriftStatus StackResourceDriftStatus @@ -1175,20 +1174,20 @@ type StackResourceDriftInformation struct { // Status of the resource's actual configuration compared to its expected // configuration // - // * DELETED: The resource differs from its expected - // configuration in that it has been deleted. + // * DELETED: The resource differs from its expected configuration + // in that it has been deleted. // - // * MODIFIED: The resource differs - // from its expected configuration. + // * MODIFIED: The resource differs from its expected + // configuration. // - // * NOT_CHECKED: AWS CloudFormation has not - // checked if the resource differs from its expected configuration. Any resources - // that do not currently support drift detection have a status of NOT_CHECKED. For - // more information, see Resources that Support Drift Detection + // * NOT_CHECKED: AWS CloudFormation has not checked if the + // resource differs from its expected configuration. Any resources that do not + // currently support drift detection have a status of NOT_CHECKED. For more + // information, see Resources that Support Drift Detection // (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-stack-drift-resource-list.html). // - // - // * IN_SYNC: The resources's actual configuration matches its expected + // * + // IN_SYNC: The resources's actual configuration matches its expected // configuration. // // This member is required. @@ -1206,16 +1205,16 @@ type StackResourceDriftInformationSummary struct { // Status of the resource's actual configuration compared to its expected // configuration // - // * DELETED: The resource differs from its expected - // configuration in that it has been deleted. 
+ // * DELETED: The resource differs from its expected configuration + // in that it has been deleted. // - // * MODIFIED: The resource differs - // from its expected configuration. + // * MODIFIED: The resource differs from its expected + // configuration. // - // * NOT_CHECKED: AWS CloudFormation has not - // checked if the resource differs from its expected configuration. Any resources - // that do not currently support drift detection have a status of NOT_CHECKED. For - // more information, see Resources that Support Drift Detection + // * NOT_CHECKED: AWS CloudFormation has not checked if the + // resource differs from its expected configuration. Any resources that do not + // currently support drift detection have a status of NOT_CHECKED. For more + // information, see Resources that Support Drift Detection // (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-stack-drift-resource-list.html). // If you performed an ContinueUpdateRollback operation on a stack, any resources // included in ResourcesToSkip will also have a status of NOT_CHECKED. For more @@ -1224,7 +1223,7 @@ type StackResourceDriftInformationSummary struct { // (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-continueupdaterollback.html) // in the AWS CloudFormation User Guide. // - // * IN_SYNC: The resources's actual + // * IN_SYNC: The resources's actual // configuration matches its expected configuration. // // This member is required. @@ -1321,15 +1320,15 @@ type StackSet struct { // Describes how the IAM roles required for stack set operations are created. // - // - // * With self-managed permissions, you must create the administrator and execution + // * + // With self-managed permissions, you must create the administrator and execution // roles required to deploy to target accounts. For more information, see Grant // Self-Managed Stack Set Permissions // (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/stacksets-prereqs-self-managed.html). // - // - // * With service-managed permissions, StackSets automatically creates the IAM - // roles required to deploy to accounts managed by AWS Organizations. For more + // * + // With service-managed permissions, StackSets automatically creates the IAM roles + // required to deploy to accounts managed by AWS Organizations. For more // information, see Grant Service-Managed Stack Set Permissions // (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/stacksets-prereqs-service-managed.html). PermissionModel PermissionModels @@ -1373,21 +1372,21 @@ type StackSetDriftDetectionDetails struct { // The status of the stack set drift detection operation. // - // * COMPLETED: The - // drift detection operation completed without failing on any stack instances. - // + // * COMPLETED: The drift + // detection operation completed without failing on any stack instances. // - // * FAILED: The drift detection operation exceeded the specified failure - // tolerance. + // * FAILED: + // The drift detection operation exceeded the specified failure tolerance. // - // * PARTIAL_SUCCESS: The drift detection operation completed - // without exceeding the failure tolerance for the operation. + // * + // PARTIAL_SUCCESS: The drift detection operation completed without exceeding the + // failure tolerance for the operation. // - // * IN_PROGRESS: - // The drift detection operation is currently being performed. + // * IN_PROGRESS: The drift detection + // operation is currently being performed. 
// - // * STOPPED: The - // user has cancelled the drift detection operation. + // * STOPPED: The user has cancelled the + // drift detection operation. DriftDetectionStatus StackSetDriftDetectionStatus // Status of the stack set's actual configuration compared to its expected template @@ -1395,17 +1394,17 @@ type StackSetDriftDetectionDetails struct { // more of its stack instances have drifted from their expected template and // parameter configuration. // - // * DRIFTED: One or more of the stack instances + // * DRIFTED: One or more of the stack instances // belonging to the stack set stack differs from the expected template and // parameter configuration. A stack instance is considered to have drifted if one // or more of the resources in the associated stack have drifted. // - // * - // NOT_CHECKED: AWS CloudFormation has not checked the stack set for drift. + // * NOT_CHECKED: + // AWS CloudFormation has not checked the stack set for drift. // - // * - // IN_SYNC: All of the stack instances belonging to the stack set stack match from - // the expected template and parameter configuration. + // * IN_SYNC: All of + // the stack instances belonging to the stack set stack match from the expected + // template and parameter configuration. DriftStatus StackSetDriftStatus // The number of stack instances that have drifted from the expected template and @@ -1432,16 +1431,16 @@ type StackSetDriftDetectionDetails struct { // The total number of stack instances belonging to this stack set. The total // number of stack instances is equal to the total of: // - // * Stack instances that + // * Stack instances that // match the stack set configuration. // - // * Stack instances that have drifted from - // the stack set configuration. + // * Stack instances that have drifted from the + // stack set configuration. // - // * Stack instances where the drift detection - // operation has failed. + // * Stack instances where the drift detection operation + // has failed. // - // * Stack instances currently being checked for drift. + // * Stack instances currently being checked for drift. TotalStackInstancesCount *int32 } @@ -1509,7 +1508,7 @@ type StackSetOperation struct { // The status of the operation. // - // * FAILED: The operation exceeded the specified + // * FAILED: The operation exceeded the specified // failure tolerance. The failure tolerance value that you've set for an operation // is applied for each Region during stack create and update operations. If the // number of failed stacks within a Region exceeds the failure tolerance, the @@ -1517,24 +1516,24 @@ type StackSetOperation struct { // status of the operation as a whole to FAILED, and AWS CloudFormation cancels the // operation in any remaining Regions. // - // * QUEUED: [Service-managed permissions] - // For automatic deployments that require a sequence of operations, the operation - // is queued to be performed. For more information, see the stack set operation - // status codes + // * QUEUED: [Service-managed permissions] For + // automatic deployments that require a sequence of operations, the operation is + // queued to be performed. For more information, see the stack set operation status + // codes // (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/stacksets-concepts.html#stackset-status-codes) // in the AWS CloudFormation User Guide. // - // * RUNNING: The operation is currently + // * RUNNING: The operation is currently // being performed. // - // * STOPPED: The user has cancelled the operation. 
+ // * STOPPED: The user has cancelled the operation. // - // * - // STOPPING: The operation is in the process of stopping, at user request. + // * STOPPING: + // The operation is in the process of stopping, at user request. // - // * - // SUCCEEDED: The operation completed creating or updating all the specified stacks - // without exceeding the failure tolerance for the operation. + // * SUCCEEDED: The + // operation completed creating or updating all the specified stacks without + // exceeding the failure tolerance for the operation. Status StackSetOperationStatus } @@ -1607,24 +1606,24 @@ type StackSetOperationResultSummary struct { // The result status of the stack set operation for the given account in the given // Region. // - // * CANCELLED: The operation in the specified account and Region has - // been cancelled. This is either because a user has stopped the stack set - // operation, or because the failure tolerance of the stack set operation has been - // exceeded. - // - // * FAILED: The operation in the specified account and Region - // failed. If the stack set operation fails in enough accounts within a Region, the - // failure tolerance for the stack set operation as a whole might be exceeded. + // * CANCELLED: The operation in the specified account and Region has been + // cancelled. This is either because a user has stopped the stack set operation, or + // because the failure tolerance of the stack set operation has been exceeded. // + // * + // FAILED: The operation in the specified account and Region failed. If the stack + // set operation fails in enough accounts within a Region, the failure tolerance + // for the stack set operation as a whole might be exceeded. // - // * RUNNING: The operation in the specified account and Region is currently in - // progress. + // * RUNNING: The + // operation in the specified account and Region is currently in progress. // - // * PENDING: The operation in the specified account and Region has - // yet to start. + // * + // PENDING: The operation in the specified account and Region has yet to start. // - // * SUCCEEDED: The operation in the specified account and - // Region completed successfully. + // * + // SUCCEEDED: The operation in the specified account and Region completed + // successfully. Status StackSetOperationResultStatus // The reason for the assigned result status. @@ -1657,7 +1656,7 @@ type StackSetOperationSummary struct { // The overall status of the operation. // - // * FAILED: The operation exceeded the + // * FAILED: The operation exceeded the // specified failure tolerance. The failure tolerance value that you've set for an // operation is applied for each Region during stack create and update operations. // If the number of failed stacks within a Region exceeds the failure tolerance, @@ -1665,24 +1664,24 @@ type StackSetOperationSummary struct { // the status of the operation as a whole to FAILED, and AWS CloudFormation cancels // the operation in any remaining Regions. // - // * QUEUED: [Service-managed - // permissions] For automatic deployments that require a sequence of operations, - // the operation is queued to be performed. For more information, see the stack set - // operation status codes + // * QUEUED: [Service-managed permissions] + // For automatic deployments that require a sequence of operations, the operation + // is queued to be performed. 
For more information, see the stack set operation + // status codes // (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/stacksets-concepts.html#stackset-status-codes) // in the AWS CloudFormation User Guide. // - // * RUNNING: The operation is currently + // * RUNNING: The operation is currently // being performed. // - // * STOPPED: The user has cancelled the operation. + // * STOPPED: The user has cancelled the operation. // - // * - // STOPPING: The operation is in the process of stopping, at user request. + // * STOPPING: + // The operation is in the process of stopping, at user request. // - // * - // SUCCEEDED: The operation completed creating or updating all the specified stacks - // without exceeding the failure tolerance for the operation. + // * SUCCEEDED: The + // operation completed creating or updating all the specified stacks without + // exceeding the failure tolerance for the operation. Status StackSetOperationStatus } @@ -1703,20 +1702,20 @@ type StackSetSummary struct { // more of its stack instances have drifted from their expected template and // parameter configuration. // - // * DRIFTED: One or more of the stack instances + // * DRIFTED: One or more of the stack instances // belonging to the stack set stack differs from the expected template and // parameter configuration. A stack instance is considered to have drifted if one // or more of the resources in the associated stack have drifted. // - // * - // NOT_CHECKED: AWS CloudFormation has not checked the stack set for drift. + // * NOT_CHECKED: + // AWS CloudFormation has not checked the stack set for drift. // - // * - // IN_SYNC: All of the stack instances belonging to the stack set stack match from - // the expected template and parameter configuration. + // * IN_SYNC: All of + // the stack instances belonging to the stack set stack match from the expected + // template and parameter configuration. // - // * UNKNOWN: This value is - // reserved for future use. + // * UNKNOWN: This value is reserved for + // future use. DriftStatus StackDriftStatus // Most recent time when CloudFormation performed a drift detection operation on @@ -1726,15 +1725,15 @@ type StackSetSummary struct { // Describes how the IAM roles required for stack set operations are created. // - // - // * With self-managed permissions, you must create the administrator and execution + // * + // With self-managed permissions, you must create the administrator and execution // roles required to deploy to target accounts. For more information, see Grant // Self-Managed Stack Set Permissions // (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/stacksets-prereqs-self-managed.html). // - // - // * With service-managed permissions, StackSets automatically creates the IAM - // roles required to deploy to accounts managed by AWS Organizations. For more + // * + // With service-managed permissions, StackSets automatically creates the IAM roles + // required to deploy to accounts managed by AWS Organizations. For more // information, see Grant Service-Managed Stack Set Permissions // (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/stacksets-prereqs-service-managed.html). PermissionModel PermissionModels diff --git a/service/cloudfront/api_op_CreateCachePolicy.go b/service/cloudfront/api_op_CreateCachePolicy.go index 42d0854de31..558f1e40d7e 100644 --- a/service/cloudfront/api_op_CreateCachePolicy.go +++ b/service/cloudfront/api_op_CreateCachePolicy.go @@ -15,12 +15,12 @@ import ( // one or more cache behaviors. 
When it’s attached to a cache behavior, the cache // policy determines the following: // -// * The values that CloudFront includes in -// the cache key. These values can include HTTP headers, cookies, and URL query +// * The values that CloudFront includes in the +// cache key. These values can include HTTP headers, cookies, and URL query // strings. CloudFront uses the cache key to find an object in its cache that it // can return to the viewer. // -// * The default, minimum, and maximum time to live +// * The default, minimum, and maximum time to live // (TTL) values that you want objects to stay in the CloudFront cache. // // The diff --git a/service/cloudfront/api_op_CreateOriginRequestPolicy.go b/service/cloudfront/api_op_CreateOriginRequestPolicy.go index ca524bdcbd9..3342ccaeac9 100644 --- a/service/cloudfront/api_op_CreateOriginRequestPolicy.go +++ b/service/cloudfront/api_op_CreateOriginRequestPolicy.go @@ -17,23 +17,23 @@ import ( // includes in requests that it sends to the origin. Each request that CloudFront // sends to the origin includes the following: // -// * The request body and the URL -// path (without the domain name) from the viewer request. +// * The request body and the URL path +// (without the domain name) from the viewer request. // -// * The headers that +// * The headers that // CloudFront automatically includes in every origin request, including Host, // User-Agent, and X-Amz-Cf-Id. // -// * All HTTP headers, cookies, and URL query -// strings that are specified in the cache policy or the origin request policy. -// These can include items from the viewer request and, in the case of headers, -// additional ones that are added by CloudFront. +// * All HTTP headers, cookies, and URL query strings +// that are specified in the cache policy or the origin request policy. These can +// include items from the viewer request and, in the case of headers, additional +// ones that are added by CloudFront. // -// CloudFront sends a request when -// it can’t find a valid object in its cache that matches the request. If you want -// to send values to the origin and also include them in the cache key, use -// CachePolicy. For more information about origin request policies, see Controlling -// origin requests +// CloudFront sends a request when it can’t +// find a valid object in its cache that matches the request. If you want to send +// values to the origin and also include them in the cache key, use CachePolicy. +// For more information about origin request policies, see Controlling origin +// requests // (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-origin-requests.html) // in the Amazon CloudFront Developer Guide. func (c *Client) CreateOriginRequestPolicy(ctx context.Context, params *CreateOriginRequestPolicyInput, optFns ...func(*Options)) (*CreateOriginRequestPolicyOutput, error) { diff --git a/service/cloudfront/api_op_DeleteDistribution.go b/service/cloudfront/api_op_DeleteDistribution.go index b70b832293b..b98800e202e 100644 --- a/service/cloudfront/api_op_DeleteDistribution.go +++ b/service/cloudfront/api_op_DeleteDistribution.go @@ -30,40 +30,40 @@ func (c *Client) DeleteDistribution(ctx context.Context, params *DeleteDistribut // CloudFront API, perform the following steps. 
To delete a web distribution using // the CloudFront API: // -// * Disable the web distribution +// * Disable the web distribution // -// * Submit a GET -// Distribution Config request to get the current configuration and the Etag header -// for the distribution. +// * Submit a GET Distribution +// Config request to get the current configuration and the Etag header for the +// distribution. // -// * Update the XML document that was returned in the -// response to your GET Distribution Config request to change the value of Enabled -// to false. +// * Update the XML document that was returned in the response to +// your GET Distribution Config request to change the value of Enabled to false. // -// * Submit a PUT Distribution Config request to update the -// configuration for your distribution. In the request body, include the XML -// document that you updated in Step 3. Set the value of the HTTP If-Match header -// to the value of the ETag header that CloudFront returned when you submitted the -// GET Distribution Config request in Step 2. +// * +// Submit a PUT Distribution Config request to update the configuration for your +// distribution. In the request body, include the XML document that you updated in +// Step 3. Set the value of the HTTP If-Match header to the value of the ETag +// header that CloudFront returned when you submitted the GET Distribution Config +// request in Step 2. // -// * Review the response to the PUT -// Distribution Config request to confirm that the distribution was successfully -// disabled. +// * Review the response to the PUT Distribution Config request +// to confirm that the distribution was successfully disabled. // -// * Submit a GET Distribution request to confirm that your changes -// have propagated. When propagation is complete, the value of Status is -// Deployed. +// * Submit a GET +// Distribution request to confirm that your changes have propagated. When +// propagation is complete, the value of Status is Deployed. // -// * Submit a DELETE Distribution request. Set the value of the HTTP -// If-Match header to the value of the ETag header that CloudFront returned when -// you submitted the GET Distribution Config request in Step 6. +// * Submit a DELETE +// Distribution request. Set the value of the HTTP If-Match header to the value of +// the ETag header that CloudFront returned when you submitted the GET Distribution +// Config request in Step 6. // -// * Review the -// response to your DELETE Distribution request to confirm that the distribution -// was successfully deleted. +// * Review the response to your DELETE Distribution +// request to confirm that the distribution was successfully deleted. // -// For information about deleting a distribution using -// the CloudFront console, see Deleting a Distribution +// For +// information about deleting a distribution using the CloudFront console, see +// Deleting a Distribution // (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/HowToDeleteDistribution.html) // in the Amazon CloudFront Developer Guide. type DeleteDistributionInput struct { diff --git a/service/cloudfront/api_op_DeleteStreamingDistribution.go b/service/cloudfront/api_op_DeleteStreamingDistribution.go index 7c67560fea4..6ff4491b5cd 100644 --- a/service/cloudfront/api_op_DeleteStreamingDistribution.go +++ b/service/cloudfront/api_op_DeleteStreamingDistribution.go @@ -14,42 +14,41 @@ import ( // CloudFront API, perform the following steps. 
To delete an RTMP distribution // using the CloudFront API: // -// * Disable the RTMP distribution. +// * Disable the RTMP distribution. // -// * Submit a -// GET Streaming Distribution Config request to get the current configuration and -// the Etag header for the distribution. +// * Submit a GET +// Streaming Distribution Config request to get the current configuration and the +// Etag header for the distribution. // -// * Update the XML document that was -// returned in the response to your GET Streaming Distribution Config request to -// change the value of Enabled to false. +// * Update the XML document that was returned +// in the response to your GET Streaming Distribution Config request to change the +// value of Enabled to false. // -// * Submit a PUT Streaming Distribution -// Config request to update the configuration for your distribution. In the request -// body, include the XML document that you updated in Step 3. Then set the value of -// the HTTP If-Match header to the value of the ETag header that CloudFront -// returned when you submitted the GET Streaming Distribution Config request in -// Step 2. +// * Submit a PUT Streaming Distribution Config request +// to update the configuration for your distribution. In the request body, include +// the XML document that you updated in Step 3. Then set the value of the HTTP +// If-Match header to the value of the ETag header that CloudFront returned when +// you submitted the GET Streaming Distribution Config request in Step 2. // -// * Review the response to the PUT Streaming Distribution Config -// request to confirm that the distribution was successfully disabled. +// * Review +// the response to the PUT Streaming Distribution Config request to confirm that +// the distribution was successfully disabled. // -// * -// Submit a GET Streaming Distribution Config request to confirm that your changes -// have propagated. When propagation is complete, the value of Status is -// Deployed. +// * Submit a GET Streaming +// Distribution Config request to confirm that your changes have propagated. When +// propagation is complete, the value of Status is Deployed. // -// * Submit a DELETE Streaming Distribution request. Set the value -// of the HTTP If-Match header to the value of the ETag header that CloudFront -// returned when you submitted the GET Streaming Distribution Config request in -// Step 2. +// * Submit a DELETE +// Streaming Distribution request. Set the value of the HTTP If-Match header to the +// value of the ETag header that CloudFront returned when you submitted the GET +// Streaming Distribution Config request in Step 2. // -// * Review the response to your DELETE Streaming Distribution request -// to confirm that the distribution was successfully deleted. +// * Review the response to your +// DELETE Streaming Distribution request to confirm that the distribution was +// successfully deleted. // -// For information -// about deleting a distribution using the CloudFront console, see Deleting a -// Distribution +// For information about deleting a distribution using the +// CloudFront console, see Deleting a Distribution // (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/HowToDeleteDistribution.html) // in the Amazon CloudFront Developer Guide. 
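The two doc comments above walk through the same disable-then-delete sequence for web and RTMP distributions. For the web-distribution case, a minimal sketch of that sequence with this client looks roughly like the following; the GetDistributionConfig, UpdateDistribution, and DeleteDistribution input and output field names used here (Id, IfMatch, ETag, DistributionConfig, Enabled) are assumed from the generated shapes rather than shown in this patch, and the wait for Status to reach Deployed is only noted in a comment.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/cloudfront"
)

// disableAndDeleteDistribution follows the steps in the comments above for a web
// distribution: fetch the current config and ETag, set Enabled to false, push the
// full configuration back with If-Match, then delete. Field names are assumptions.
func disableAndDeleteDistribution(ctx context.Context, cf *cloudfront.Client, id string) error {
	// Step 2: get the current configuration and the ETag header.
	get, err := cf.GetDistributionConfig(ctx, &cloudfront.GetDistributionConfigInput{
		Id: aws.String(id),
	})
	if err != nil {
		return err
	}

	// Step 3: change Enabled to false in the returned configuration.
	cfg := get.DistributionConfig
	cfg.Enabled = aws.Bool(false)

	// Step 4: submit the entire updated configuration, with If-Match set to the
	// ETag from step 2.
	upd, err := cf.UpdateDistribution(ctx, &cloudfront.UpdateDistributionInput{
		Id:                 aws.String(id),
		IfMatch:            get.ETag,
		DistributionConfig: cfg,
	})
	if err != nil {
		return err
	}

	// Steps 5 and 6: a real caller would poll GetDistribution here until Status is
	// "Deployed"; that wait is omitted from this sketch.

	// Step 7: delete. The comment above re-fetches the config to obtain a fresh
	// ETag; the ETag returned by UpdateDistribution serves the same purpose here.
	_, err = cf.DeleteDistribution(ctx, &cloudfront.DeleteDistributionInput{
		Id:      aws.String(id),
		IfMatch: upd.ETag,
	})
	return err
}

The RTMP case described in the comment above follows the same shape, using the Streaming Distribution operations instead.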
func (c *Client) DeleteStreamingDistribution(ctx context.Context, params *DeleteStreamingDistributionInput, optFns ...func(*Options)) (*DeleteStreamingDistributionOutput, error) { diff --git a/service/cloudfront/api_op_GetCachePolicy.go b/service/cloudfront/api_op_GetCachePolicy.go index 8f26fa0d92c..da89afa5d88 100644 --- a/service/cloudfront/api_op_GetCachePolicy.go +++ b/service/cloudfront/api_op_GetCachePolicy.go @@ -13,13 +13,13 @@ import ( // Gets a cache policy, including the following metadata: // -// * The policy’s +// * The policy’s // identifier. // -// * The date and time when the policy was last modified. +// * The date and time when the policy was last modified. // -// To get -// a cache policy, you must provide the policy’s identifier. If the cache policy is +// To get a +// cache policy, you must provide the policy’s identifier. If the cache policy is // attached to a distribution’s cache behavior, you can get the policy’s identifier // using ListDistributions or GetDistribution. If the cache policy is not attached // to a cache behavior, you can get the identifier using ListCachePolicies. diff --git a/service/cloudfront/api_op_GetOriginRequestPolicy.go b/service/cloudfront/api_op_GetOriginRequestPolicy.go index f1d54cfc002..f29bbbda802 100644 --- a/service/cloudfront/api_op_GetOriginRequestPolicy.go +++ b/service/cloudfront/api_op_GetOriginRequestPolicy.go @@ -13,17 +13,17 @@ import ( // Gets an origin request policy, including the following metadata: // -// * The -// policy’s identifier. +// * The policy’s +// identifier. // -// * The date and time when the policy was last -// modified. +// * The date and time when the policy was last modified. // -// To get an origin request policy, you must provide the policy’s -// identifier. If the origin request policy is attached to a distribution’s cache -// behavior, you can get the policy’s identifier using ListDistributions or -// GetDistribution. If the origin request policy is not attached to a cache -// behavior, you can get the identifier using ListOriginRequestPolicies. +// To get an +// origin request policy, you must provide the policy’s identifier. If the origin +// request policy is attached to a distribution’s cache behavior, you can get the +// policy’s identifier using ListDistributions or GetDistribution. If the origin +// request policy is not attached to a cache behavior, you can get the identifier +// using ListOriginRequestPolicies. func (c *Client) GetOriginRequestPolicy(ctx context.Context, params *GetOriginRequestPolicyInput, optFns ...func(*Options)) (*GetOriginRequestPolicyOutput, error) { if params == nil { params = &GetOriginRequestPolicyInput{} diff --git a/service/cloudfront/api_op_ListCachePolicies.go b/service/cloudfront/api_op_ListCachePolicies.go index 980175e5601..f8ba22b56da 100644 --- a/service/cloudfront/api_op_ListCachePolicies.go +++ b/service/cloudfront/api_op_ListCachePolicies.go @@ -47,10 +47,10 @@ type ListCachePoliciesInput struct { // A filter to return only the specified kinds of cache policies. Valid values // are: // - // * managed – Returns only the managed policies created by AWS. + // * managed – Returns only the managed policies created by AWS. // - // * - // custom – Returns only the custom policies created in your AWS account. + // * custom – + // Returns only the custom policies created in your AWS account. 
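The Type filter documented here takes the raw values managed or custom. A small sketch of paging through the custom policies and collecting their identifiers (the identifiers that GetCachePolicy and GetCachePolicyConfig expect, as described above) might look like this; the CachePolicyList, CachePolicySummary, Marker, and NextMarker names are assumptions based on the usual generated CloudFront list shapes and are not shown in this hunk.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/service/cloudfront"
	"github.com/aws/aws-sdk-go-v2/service/cloudfront/types"
)

// listCustomCachePolicyIDs pages through ListCachePolicies with the "custom"
// filter described above and returns the policy identifiers, which can then be
// passed to GetCachePolicy or GetCachePolicyConfig.
func listCustomCachePolicyIDs(ctx context.Context, cf *cloudfront.Client) ([]string, error) {
	var ids []string
	var marker *string
	for {
		out, err := cf.ListCachePolicies(ctx, &cloudfront.ListCachePoliciesInput{
			// The generated enum constant (likely types.CachePolicyTypeCustom) is the
			// usual spelling; the raw value from the doc comment is used here to avoid
			// guessing a constant name that this patch does not show.
			Type:   types.CachePolicyType("custom"),
			Marker: marker,
		})
		if err != nil {
			return nil, err
		}
		if out.CachePolicyList == nil {
			break
		}
		for _, summary := range out.CachePolicyList.Items {
			if summary.CachePolicy != nil && summary.CachePolicy.Id != nil {
				ids = append(ids, *summary.CachePolicy.Id)
			}
		}
		if out.CachePolicyList.NextMarker == nil {
			break
		}
		marker = out.CachePolicyList.NextMarker
	}
	return ids, nil
}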
Type types.CachePolicyType } diff --git a/service/cloudfront/api_op_ListOriginRequestPolicies.go b/service/cloudfront/api_op_ListOriginRequestPolicies.go index 8a6831ba008..b5c682ebb15 100644 --- a/service/cloudfront/api_op_ListOriginRequestPolicies.go +++ b/service/cloudfront/api_op_ListOriginRequestPolicies.go @@ -48,10 +48,10 @@ type ListOriginRequestPoliciesInput struct { // A filter to return only the specified kinds of origin request policies. Valid // values are: // - // * managed – Returns only the managed policies created by AWS. + // * managed – Returns only the managed policies created by AWS. // - // - // * custom – Returns only the custom policies created in your AWS account. + // * + // custom – Returns only the custom policies created in your AWS account. Type types.OriginRequestPolicyType } diff --git a/service/cloudfront/api_op_UpdateCachePolicy.go b/service/cloudfront/api_op_UpdateCachePolicy.go index c07bfc1442f..b2cfca36620 100644 --- a/service/cloudfront/api_op_UpdateCachePolicy.go +++ b/service/cloudfront/api_op_UpdateCachePolicy.go @@ -16,15 +16,15 @@ import ( // request. You cannot update some fields independent of others. To update a cache // policy configuration: // -// * Use GetCachePolicyConfig to get the current +// * Use GetCachePolicyConfig to get the current // configuration. // -// * Locally modify the fields in the cache policy -// configuration that you want to update. +// * Locally modify the fields in the cache policy configuration +// that you want to update. // -// * Call UpdateCachePolicy by -// providing the entire cache policy configuration, including the fields that you -// modified and those that you didn’t. +// * Call UpdateCachePolicy by providing the entire cache +// policy configuration, including the fields that you modified and those that you +// didn’t. func (c *Client) UpdateCachePolicy(ctx context.Context, params *UpdateCachePolicyInput, optFns ...func(*Options)) (*UpdateCachePolicyOutput, error) { if params == nil { params = &UpdateCachePolicyInput{} diff --git a/service/cloudfront/api_op_UpdateDistribution.go b/service/cloudfront/api_op_UpdateDistribution.go index 861e8eb574e..885986aa6b1 100644 --- a/service/cloudfront/api_op_UpdateDistribution.go +++ b/service/cloudfront/api_op_UpdateDistribution.go @@ -27,68 +27,67 @@ import ( // in the Amazon CloudFront Developer Guide. To update a web distribution using the // CloudFront API // -// * Submit a GetDistributionConfig +// * Submit a GetDistributionConfig // (https://docs.aws.amazon.com/cloudfront/latest/APIReference/API_GetDistributionConfig.html) // request to get the current configuration and an Etag header // -// for the +// for the // distribution. If you update the distribution again, you must get a new Etag // header. // -// * Update the XML document that was returned in the response to your +// * Update the XML document that was returned in the response to your // GetDistributionConfig request to include your changes. When you edit the XML // file, be aware of the following: // -// * You must strip out the ETag -// parameter that is returned. +// * You must strip out the ETag parameter that +// is returned. // -// * Additional fields are required when you -// update a distribution. There may be fields included in the XML file for features -// that you haven't configured for your distribution. This is expected and required -// to successfully update the distribution. -// -// * You can't change the value -// of CallerReference. 
If you try to change this value, CloudFront returns an +// * Additional fields are required when you update a distribution. +// There may be fields included in the XML file for features that you haven't +// configured for your distribution. This is expected and required to successfully +// update the distribution. // +// * You can't change the value of CallerReference. If +// you try to change this value, CloudFront returns an // // IllegalUpdate error. // -// * The new configuration replaces the existing -// configuration; the values that you specify in an UpdateDistribution request are -// not merged into your existing configuration. When you add, delete, or replace -// values in an element that allows multiple values (for example, CNAME), you must -// specify all of the values that you want to appear in the updated distribution. -// In addition, +// * The +// new configuration replaces the existing configuration; the values that you +// specify in an UpdateDistribution request are not merged into your existing +// configuration. When you add, delete, or replace values in an element that allows +// multiple values (for example, CNAME), you must specify all of the values that +// you want to appear in the updated distribution. In addition, // -// you must update the corresponding Quantity element. +// you must update +// the corresponding Quantity element. // -// * -// Submit an UpdateDistribution request to update the configuration for your -// distribution: +// * Submit an UpdateDistribution request to +// update the configuration for your distribution: // -// * In the request body, include the XML document that you -// updated in Step 2. The request body must include an +// * In the request body, include +// the XML document that you updated in Step 2. The request body must include +// an // -// XML document with a -// DistributionConfig element. +// XML document with a DistributionConfig element. // -// * Set the value of the HTTP If-Match header -// to the value of the ETag header that CloudFront returned +// * Set the value of the HTTP +// If-Match header to the value of the ETag header that CloudFront returned // -// when you -// submitted the GetDistributionConfig request in Step 1. +// when +// you submitted the GetDistributionConfig request in Step 1. // -// * Review the +// * Review the // response to the UpdateDistribution request to confirm that the configuration // was // -// successfully updated. +// successfully updated. // -// * Optional: Submit a GetDistribution +// * Optional: Submit a GetDistribution // (https://docs.aws.amazon.com/cloudfront/latest/APIReference/API_GetDistribution.html) // request to confirm that your changes have propagated. // -// When propagation is +// When propagation is // complete, the value of Status is Deployed. func (c *Client) UpdateDistribution(ctx context.Context, params *UpdateDistributionInput, optFns ...func(*Options)) (*UpdateDistributionOutput, error) { if params == nil { diff --git a/service/cloudfront/api_op_UpdateKeyGroup.go b/service/cloudfront/api_op_UpdateKeyGroup.go index b0b4564071f..22a00b0c690 100644 --- a/service/cloudfront/api_op_UpdateKeyGroup.go +++ b/service/cloudfront/api_op_UpdateKeyGroup.go @@ -15,15 +15,15 @@ import ( // with the values provided in the request. You cannot update some fields // independent of others. To update a key group: // -// * Get the current key group -// with GetKeyGroup or GetKeyGroupConfig. +// * Get the current key group with +// GetKeyGroup or GetKeyGroupConfig. 
// -// * Locally modify the fields in the -// key group that you want to update. For example, add or remove public key IDs. +// * Locally modify the fields in the key group +// that you want to update. For example, add or remove public key IDs. // -// -// * Call UpdateKeyGroup with the entire key group object, including the fields -// that you modified and those that you didn’t. +// * Call +// UpdateKeyGroup with the entire key group object, including the fields that you +// modified and those that you didn’t. func (c *Client) UpdateKeyGroup(ctx context.Context, params *UpdateKeyGroupInput, optFns ...func(*Options)) (*UpdateKeyGroupOutput, error) { if params == nil { params = &UpdateKeyGroupInput{} diff --git a/service/cloudfront/api_op_UpdateOriginRequestPolicy.go b/service/cloudfront/api_op_UpdateOriginRequestPolicy.go index 164be2bf3b2..1302ce3cbf7 100644 --- a/service/cloudfront/api_op_UpdateOriginRequestPolicy.go +++ b/service/cloudfront/api_op_UpdateOriginRequestPolicy.go @@ -16,16 +16,16 @@ import ( // provided in the request. You cannot update some fields independent of others. To // update an origin request policy configuration: // -// * Use +// * Use // GetOriginRequestPolicyConfig to get the current configuration. // -// * Locally -// modify the fields in the origin request policy configuration that you want to +// * Locally modify +// the fields in the origin request policy configuration that you want to // update. // -// * Call UpdateOriginRequestPolicy by providing the entire origin -// request policy configuration, including the fields that you modified and those -// that you didn’t. +// * Call UpdateOriginRequestPolicy by providing the entire origin request +// policy configuration, including the fields that you modified and those that you +// didn’t. func (c *Client) UpdateOriginRequestPolicy(ctx context.Context, params *UpdateOriginRequestPolicyInput, optFns ...func(*Options)) (*UpdateOriginRequestPolicyOutput, error) { if params == nil { params = &UpdateOriginRequestPolicyInput{} diff --git a/service/cloudfront/api_op_UpdateRealtimeLogConfig.go b/service/cloudfront/api_op_UpdateRealtimeLogConfig.go index 39f159fb2d1..64903cc2b85 100644 --- a/service/cloudfront/api_op_UpdateRealtimeLogConfig.go +++ b/service/cloudfront/api_op_UpdateRealtimeLogConfig.go @@ -16,13 +16,13 @@ import ( // request. You cannot update some parameters independent of others. To update a // real-time log configuration: // -// * Call GetRealtimeLogConfig to get the current +// * Call GetRealtimeLogConfig to get the current // real-time log configuration. // -// * Locally modify the parameters in the -// real-time log configuration that you want to update. +// * Locally modify the parameters in the real-time +// log configuration that you want to update. // -// * Call this API +// * Call this API // (UpdateRealtimeLogConfig) by providing the entire real-time log configuration, // including the parameters that you modified and those that you didn’t. 
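UpdateCachePolicy, UpdateKeyGroup, UpdateOriginRequestPolicy, and UpdateRealtimeLogConfig all describe the same get-modify-update pattern above. A minimal sketch for the cache-policy case, assuming GetCachePolicyConfig returns the configuration together with its ETag and that UpdateCachePolicyInput carries Id and IfMatch fields (names not shown in this patch):

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/cloudfront"
)

// bumpCachePolicyDefaultTTL applies the get-modify-update pattern described in
// the comments above: fetch the entire cache policy configuration, change one
// field locally, then send the whole configuration back with If-Match set to the
// ETag from the get call.
func bumpCachePolicyDefaultTTL(ctx context.Context, cf *cloudfront.Client, policyID string, defaultTTLSeconds int64) error {
	// Get the current configuration and the ETag needed for the conditional update.
	get, err := cf.GetCachePolicyConfig(ctx, &cloudfront.GetCachePolicyConfigInput{
		Id: aws.String(policyID),
	})
	if err != nil {
		return err
	}

	// Locally modify only the field of interest; every other field is sent back
	// unchanged, because the update replaces the whole configuration.
	cfg := get.CachePolicyConfig
	cfg.DefaultTTL = aws.Int64(defaultTTLSeconds)

	_, err = cf.UpdateCachePolicy(ctx, &cloudfront.UpdateCachePolicyInput{
		Id:                aws.String(policyID),
		IfMatch:           get.ETag,
		CachePolicyConfig: cfg,
	})
	return err
}

The other Update operations listed above follow the same three steps with their own Get*Config call and configuration type.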
// diff --git a/service/cloudfront/types/enums.go b/service/cloudfront/types/enums.go index f768b98ccc7..95137e7a8a8 100644 --- a/service/cloudfront/types/enums.go +++ b/service/cloudfront/types/enums.go @@ -251,12 +251,12 @@ type MinimumProtocolVersion string // Enum values for MinimumProtocolVersion const ( - MinimumProtocolVersionSslv3 MinimumProtocolVersion = "SSLv3" - MinimumProtocolVersionTlsv1 MinimumProtocolVersion = "TLSv1" - MinimumProtocolVersionTlsv1_2016 MinimumProtocolVersion = "TLSv1_2016" - MinimumProtocolVersionTlsv11_2016 MinimumProtocolVersion = "TLSv1.1_2016" - MinimumProtocolVersionTlsv12_2018 MinimumProtocolVersion = "TLSv1.2_2018" - MinimumProtocolVersionTlsv12_2019 MinimumProtocolVersion = "TLSv1.2_2019" + MinimumProtocolVersionSslv3 MinimumProtocolVersion = "SSLv3" + MinimumProtocolVersionTlsv1 MinimumProtocolVersion = "TLSv1" + MinimumProtocolVersionTlsv12016 MinimumProtocolVersion = "TLSv1_2016" + MinimumProtocolVersionTlsv112016 MinimumProtocolVersion = "TLSv1.1_2016" + MinimumProtocolVersionTlsv122018 MinimumProtocolVersion = "TLSv1.2_2018" + MinimumProtocolVersionTlsv122019 MinimumProtocolVersion = "TLSv1.2_2019" ) // Values returns all known values for MinimumProtocolVersion. Note that this can @@ -380,9 +380,9 @@ type PriceClass string // Enum values for PriceClass const ( - PriceClassPriceclass_100 PriceClass = "PriceClass_100" - PriceClassPriceclass_200 PriceClass = "PriceClass_200" - PriceClassPriceclass_all PriceClass = "PriceClass_All" + PriceClassPriceclass100 PriceClass = "PriceClass_100" + PriceClassPriceclass200 PriceClass = "PriceClass_200" + PriceClassPriceclassAll PriceClass = "PriceClass_All" ) // Values returns all known values for PriceClass. Note that this can be expanded diff --git a/service/cloudfront/types/types.go b/service/cloudfront/types/types.go index bdc2e9dbb48..e7b0b4c481f 100644 --- a/service/cloudfront/types/types.go +++ b/service/cloudfront/types/types.go @@ -82,21 +82,20 @@ type AliasICPRecordal struct { // ICPRecordalStatus is set to APPROVED for all CNAMEs (aliases) in regions outside // of China. The status values returned are the following: // - // * APPROVED - // indicates that the associated CNAME has a valid ICP recordal number. Multiple - // CNAMEs can be associated with a distribution, and CNAMEs can correspond to - // different ICP recordals. To be marked as APPROVED, that is, valid to use with - // China region, a CNAME must have one ICP recordal number associated with it. - // - // - // * SUSPENDED indicates that the associated CNAME does not have a valid ICP - // recordal number. - // - // * PENDING indicates that CloudFront can't determine the - // ICP recordal status of the CNAME associated with the distribution because there - // was an error in trying to determine the status. You can try again to see if the - // error is resolved in which case CloudFront returns an APPROVED or SUSPENDED - // status. + // * APPROVED indicates + // that the associated CNAME has a valid ICP recordal number. Multiple CNAMEs can + // be associated with a distribution, and CNAMEs can correspond to different ICP + // recordals. To be marked as APPROVED, that is, valid to use with China region, a + // CNAME must have one ICP recordal number associated with it. + // + // * SUSPENDED + // indicates that the associated CNAME does not have a valid ICP recordal + // number. 
+ // + // * PENDING indicates that CloudFront can't determine the ICP recordal + // status of the CNAME associated with the distribution because there was an error + // in trying to determine the status. You can try again to see if the error is + // resolved in which case CloudFront returns an APPROVED or SUSPENDED status. ICPRecordalStatus ICPRecordalStatus } @@ -104,19 +103,19 @@ type AliasICPRecordal struct { // forwards to your Amazon S3 bucket or your custom origin. There are three // choices: // -// * CloudFront forwards only GET and HEAD requests. +// * CloudFront forwards only GET and HEAD requests. // -// * -// CloudFront forwards only GET, HEAD, and OPTIONS requests. +// * CloudFront +// forwards only GET, HEAD, and OPTIONS requests. // -// * CloudFront -// forwards GET, HEAD, OPTIONS, PUT, PATCH, POST, and DELETE requests. +// * CloudFront forwards GET, HEAD, +// OPTIONS, PUT, PATCH, POST, and DELETE requests. // -// If you pick -// the third choice, you may need to restrict access to your Amazon S3 bucket or to -// your custom origin so users can't perform operations that you don't want them -// to. For example, you might not want users to have permissions to delete objects -// from your origin. +// If you pick the third choice, +// you may need to restrict access to your Amazon S3 bucket or to your custom +// origin so users can't perform operations that you don't want them to. For +// example, you might not want users to have permissions to delete objects from +// your origin. type AllowedMethods struct { // A complex type that contains the HTTP methods that you want CloudFront to @@ -135,14 +134,14 @@ type AllowedMethods struct { // A complex type that controls whether CloudFront caches the response to requests // using the specified HTTP methods. There are two choices: // - // * CloudFront - // caches responses to GET and HEAD requests. + // * CloudFront caches + // responses to GET and HEAD requests. // - // * CloudFront caches responses to - // GET, HEAD, and OPTIONS requests. + // * CloudFront caches responses to GET, HEAD, + // and OPTIONS requests. // - // If you pick the second choice for your Amazon - // S3 Origin, you may need to forward Access-Control-Request-Method, + // If you pick the second choice for your Amazon S3 Origin, + // you may need to forward Access-Control-Request-Method, // Access-Control-Request-Headers, and Origin headers for the responses to be // cached correctly. CachedMethods *CachedMethods @@ -195,19 +194,19 @@ type CacheBehavior struct { // TargetOriginId when a request matches the path pattern in PathPattern. You can // specify the following options: // - // * allow-all: Viewers can use HTTP or - // HTTPS. + // * allow-all: Viewers can use HTTP or HTTPS. // - // * redirect-to-https: If a viewer submits an HTTP request, CloudFront - // returns an HTTP status code of 301 (Moved Permanently) to the viewer along with - // the HTTPS URL. The viewer then resubmits the request using the new URL. + // * + // redirect-to-https: If a viewer submits an HTTP request, CloudFront returns an + // HTTP status code of 301 (Moved Permanently) to the viewer along with the HTTPS + // URL. The viewer then resubmits the request using the new URL. // - // * - // https-only: If a viewer sends an HTTP request, CloudFront returns an HTTP status - // code of 403 (Forbidden). + // * https-only: If + // a viewer sends an HTTP request, CloudFront returns an HTTP status code of 403 + // (Forbidden). 
// - // For more information about requiring the HTTPS - // protocol, see Requiring HTTPS Between Viewers and CloudFront + // For more information about requiring the HTTPS protocol, see + // Requiring HTTPS Between Viewers and CloudFront // (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/using-https-viewers-to-cloudfront.html) // in the Amazon CloudFront Developer Guide. The only way to guarantee that viewers // retrieve an object that was fetched from the origin using HTTPS is never to use @@ -226,19 +225,19 @@ type CacheBehavior struct { // forwards to your Amazon S3 bucket or your custom origin. There are three // choices: // - // * CloudFront forwards only GET and HEAD requests. + // * CloudFront forwards only GET and HEAD requests. // - // * - // CloudFront forwards only GET, HEAD, and OPTIONS requests. + // * CloudFront + // forwards only GET, HEAD, and OPTIONS requests. // - // * CloudFront - // forwards GET, HEAD, OPTIONS, PUT, PATCH, POST, and DELETE requests. + // * CloudFront forwards GET, HEAD, + // OPTIONS, PUT, PATCH, POST, and DELETE requests. // - // If you pick - // the third choice, you may need to restrict access to your Amazon S3 bucket or to - // your custom origin so users can't perform operations that you don't want them - // to. For example, you might not want users to have permissions to delete objects - // from your origin. + // If you pick the third choice, + // you may need to restrict access to your Amazon S3 bucket or to your custom + // origin so users can't perform operations that you don't want them to. For + // example, you might not want users to have permissions to delete objects from + // your origin. AllowedMethods *AllowedMethods // The unique identifier of the cache policy that is attached to this cache @@ -393,14 +392,14 @@ type CacheBehaviors struct { // A complex type that controls whether CloudFront caches the response to requests // using the specified HTTP methods. There are two choices: // -// * CloudFront -// caches responses to GET and HEAD requests. +// * CloudFront caches +// responses to GET and HEAD requests. // -// * CloudFront caches responses to -// GET, HEAD, and OPTIONS requests. +// * CloudFront caches responses to GET, HEAD, +// and OPTIONS requests. // -// If you pick the second choice for your Amazon -// S3 Origin, you may need to forward Access-Control-Request-Method, +// If you pick the second choice for your Amazon S3 Origin, +// you may need to forward Access-Control-Request-Method, // Access-Control-Request-Headers, and Origin headers for the responses to be // cached correctly. type CachedMethods struct { @@ -422,20 +421,20 @@ type CachedMethods struct { // A cache policy. When it’s attached to a cache behavior, the cache policy // determines the following: // -// * The values that CloudFront includes in the -// cache key. These values can include HTTP headers, cookies, and URL query -// strings. CloudFront uses the cache key to find an object in its cache that it -// can return to the viewer. +// * The values that CloudFront includes in the cache +// key. These values can include HTTP headers, cookies, and URL query strings. +// CloudFront uses the cache key to find an object in its cache that it can return +// to the viewer. // -// * The default, minimum, and maximum time to live -// (TTL) values that you want objects to stay in the CloudFront cache. +// * The default, minimum, and maximum time to live (TTL) values +// that you want objects to stay in the CloudFront cache. 
// -// The -// headers, cookies, and query strings that are included in the cache key are -// automatically included in requests that CloudFront sends to the origin. -// CloudFront sends a request when it can’t find a valid object in its cache that -// matches the request’s cache key. If you want to send values to the origin but -// not include them in the cache key, use OriginRequestPolicy. +// The headers, cookies, +// and query strings that are included in the cache key are automatically included +// in requests that CloudFront sends to the origin. CloudFront sends a request when +// it can’t find a valid object in its cache that matches the request’s cache key. +// If you want to send values to the origin but not include them in the cache key, +// use OriginRequestPolicy. type CachePolicy struct { // The cache policy configuration. @@ -456,21 +455,20 @@ type CachePolicy struct { // A cache policy configuration. This configuration determines the following: // -// -// * The values that CloudFront includes in the cache key. These values can include +// * +// The values that CloudFront includes in the cache key. These values can include // HTTP headers, cookies, and URL query strings. CloudFront uses the cache key to // find an object in its cache that it can return to the viewer. // -// * The -// default, minimum, and maximum time to live (TTL) values that you want objects to -// stay in the CloudFront cache. +// * The default, +// minimum, and maximum time to live (TTL) values that you want objects to stay in +// the CloudFront cache. // -// The headers, cookies, and query strings that are -// included in the cache key are automatically included in requests that CloudFront -// sends to the origin. CloudFront sends a request when it can’t find a valid -// object in its cache that matches the request’s cache key. If you want to send -// values to the origin but not include them in the cache key, use -// OriginRequestPolicy. +// The headers, cookies, and query strings that are included +// in the cache key are automatically included in requests that CloudFront sends to +// the origin. CloudFront sends a request when it can’t find a valid object in its +// cache that matches the request’s cache key. If you want to send values to the +// origin but not include them in the cache key, use OriginRequestPolicy. type CachePolicyConfig struct { // The minimum amount of time, in seconds, that you want objects to stay in the @@ -530,24 +528,23 @@ type CachePolicyCookiesConfig struct { // and automatically included in requests that CloudFront sends to the origin. // Valid values are: // - // * none – Cookies in viewer requests are not included in - // the cache key and are not automatically included in requests that CloudFront - // sends to the origin. Even when this field is set to none, any cookies that are - // listed in an OriginRequestPolicy are included in origin requests. + // * none – Cookies in viewer requests are not included in the + // cache key and are not automatically included in requests that CloudFront sends + // to the origin. Even when this field is set to none, any cookies that are listed + // in an OriginRequestPolicy are included in origin requests. // - // * - // whitelist – The cookies in viewer requests that are listed in the CookieNames - // type are included in the cache key and automatically included in requests that - // CloudFront sends to the origin. 
+ // * whitelist – The + // cookies in viewer requests that are listed in the CookieNames type are included + // in the cache key and automatically included in requests that CloudFront sends to + // the origin. // - // * allExcept – All cookies in viewer - // requests that are not listed in the CookieNames type are included in the cache - // key and automatically included in requests that CloudFront sends to the - // origin. + // * allExcept – All cookies in viewer requests that are not listed in + // the CookieNames type are included in the cache key and automatically included in + // requests that CloudFront sends to the origin. // - // * all – All cookies in viewer requests are included in the cache - // key and are automatically included in requests that CloudFront sends to the - // origin. + // * all – All cookies in viewer + // requests are included in the cache key and are automatically included in + // requests that CloudFront sends to the origin. // // This member is required. CookieBehavior CachePolicyCookieBehavior @@ -565,12 +562,12 @@ type CachePolicyHeadersConfig struct { // automatically included in requests that CloudFront sends to the origin. Valid // values are: // - // * none – HTTP headers are not included in the cache key and are - // not automatically included in requests that CloudFront sends to the origin. Even + // * none – HTTP headers are not included in the cache key and are not + // automatically included in requests that CloudFront sends to the origin. Even // when this field is set to none, any headers that are listed in an // OriginRequestPolicy are included in origin requests. // - // * whitelist – The HTTP + // * whitelist – The HTTP // headers that are listed in the Headers type are included in the cache key and // are automatically included in requests that CloudFront sends to the origin. // @@ -612,24 +609,24 @@ type CachePolicyQueryStringsConfig struct { // cache key and automatically included in requests that CloudFront sends to the // origin. Valid values are: // - // * none – Query strings in viewer requests are not + // * none – Query strings in viewer requests are not // included in the cache key and are not automatically included in requests that // CloudFront sends to the origin. Even when this field is set to none, any query // strings that are listed in an OriginRequestPolicy are included in origin // requests. // - // * whitelist – The query strings in viewer requests that are - // listed in the QueryStringNames type are included in the cache key and - // automatically included in requests that CloudFront sends to the origin. + // * whitelist – The query strings in viewer requests that are listed in + // the QueryStringNames type are included in the cache key and automatically + // included in requests that CloudFront sends to the origin. // - // * - // allExcept – All query strings in viewer requests that are not listed in the - // QueryStringNames type are included in the cache key and automatically included - // in requests that CloudFront sends to the origin. + // * allExcept – All + // query strings in viewer requests that are not listed in the QueryStringNames + // type are included in the cache key and automatically included in requests that + // CloudFront sends to the origin. // - // * all – All query strings - // in viewer requests are included in the cache key and are automatically included - // in requests that CloudFront sends to the origin. 
+ // * all – All query strings in viewer requests + // are included in the cache key and are automatically included in requests that + // CloudFront sends to the origin. // // This member is required. QueryStringBehavior CachePolicyQueryStringBehavior @@ -878,12 +875,12 @@ type CookiePreference struct { // A complex type that controls: // -// * Whether CloudFront replaces HTTP status -// codes in the 4xx and 5xx range with custom error messages before returning the +// * Whether CloudFront replaces HTTP status codes +// in the 4xx and 5xx range with custom error messages before returning the // response to the viewer. // -// * How long CloudFront caches HTTP status codes in -// the 4xx and 5xx range. +// * How long CloudFront caches HTTP status codes in the +// 4xx and 5xx range. // // For more information about custom error pages, see // Customizing Error Responses @@ -911,17 +908,17 @@ type CustomErrorResponse struct { // CloudFront to return a status code different from the status code that your // origin returned to CloudFront, for example: // - // * Some Internet devices (some + // * Some Internet devices (some // firewalls and corporate proxies, for example) intercept HTTP 4xx and 5xx and // prevent the response from being returned to the viewer. If you substitute 200, // the response typically won't be intercepted. // - // * If you don't care about + // * If you don't care about // distinguishing among different client errors or server errors, you can specify // 400 or 500 as the ResponseCode for all 4xx or 5xx errors. // - // * You might want - // to return a 200 status code (OK) and static website so your customers don't know + // * You might want to + // return a 200 status code (OK) and static website so your customers don't know // that your website is down. // // If you specify a value for ResponseCode, you must @@ -934,15 +931,15 @@ type CustomErrorResponse struct { // your custom error pages in different locations, your distribution must include a // cache behavior for which the following is true: // - // * The value of PathPattern + // * The value of PathPattern // matches the path to your custom error messages. For example, suppose you saved // custom error pages for 4xx errors in an Amazon S3 bucket in a directory named // /4xx-errors. Your distribution must include a cache behavior for which the path // pattern routes requests for your custom error pages to that location, for // example, /4xx-errors/*. // - // * The value of TargetOriginId specifies the value - // of the ID element for the origin that contains your custom error pages. + // * The value of TargetOriginId specifies the value of + // the ID element for the origin that contains your custom error pages. // // If you // specify a value for ResponsePagePath, you must also specify a value for @@ -955,12 +952,12 @@ type CustomErrorResponse struct { // A complex type that controls: // -// * Whether CloudFront replaces HTTP status -// codes in the 4xx and 5xx range with custom error messages before returning the +// * Whether CloudFront replaces HTTP status codes +// in the 4xx and 5xx range with custom error messages before returning the // response to the viewer. // -// * How long CloudFront caches HTTP status codes in -// the 4xx and 5xx range. +// * How long CloudFront caches HTTP status codes in the +// 4xx and 5xx range. 
// // For more information about custom error pages, see // Customizing Error Responses @@ -1015,14 +1012,14 @@ type CustomOriginConfig struct { // Specifies the protocol (HTTP or HTTPS) that CloudFront uses to connect to the // origin. Valid values are: // - // * http-only – CloudFront always uses HTTP to - // connect to the origin. + // * http-only – CloudFront always uses HTTP to connect + // to the origin. // - // * match-viewer – CloudFront connects to the origin - // using the same protocol that the viewer used to connect to CloudFront. + // * match-viewer – CloudFront connects to the origin using the + // same protocol that the viewer used to connect to CloudFront. // - // * - // https-only – CloudFront always uses HTTPS to connect to the origin. + // * https-only – + // CloudFront always uses HTTPS to connect to the origin. // // This member is required. OriginProtocolPolicy OriginProtocolPolicy @@ -1067,19 +1064,19 @@ type DefaultCacheBehavior struct { // TargetOriginId when a request matches the path pattern in PathPattern. You can // specify the following options: // - // * allow-all: Viewers can use HTTP or - // HTTPS. + // * allow-all: Viewers can use HTTP or HTTPS. // - // * redirect-to-https: If a viewer submits an HTTP request, CloudFront - // returns an HTTP status code of 301 (Moved Permanently) to the viewer along with - // the HTTPS URL. The viewer then resubmits the request using the new URL. + // * + // redirect-to-https: If a viewer submits an HTTP request, CloudFront returns an + // HTTP status code of 301 (Moved Permanently) to the viewer along with the HTTPS + // URL. The viewer then resubmits the request using the new URL. // - // * - // https-only: If a viewer sends an HTTP request, CloudFront returns an HTTP status - // code of 403 (Forbidden). + // * https-only: If + // a viewer sends an HTTP request, CloudFront returns an HTTP status code of 403 + // (Forbidden). // - // For more information about requiring the HTTPS - // protocol, see Requiring HTTPS Between Viewers and CloudFront + // For more information about requiring the HTTPS protocol, see + // Requiring HTTPS Between Viewers and CloudFront // (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/using-https-viewers-to-cloudfront.html) // in the Amazon CloudFront Developer Guide. The only way to guarantee that viewers // retrieve an object that was fetched from the origin using HTTPS is never to use @@ -1098,19 +1095,19 @@ type DefaultCacheBehavior struct { // forwards to your Amazon S3 bucket or your custom origin. There are three // choices: // - // * CloudFront forwards only GET and HEAD requests. + // * CloudFront forwards only GET and HEAD requests. // - // * - // CloudFront forwards only GET, HEAD, and OPTIONS requests. + // * CloudFront + // forwards only GET, HEAD, and OPTIONS requests. // - // * CloudFront - // forwards GET, HEAD, OPTIONS, PUT, PATCH, POST, and DELETE requests. + // * CloudFront forwards GET, HEAD, + // OPTIONS, PUT, PATCH, POST, and DELETE requests. // - // If you pick - // the third choice, you may need to restrict access to your Amazon S3 bucket or to - // your custom origin so users can't perform operations that you don't want them - // to. For example, you might not want users to have permissions to delete objects - // from your origin. + // If you pick the third choice, + // you may need to restrict access to your Amazon S3 bucket or to your custom + // origin so users can't perform operations that you don't want them to. 
For + // example, you might not want users to have permissions to delete objects from + // your origin. AllowedMethods *AllowedMethods // The unique identifier of the cache policy that is attached to the default cache @@ -1368,15 +1365,15 @@ type DistributionConfig struct { // A complex type that controls the following: // - // * Whether CloudFront replaces - // HTTP status codes in the 4xx and 5xx range with custom error messages before + // * Whether CloudFront replaces HTTP + // status codes in the 4xx and 5xx range with custom error messages before // returning the response to the viewer. // - // * How long CloudFront caches HTTP - // status codes in the 4xx and 5xx range. + // * How long CloudFront caches HTTP status + // codes in the 4xx and 5xx range. // - // For more information about custom error - // pages, see Customizing Error Responses + // For more information about custom error pages, + // see Customizing Error Responses // (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/custom-error-pages.html) // in the Amazon CloudFront Developer Guide. CustomErrorResponses *CustomErrorResponses @@ -1425,13 +1422,13 @@ type DistributionConfig struct { // need to create a second alias resource record set when both of the following are // true: // - // * You enable IPv6 for the distribution + // * You enable IPv6 for the distribution // - // * You're using alternate - // domain names in the URLs for your objects + // * You're using alternate domain + // names in the URLs for your objects // - // For more information, see Routing - // Traffic to an Amazon CloudFront Web Distribution by Using Your Domain Name + // For more information, see Routing Traffic to + // an Amazon CloudFront Web Distribution by Using Your Domain Name // (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-cloudfront-distribution.html) // in the Amazon Route 53 Developer Guide. If you created a CNAME resource record // set, either with Amazon Route 53 or with another DNS service, you don't need to @@ -2058,15 +2055,15 @@ type GeoRestriction struct { // The method that you want to use to restrict distribution of your content by // country: // - // * none: No geo restriction is enabled, meaning access to content - // is not restricted by client geo location. + // * none: No geo restriction is enabled, meaning access to content is + // not restricted by client geo location. // - // * blacklist: The Location - // elements specify the countries in which you don't want CloudFront to distribute - // your content. + // * blacklist: The Location elements + // specify the countries in which you don't want CloudFront to distribute your + // content. // - // * whitelist: The Location elements specify the countries in - // which you want CloudFront to distribute your content. + // * whitelist: The Location elements specify the countries in which you + // want CloudFront to distribute your content. // // This member is required. RestrictionType GeoRestrictionType @@ -2329,24 +2326,24 @@ type LambdaFunctionAssociation struct { // Specifies the event type that triggers a Lambda function invocation. You can // specify the following values: // - // * viewer-request: The function executes when + // * viewer-request: The function executes when // CloudFront receives a request from a viewer and before it checks to see whether // the requested object is in the edge cache. 
// - // * origin-request: The function + // * origin-request: The function // executes only when CloudFront sends a request to your origin. When the requested // object is in the edge cache, the function doesn't execute. // - // * - // origin-response: The function executes after CloudFront receives a response from - // the origin and before it caches the object in the response. When the requested - // object is in the edge cache, the function doesn't execute. + // * origin-response: + // The function executes after CloudFront receives a response from the origin and + // before it caches the object in the response. When the requested object is in the + // edge cache, the function doesn't execute. // - // * - // viewer-response: The function executes before CloudFront returns the requested - // object to the viewer. The function executes regardless of whether the object was - // already in the edge cache. If the origin returns an HTTP status code other than - // HTTP 200 (OK), the function doesn't execute. + // * viewer-response: The function + // executes before CloudFront returns the requested object to the viewer. The + // function executes regardless of whether the object was already in the edge + // cache. If the origin returns an HTTP status code other than HTTP 200 (OK), the + // function doesn't execute. // // This member is required. EventType EventType @@ -2435,29 +2432,29 @@ type MonitoringSubscription struct { // An origin. An origin is the location where content is stored, and from which // CloudFront gets content to serve to viewers. To specify an origin: // -// * Use +// * Use // S3OriginConfig to specify an Amazon S3 bucket that is not configured with static // website hosting. // -// * Use CustomOriginConfig to specify all other kinds of +// * Use CustomOriginConfig to specify all other kinds of // origins, including: // -// * An Amazon S3 bucket that is configured with -// static website hosting +// * An Amazon S3 bucket that is configured with static +// website hosting // -// * An Elastic Load Balancing load balancer +// * An Elastic Load Balancing load balancer // +// * An AWS Elemental +// MediaPackage endpoint // -// * An AWS Elemental MediaPackage endpoint +// * An AWS Elemental MediaStore container // -// * An AWS Elemental MediaStore -// container +// * Any other HTTP +// server, running on an Amazon EC2 instance or any other kind of host // -// * Any other HTTP server, running on an Amazon EC2 instance or -// any other kind of host -// -// For the current maximum number of origins that you can -// specify per distribution, see General Quotas on Web Distributions +// For the +// current maximum number of origins that you can specify per distribution, see +// General Quotas on Web Distributions // (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/cloudfront-limits.html#limits-web-distributions) // in the Amazon CloudFront Developer Guide (quotas were formerly referred to as // limits). @@ -2625,14 +2622,14 @@ type OriginGroups struct { // it sends to the origin. Each request that CloudFront sends to the origin // includes the following: // -// * The request body and the URL path (without the -// domain name) from the viewer request. +// * The request body and the URL path (without the domain +// name) from the viewer request. 
// -// * The headers that CloudFront -// automatically includes in every origin request, including Host, User-Agent, and +// * The headers that CloudFront automatically +// includes in every origin request, including Host, User-Agent, and // X-Amz-Cf-Id. // -// * All HTTP headers, cookies, and URL query strings that are +// * All HTTP headers, cookies, and URL query strings that are // specified in the cache policy or the origin request policy. These can include // items from the viewer request and, in the case of headers, additional ones that // are added by CloudFront. @@ -2662,17 +2659,17 @@ type OriginRequestPolicy struct { // that CloudFront includes in requests that it sends to the origin. Each request // that CloudFront sends to the origin includes the following: // -// * The request -// body and the URL path (without the domain name) from the viewer request. +// * The request body +// and the URL path (without the domain name) from the viewer request. // -// * -// The headers that CloudFront automatically includes in every origin request, +// * The +// headers that CloudFront automatically includes in every origin request, // including Host, User-Agent, and X-Amz-Cf-Id. // -// * All HTTP headers, cookies, -// and URL query strings that are specified in the cache policy or the origin -// request policy. These can include items from the viewer request and, in the case -// of headers, additional ones that are added by CloudFront. +// * All HTTP headers, cookies, and +// URL query strings that are specified in the cache policy or the origin request +// policy. These can include items from the viewer request and, in the case of +// headers, additional ones that are added by CloudFront. // // CloudFront sends a // request when it can’t find an object in its cache that matches the request. If @@ -2712,17 +2709,17 @@ type OriginRequestPolicyCookiesConfig struct { // Determines whether cookies in viewer requests are included in requests that // CloudFront sends to the origin. Valid values are: // - // * none – Cookies in - // viewer requests are not included in requests that CloudFront sends to the - // origin. Even when this field is set to none, any cookies that are listed in a - // CachePolicy are included in origin requests. + // * none – Cookies in viewer + // requests are not included in requests that CloudFront sends to the origin. Even + // when this field is set to none, any cookies that are listed in a CachePolicy are + // included in origin requests. // - // * whitelist – The cookies in - // viewer requests that are listed in the CookieNames type are included in requests - // that CloudFront sends to the origin. + // * whitelist – The cookies in viewer requests that + // are listed in the CookieNames type are included in requests that CloudFront + // sends to the origin. // - // * all – All cookies in viewer requests - // are included in requests that CloudFront sends to the origin. + // * all – All cookies in viewer requests are included in + // requests that CloudFront sends to the origin. // // This member is required. CookieBehavior OriginRequestPolicyCookieBehavior @@ -2738,22 +2735,22 @@ type OriginRequestPolicyHeadersConfig struct { // Determines whether any HTTP headers are included in requests that CloudFront // sends to the origin. Valid values are: // - // * none – HTTP headers are not - // included in requests that CloudFront sends to the origin. 
Even when this field - // is set to none, any headers that are listed in a CachePolicy are included in - // origin requests. - // - // * whitelist – The HTTP headers that are listed in the - // Headers type are included in requests that CloudFront sends to the origin. + // * none – HTTP headers are not included + // in requests that CloudFront sends to the origin. Even when this field is set to + // none, any headers that are listed in a CachePolicy are included in origin + // requests. // + // * whitelist – The HTTP headers that are listed in the Headers type + // are included in requests that CloudFront sends to the origin. // - // * allViewer – All HTTP headers in viewer requests are included in requests that - // CloudFront sends to the origin. + // * allViewer – All + // HTTP headers in viewer requests are included in requests that CloudFront sends + // to the origin. // - // * allViewerAndWhitelistCloudFront – All - // HTTP headers in viewer requests and the additional CloudFront headers that are - // listed in the Headers type are included in requests that CloudFront sends to the - // origin. The additional headers are added by CloudFront. + // * allViewerAndWhitelistCloudFront – All HTTP headers in viewer + // requests and the additional CloudFront headers that are listed in the Headers + // type are included in requests that CloudFront sends to the origin. The + // additional headers are added by CloudFront. // // This member is required. HeaderBehavior OriginRequestPolicyHeaderBehavior @@ -2793,18 +2790,18 @@ type OriginRequestPolicyQueryStringsConfig struct { // Determines whether any URL query strings in viewer requests are included in // requests that CloudFront sends to the origin. Valid values are: // - // * none – - // Query strings in viewer requests are not included in requests that CloudFront - // sends to the origin. Even when this field is set to none, any query strings that - // are listed in a CachePolicy are included in origin requests. + // * none – Query + // strings in viewer requests are not included in requests that CloudFront sends to + // the origin. Even when this field is set to none, any query strings that are + // listed in a CachePolicy are included in origin requests. // - // * whitelist – - // The query strings in viewer requests that are listed in the QueryStringNames - // type are included in requests that CloudFront sends to the origin. + // * whitelist – The + // query strings in viewer requests that are listed in the QueryStringNames type + // are included in requests that CloudFront sends to the origin. // - // * all – - // All query strings in viewer requests are included in requests that CloudFront - // sends to the origin. + // * all – All query + // strings in viewer requests are included in requests that CloudFront sends to the + // origin. // // This member is required. 
QueryStringBehavior OriginRequestPolicyQueryStringBehavior @@ -2907,13 +2904,13 @@ type ParametersInCacheKeyAndForwardedToOrigin struct { // these fields is true and the viewer request includes the Accept-Encoding header, // then CloudFront does the following: // - // * Normalizes the value of the viewer’s + // * Normalizes the value of the viewer’s // Accept-Encoding header // - // * Includes the normalized header in the cache key - // + // * Includes the normalized header in the cache key // - // * Includes the normalized header in the request to the origin, if a request is + // * + // Includes the normalized header in the request to the origin, if a request is // necessary // // For more information, see Compression support @@ -2952,13 +2949,13 @@ type ParametersInCacheKeyAndForwardedToOrigin struct { // fields is true and the viewer request includes the Accept-Encoding header, then // CloudFront does the following: // - // * Normalizes the value of the viewer’s + // * Normalizes the value of the viewer’s // Accept-Encoding header // - // * Includes the normalized header in the cache key - // + // * Includes the normalized header in the cache key // - // * Includes the normalized header in the request to the origin, if a request is + // * + // Includes the normalized header in the request to the origin, if a request is // necessary // // For more information, see Compression support @@ -3686,28 +3683,28 @@ type TrustedSigners struct { // distribution uses Aliases (alternate domain names or CNAMEs), use the fields in // this type to specify the following settings: // -// * Which viewers the -// distribution accepts HTTPS connections from: only viewers that support server -// name indication (SNI) (https://en.wikipedia.org/wiki/Server_Name_Indication) -// (recommended), or all viewers including those that don’t support SNI. +// * Which viewers the distribution +// accepts HTTPS connections from: only viewers that support server name indication +// (SNI) (https://en.wikipedia.org/wiki/Server_Name_Indication) (recommended), or +// all viewers including those that don’t support SNI. // -// * -// To accept HTTPS connections from only viewers that support SNI, set -// SSLSupportMethod to sni-only. This is recommended. Most browsers and clients -// support SNI. +// * To accept HTTPS +// connections from only viewers that support SNI, set SSLSupportMethod to +// sni-only. This is recommended. Most browsers and clients support SNI. // -// * To accept HTTPS connections from all viewers, including -// those that don’t support SNI, set SSLSupportMethod to vip. This is not -// recommended, and results in additional monthly charges from CloudFront. +// * To +// accept HTTPS connections from all viewers, including those that don’t support +// SNI, set SSLSupportMethod to vip. This is not recommended, and results in +// additional monthly charges from CloudFront. // -// * -// The minimum SSL/TLS protocol version that the distribution can use to -// communicate with viewers. To specify a minimum version, choose a value for -// MinimumProtocolVersion. For more information, see Security Policy +// * The minimum SSL/TLS protocol +// version that the distribution can use to communicate with viewers. To specify a +// minimum version, choose a value for MinimumProtocolVersion. For more +// information, see Security Policy // (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValues-security-policy) // in the Amazon CloudFront Developer Guide. 
// -// * The location of the SSL/TLS +// * The location of the SSL/TLS // certificate, AWS Certificate Manager (ACM) // (https://docs.aws.amazon.com/acm/latest/userguide/acm-overview.html) // (recommended) or AWS Identity and Access Management (AWS IAM) @@ -3715,15 +3712,15 @@ type TrustedSigners struct { // You specify the location by setting a value in one of the following fields (not // both): // -// * ACMCertificateArn +// * ACMCertificateArn // -// * IAMCertificateId +// * IAMCertificateId // -// All -// distributions support HTTPS connections from viewers. To require viewers to use -// HTTPS only, or to redirect them from HTTP to HTTPS, use ViewerProtocolPolicy in -// the CacheBehavior or DefaultCacheBehavior. To specify how CloudFront should use -// SSL/TLS to communicate with your custom origin, use CustomOriginConfig. For more +// All distributions support HTTPS +// connections from viewers. To require viewers to use HTTPS only, or to redirect +// them from HTTP to HTTPS, use ViewerProtocolPolicy in the CacheBehavior or +// DefaultCacheBehavior. To specify how CloudFront should use SSL/TLS to +// communicate with your custom origin, use CustomOriginConfig. For more // information, see Using HTTPS with CloudFront // (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/using-https.html) // and Using Alternate Domain Names and HTTPS @@ -3742,22 +3739,22 @@ type ViewerCertificate struct { // This field is deprecated. Use one of the following fields instead: // - // * + // * // ACMCertificateArn // - // * IAMCertificateId + // * IAMCertificateId // - // * CloudFrontDefaultCertificate + // * CloudFrontDefaultCertificate Certificate *string // This field is deprecated. Use one of the following fields instead: // - // * + // * // ACMCertificateArn // - // * IAMCertificateId + // * IAMCertificateId // - // * CloudFrontDefaultCertificate + // * CloudFrontDefaultCertificate CertificateSource CertificateSource // If the distribution uses the CloudFront domain name such as @@ -3765,12 +3762,12 @@ type ViewerCertificate struct { // Aliases (alternate domain names or CNAMEs), set this field to false and specify // values for the following fields: // - // * ACMCertificateArn or IAMCertificateId + // * ACMCertificateArn or IAMCertificateId // (specify a value for one, not both) // - // * MinimumProtocolVersion + // * MinimumProtocolVersion // - // * + // * // SSLSupportMethod CloudFrontDefaultCertificate *bool @@ -3785,10 +3782,10 @@ type ViewerCertificate struct { // security policy that you want CloudFront to use for HTTPS connections with // viewers. The security policy determines two settings: // - // * The minimum SSL/TLS + // * The minimum SSL/TLS // protocol that CloudFront can use to communicate with viewers. // - // * The ciphers + // * The ciphers // that CloudFront can use to encrypt the content that it returns to viewers. // // For @@ -3808,20 +3805,20 @@ type ViewerCertificate struct { // If the distribution uses Aliases (alternate domain names or CNAMEs), specify // which viewers the distribution accepts HTTPS connections from. // - // * sni-only – - // The distribution accepts HTTPS connections from only viewers that support server + // * sni-only – The + // distribution accepts HTTPS connections from only viewers that support server // name indication (SNI) (https://en.wikipedia.org/wiki/Server_Name_Indication). // This is recommended. Most browsers and clients support SNI. 
// - // * vip – The + // * vip – The // distribution accepts HTTPS connections from all viewers including those that // don’t support SNI. This is not recommended, and results in additional monthly // charges from CloudFront. // - // * static-ip - Do not specify this value unless - // your distribution has been enabled for this feature by the CloudFront team. If - // you have a use case that requires static IP addresses for a distribution, - // contact CloudFront through the AWS Support Center + // * static-ip - Do not specify this value unless your + // distribution has been enabled for this feature by the CloudFront team. If you + // have a use case that requires static IP addresses for a distribution, contact + // CloudFront through the AWS Support Center // (https://console.aws.amazon.com/support/home). // // If the distribution uses the diff --git a/service/cloudhsm/api_op_CreateHsm.go b/service/cloudhsm/api_op_CreateHsm.go index 28224ba779a..1a6603cbac4 100644 --- a/service/cloudhsm/api_op_CreateHsm.go +++ b/service/cloudhsm/api_op_CreateHsm.go @@ -66,11 +66,11 @@ type CreateHsmInput struct { // Specifies the type of subscription for the HSM. // - // * PRODUCTION - The HSM is - // being used in a production environment. + // * PRODUCTION - The HSM is being + // used in a production environment. // - // * TRIAL - The HSM is being used in - // a product trial. + // * TRIAL - The HSM is being used in a product + // trial. // // This member is required. SubscriptionType types.SubscriptionType diff --git a/service/cloudhsm/api_op_DescribeHsm.go b/service/cloudhsm/api_op_DescribeHsm.go index 4a197c9c440..23bdcd2fd2e 100644 --- a/service/cloudhsm/api_op_DescribeHsm.go +++ b/service/cloudhsm/api_op_DescribeHsm.go @@ -109,11 +109,11 @@ type DescribeHsmOutput struct { // Specifies the type of subscription for the HSM. // - // * PRODUCTION - The HSM is - // being used in a production environment. + // * PRODUCTION - The HSM is being + // used in a production environment. // - // * TRIAL - The HSM is being used in - // a product trial. + // * TRIAL - The HSM is being used in a product + // trial. SubscriptionType types.SubscriptionType // The name of the HSM vendor. diff --git a/service/cloudhsm/types/enums.go b/service/cloudhsm/types/enums.go index 85b3aa79e81..70cd711a052 100644 --- a/service/cloudhsm/types/enums.go +++ b/service/cloudhsm/types/enums.go @@ -6,8 +6,8 @@ type ClientVersion string // Enum values for ClientVersion const ( - ClientVersionFive_one ClientVersion = "5.1" - ClientVersionFive_three ClientVersion = "5.3" + ClientVersionFiveOne ClientVersion = "5.1" + ClientVersionFiveThree ClientVersion = "5.3" ) // Values returns all known values for ClientVersion. Note that this can be diff --git a/service/cloudhsmv2/api_op_CreateCluster.go b/service/cloudhsmv2/api_op_CreateCluster.go index 88a05a2a985..4ba2f130294 100644 --- a/service/cloudhsmv2/api_op_CreateCluster.go +++ b/service/cloudhsmv2/api_op_CreateCluster.go @@ -39,11 +39,10 @@ type CreateClusterInput struct { // must specify at least one subnet. If you specify multiple subnets, they must // meet the following criteria: // - // * All subnets must be in the same virtual - // private cloud (VPC). + // * All subnets must be in the same virtual private + // cloud (VPC). // - // * You can specify only one subnet per Availability - // Zone. + // * You can specify only one subnet per Availability Zone. // // This member is required. 
SubnetIds []*string diff --git a/service/cloudhsmv2/types/enums.go b/service/cloudhsmv2/types/enums.go index 7cfa1ef9765..f481a98e1d1 100644 --- a/service/cloudhsmv2/types/enums.go +++ b/service/cloudhsmv2/types/enums.go @@ -22,10 +22,10 @@ type BackupState string // Enum values for BackupState const ( - BackupStateCreate_in_progress BackupState = "CREATE_IN_PROGRESS" - BackupStateReady BackupState = "READY" - BackupStateDeleted BackupState = "DELETED" - BackupStatePending_deletion BackupState = "PENDING_DELETION" + BackupStateCreateInProgress BackupState = "CREATE_IN_PROGRESS" + BackupStateReady BackupState = "READY" + BackupStateDeleted BackupState = "DELETED" + BackupStatePendingDeletion BackupState = "PENDING_DELETION" ) // Values returns all known values for BackupState. Note that this can be expanded @@ -44,15 +44,15 @@ type ClusterState string // Enum values for ClusterState const ( - ClusterStateCreate_in_progress ClusterState = "CREATE_IN_PROGRESS" - ClusterStateUninitialized ClusterState = "UNINITIALIZED" - ClusterStateInitialize_in_progress ClusterState = "INITIALIZE_IN_PROGRESS" - ClusterStateInitialized ClusterState = "INITIALIZED" - ClusterStateActive ClusterState = "ACTIVE" - ClusterStateUpdate_in_progress ClusterState = "UPDATE_IN_PROGRESS" - ClusterStateDelete_in_progress ClusterState = "DELETE_IN_PROGRESS" - ClusterStateDeleted ClusterState = "DELETED" - ClusterStateDegraded ClusterState = "DEGRADED" + ClusterStateCreateInProgress ClusterState = "CREATE_IN_PROGRESS" + ClusterStateUninitialized ClusterState = "UNINITIALIZED" + ClusterStateInitializeInProgress ClusterState = "INITIALIZE_IN_PROGRESS" + ClusterStateInitialized ClusterState = "INITIALIZED" + ClusterStateActive ClusterState = "ACTIVE" + ClusterStateUpdateInProgress ClusterState = "UPDATE_IN_PROGRESS" + ClusterStateDeleteInProgress ClusterState = "DELETE_IN_PROGRESS" + ClusterStateDeleted ClusterState = "DELETED" + ClusterStateDegraded ClusterState = "DEGRADED" ) // Values returns all known values for ClusterState. Note that this can be expanded @@ -76,11 +76,11 @@ type HsmState string // Enum values for HsmState const ( - HsmStateCreate_in_progress HsmState = "CREATE_IN_PROGRESS" - HsmStateActive HsmState = "ACTIVE" - HsmStateDegraded HsmState = "DEGRADED" - HsmStateDelete_in_progress HsmState = "DELETE_IN_PROGRESS" - HsmStateDeleted HsmState = "DELETED" + HsmStateCreateInProgress HsmState = "CREATE_IN_PROGRESS" + HsmStateActive HsmState = "ACTIVE" + HsmStateDegraded HsmState = "DEGRADED" + HsmStateDeleteInProgress HsmState = "DELETE_IN_PROGRESS" + HsmStateDeleted HsmState = "DELETED" ) // Values returns all known values for HsmState. Note that this can be expanded in diff --git a/service/cloudsearch/types/enums.go b/service/cloudsearch/types/enums.go index 63b4688a325..d2129aace04 100644 --- a/service/cloudsearch/types/enums.go +++ b/service/cloudsearch/types/enums.go @@ -220,8 +220,8 @@ type TLSSecurityPolicy string // Enum values for TLSSecurityPolicy const ( - TLSSecurityPolicyPolicy_min_tls_1_0_2019_07 TLSSecurityPolicy = "Policy-Min-TLS-1-0-2019-07" - TLSSecurityPolicyPolicy_min_tls_1_2_2019_07 TLSSecurityPolicy = "Policy-Min-TLS-1-2-2019-07" + TLSSecurityPolicyPolicyMinTls10201907 TLSSecurityPolicy = "Policy-Min-TLS-1-0-2019-07" + TLSSecurityPolicyPolicyMinTls12201907 TLSSecurityPolicy = "Policy-Min-TLS-1-2-2019-07" ) // Values returns all known values for TLSSecurityPolicy. 
Note that this can be diff --git a/service/cloudsearch/types/types.go b/service/cloudsearch/types/types.go index e3d979ad99c..b7945df631d 100644 --- a/service/cloudsearch/types/types.go +++ b/service/cloudsearch/types/types.go @@ -614,19 +614,19 @@ type OptionStatus struct { // The state of processing a change to an option. Possible values: // - // * + // * // RequiresIndexDocuments: the option's latest value will not be deployed until // IndexDocuments has been called and indexing is complete. // - // * Processing: the + // * Processing: the // option's latest value is in the process of being activated. // - // * Active: the + // * Active: the // option's latest value is completely deployed. // - // * FailedToValidate: the - // option value is not compatible with the domain's data and cannot be used to - // index the data. You must either modify the option value or update or remove the + // * FailedToValidate: the option + // value is not compatible with the domain's data and cannot be used to index the + // data. You must either modify the option value or update or remove the // incompatible documents. // // This member is required. diff --git a/service/cloudsearchdomain/api_op_Search.go b/service/cloudsearchdomain/api_op_Search.go index ec41e50d5c8..e5c1545bcb1 100644 --- a/service/cloudsearchdomain/api_op_Search.go +++ b/service/cloudsearchdomain/api_op_Search.go @@ -15,20 +15,20 @@ import ( // specify the search criteria depends on which query parser you use. Amazon // CloudSearch supports four query parsers: // -// * simple: search all text and +// * simple: search all text and // text-array fields for the specified string. Search for phrases, individual // terms, and prefixes. // -// * structured: search specific fields, construct -// compound queries using Boolean operators, and use advanced features such as term -// boosting and proximity searching. +// * structured: search specific fields, construct compound +// queries using Boolean operators, and use advanced features such as term boosting +// and proximity searching. // -// * lucene: specify search criteria using -// the Apache Lucene query parser syntax. +// * lucene: specify search criteria using the Apache +// Lucene query parser syntax. // -// * dismax: specify search criteria -// using the simplified subset of the Apache Lucene query parser syntax defined by -// the DisMax query parser. +// * dismax: specify search criteria using the +// simplified subset of the Apache Lucene query parser syntax defined by the DisMax +// query parser. // // For more information, see Searching Your Data // (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/searching.html) in @@ -96,32 +96,32 @@ type SearchInput struct { // {"FIELD":{"OPTION":VALUE,"OPTION:"STRING"},"FIELD":{"OPTION":VALUE,"OPTION":"STRING"}}. // You can specify the following faceting options: // - // * buckets specifies an - // array of the facet values or ranges to count. Ranges are specified using the - // same syntax that you use to search for a range of values. For more information, - // see Searching for a Range of Values + // * buckets specifies an array of + // the facet values or ranges to count. Ranges are specified using the same syntax + // that you use to search for a range of values. For more information, see + // Searching for a Range of Values // (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/searching-ranges.html) // in the Amazon CloudSearch Developer Guide. Buckets are returned in the order // they are specified in the request. 
The sort and size options are not valid if // you specify buckets. // - // * size specifies the maximum number of facets to - // include in the results. By default, Amazon CloudSearch returns counts for the - // top 10. The size parameter is only valid when you specify the sort option; it - // cannot be used in conjunction with buckets. + // * size specifies the maximum number of facets to include + // in the results. By default, Amazon CloudSearch returns counts for the top 10. + // The size parameter is only valid when you specify the sort option; it cannot be + // used in conjunction with buckets. // - // * sort specifies how you want - // to sort the facets in the results: bucket or count. Specify bucket to sort - // alphabetically or numerically by facet value (in ascending order). Specify count - // to sort by the facet counts computed for each facet value (in descending order). - // To retrieve facet counts for particular values or ranges of values, use the - // buckets option instead of sort. + // * sort specifies how you want to sort the + // facets in the results: bucket or count. Specify bucket to sort alphabetically or + // numerically by facet value (in ascending order). Specify count to sort by the + // facet counts computed for each facet value (in descending order). To retrieve + // facet counts for particular values or ranges of values, use the buckets option + // instead of sort. // - // If no facet options are specified, facet counts - // are computed for all field values, the facets are sorted by facet count, and the - // top 10 facets are returned in the results. To count particular buckets of - // values, use the buckets option. For example, the following request uses the - // buckets option to calculate and return facet counts by decade. + // If no facet options are specified, facet counts are computed + // for all field values, the facets are sorted by facet count, and the top 10 + // facets are returned in the results. To count particular buckets of values, use + // the buckets option. For example, the following request uses the buckets option + // to calculate and return facet counts by decade. // {"year":{"buckets":["[1970,1979]","[1980,1989]","[1990,1999]","[2000,2009]","[2010,}"]}} // To sort facets by facet count, use the count option. For example, the following // request sets the sort option to count to sort the facet values by facet count, @@ -154,25 +154,25 @@ type SearchInput struct { // {"FIELD":{"OPTION":VALUE,"OPTION:"STRING"},"FIELD":{"OPTION":VALUE,"OPTION":"STRING"}}. // You can specify the following highlight options: // - // * format: specifies the - // format of the data in the text field: text or html. When data is returned as - // HTML, all non-alphanumeric characters are encoded. The default is html. + // * format: specifies the format + // of the data in the text field: text or html. When data is returned as HTML, all + // non-alphanumeric characters are encoded. The default is html. // - // * - // max_phrases: specifies the maximum number of occurrences of the search term(s) - // you want to highlight. By default, the first occurrence is highlighted. + // * max_phrases: + // specifies the maximum number of occurrences of the search term(s) you want to + // highlight. By default, the first occurrence is highlighted. // - // * - // pre_tag: specifies the string to prepend to an occurrence of a search term. The - // default for HTML highlights is . The default for text highlights is *. 
+ // * pre_tag: + // specifies the string to prepend to an occurrence of a search term. The default + // for HTML highlights is . The default for text highlights is *. // + // * post_tag: + // specifies the string to append to an occurrence of a search term. The default + // for HTML highlights is . The default for text highlights is *. // - // * post_tag: specifies the string to append to an occurrence of a search term. - // The default for HTML highlights is . The default for text highlights is - // *. - // - // If no highlight options are specified for a field, the returned field text - // is treated as HTML and the first match is highlighted with emphasis tags: + // If no + // highlight options are specified for a field, the returned field text is treated + // as HTML and the first match is highlighted with emphasis tags: // search-term. For example, the following request retrieves highlights // for the actors and title fields. { "actors": {}, "title": {"format": // "text","max_phrases": 2,"pre_tag": "","post_tag": ""} } @@ -196,7 +196,7 @@ type SearchInput struct { // {"OPTION1":"VALUE1","OPTION2":VALUE2"..."OPTIONN":"VALUEN"}. The options you can // configure vary according to which parser you use: // - // * defaultOperator: The + // * defaultOperator: The // default operator used to combine individual terms in the search string. For // example: defaultOperator: 'or'. For the dismax parser, you specify a percentage // that represents the percentage of terms in the search string (rounded down) that @@ -207,7 +207,7 @@ type SearchInput struct { // (dismax). Default: and (simple, structured, lucene) or 100 (dismax). Valid for: // simple, structured, lucene, and dismax. // - // * fields: An array of the fields to + // * fields: An array of the fields to // search when no fields are specified in a search. If no fields are specified in a // search and this option is not specified, all text and text-array fields are // searched. You can specify a weight for each field to control the relative @@ -219,7 +219,7 @@ type SearchInput struct { // than zero. Default: All text and text-array fields. Valid for: simple, // structured, lucene, and dismax. // - // * operators: An array of the operators or + // * operators: An array of the operators or // special characters you want to disable for the simple query parser. If you // disable the and, or, or not operators, the corresponding operators (+, |, -) // have no special meaning and are dropped from the search string. Similarly, @@ -237,11 +237,11 @@ type SearchInput struct { // and phrase queries: "operators":["and","not","or", "prefix"]. Valid values: and, // escape, // - // fuzzy, near, not, or, phrase, precedence, prefix, whitespace. - // Default: All operators and special characters are enabled. Valid for: simple. - // + // fuzzy, near, not, or, phrase, precedence, prefix, whitespace. Default: + // All operators and special characters are enabled. Valid for: simple. // - // * phraseFields: An array of the text or text-array fields you want to use for + // * + // phraseFields: An array of the text or text-array fields you want to use for // phrase searches. When the terms in the search string appear in close proximity // within a field, the field scores higher. You can specify a weight for each field // to boost that score. The phraseSlop option controls how much the matches can @@ -253,42 +253,42 @@ type SearchInput struct { // fields. 
If you don't specify any fields with phraseFields, proximity scoring is // disabled even if phraseSlop is specified. Valid for: dismax. // - // * phraseSlop: - // An integer value that specifies how much matches can deviate from the search - // phrase and still be boosted according to the weights specified in the - // phraseFields option; for example, phraseSlop: 2. You must also specify - // phraseFields to enable proximity scoring. Valid values: positive integers. - // Default: 0. Valid for: dismax. + // * phraseSlop: An + // integer value that specifies how much matches can deviate from the search phrase + // and still be boosted according to the weights specified in the phraseFields + // option; for example, phraseSlop: 2. You must also specify phraseFields to enable + // proximity scoring. Valid values: positive integers. Default: 0. Valid for: + // dismax. // - // * explicitPhraseSlop: An integer value that - // specifies how much a match can deviate from the search phrase when the phrase is - // enclosed in double quotes in the search string. (Phrases that exceed this - // proximity distance are not considered a match.) For example, to specify a slop - // of three for dismax phrase queries, you would specify "explicitPhraseSlop":3. - // Valid values: positive integers. Default: 0. Valid for: dismax. + // * explicitPhraseSlop: An integer value that specifies how much a match + // can deviate from the search phrase when the phrase is enclosed in double quotes + // in the search string. (Phrases that exceed this proximity distance are not + // considered a match.) For example, to specify a slop of three for dismax phrase + // queries, you would specify "explicitPhraseSlop":3. Valid values: positive + // integers. Default: 0. Valid for: dismax. // - // * - // tieBreaker: When a term in the search string is found in a document's field, a - // score is calculated for that field based on how common the word is in that field - // compared to other documents. If the term occurs in multiple fields within a - // document, by default only the highest scoring field contributes to the - // document's overall score. You can specify a tieBreaker value to enable the - // matches in lower-scoring fields to contribute to the document's score. That way, - // if two documents have the same max field score for a particular term, the score - // for the document that has matches in more fields will be higher. The formula for - // calculating the score with a tieBreaker is (max field score) + (tieBreaker) * - // (sum of the scores for the rest of the matching fields). Set tieBreaker to 0 to - // disregard all but the highest scoring field (pure max): "tieBreaker":0. Set to 1 - // to sum the scores from all fields (pure sum): "tieBreaker":1. Valid values: 0.0 - // to 1.0. Default: 0.0. Valid for: dismax. + // * tieBreaker: When a term in the + // search string is found in a document's field, a score is calculated for that + // field based on how common the word is in that field compared to other documents. + // If the term occurs in multiple fields within a document, by default only the + // highest scoring field contributes to the document's overall score. You can + // specify a tieBreaker value to enable the matches in lower-scoring fields to + // contribute to the document's score. That way, if two documents have the same max + // field score for a particular term, the score for the document that has matches + // in more fields will be higher. 
The formula for calculating the score with a + // tieBreaker is (max field score) + (tieBreaker) * (sum of the scores for the rest + // of the matching fields). Set tieBreaker to 0 to disregard all but the highest + // scoring field (pure max): "tieBreaker":0. Set to 1 to sum the scores from all + // fields (pure sum): "tieBreaker":1. Valid values: 0.0 to 1.0. Default: 0.0. Valid + // for: dismax. QueryOptions *string // Specifies which query parser to use to process the request. If queryParser is // not specified, Amazon CloudSearch uses the simple query parser. Amazon // CloudSearch supports four query parsers: // - // * simple: perform simple searches - // of text and text-array fields. By default, the simple query parser searches all + // * simple: perform simple searches of + // text and text-array fields. By default, the simple query parser searches all // text and text-array fields. You can specify which fields to search by with the // queryOptions parameter. If you prefix a search term with a plus sign (+) // documents must contain the term to be considered a match. (This is the default, @@ -300,7 +300,7 @@ type SearchInput struct { // (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/searching-text.html) // in the Amazon CloudSearch Developer Guide. // - // * structured: perform advanced + // * structured: perform advanced // searches by combining multiple expressions to define the search criteria. You // can also search within particular fields, search for values and ranges of // values, and use advanced options such as term boosting, matchall, and near. For @@ -308,13 +308,13 @@ type SearchInput struct { // (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/searching-compound-queries.html) // in the Amazon CloudSearch Developer Guide. // - // * lucene: search using the - // Apache Lucene query parser syntax. For more information, see Apache Lucene Query - // Parser Syntax + // * lucene: search using the Apache + // Lucene query parser syntax. For more information, see Apache Lucene Query Parser + // Syntax // (http://lucene.apache.org/core/4_6_0/queryparser/org/apache/lucene/queryparser/classic/package-summary.html#package_description). // - // - // * dismax: search using the simplified subset of the Apache Lucene query parser + // * + // dismax: search using the simplified subset of the Apache Lucene query parser // syntax defined by the DisMax query parser. For more information, see DisMax // Query Parser Syntax // (http://wiki.apache.org/solr/DisMaxQParserPlugin#Query_Syntax). diff --git a/service/cloudsearchdomain/api_op_UploadDocuments.go b/service/cloudsearchdomain/api_op_UploadDocuments.go index 872c2be9de1..a291328118e 100644 --- a/service/cloudsearchdomain/api_op_UploadDocuments.go +++ b/service/cloudsearchdomain/api_op_UploadDocuments.go @@ -54,9 +54,9 @@ type UploadDocumentsInput struct { // The format of the batch you are uploading. Amazon CloudSearch supports two // document batch formats: // - // * application/json + // * application/json // - // * application/xml + // * application/xml // // This member is required. ContentType types.ContentType diff --git a/service/cloudtrail/api_op_CreateTrail.go b/service/cloudtrail/api_op_CreateTrail.go index 33ae8345d57..85442c9b7cf 100644 --- a/service/cloudtrail/api_op_CreateTrail.go +++ b/service/cloudtrail/api_op_CreateTrail.go @@ -34,20 +34,19 @@ type CreateTrailInput struct { // Specifies the name of the trail. 
The name must meet the following // requirements: // - // * Contain only ASCII letters (a-z, A-Z), numbers (0-9), - // periods (.), underscores (_), or dashes (-) + // * Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods + // (.), underscores (_), or dashes (-) // - // * Start with a letter or - // number, and end with a letter or number + // * Start with a letter or number, and end + // with a letter or number // - // * Be between 3 and 128 characters + // * Be between 3 and 128 characters // + // * Have no adjacent + // periods, underscores or dashes. Names like my-_namespace and my--namespace are + // invalid. // - // * Have no adjacent periods, underscores or dashes. Names like my-_namespace and - // my--namespace are invalid. - // - // * Not be in IP address format (for example, - // 192.168.5.4) + // * Not be in IP address format (for example, 192.168.5.4) // // This member is required. Name *string @@ -100,16 +99,16 @@ type CreateTrailInput struct { // alias, a fully specified ARN to a key, or a globally unique identifier. // Examples: // - // * alias/MyAliasName + // * alias/MyAliasName // - // * + // * // arn:aws:kms:us-east-2:123456789012:alias/MyAliasName // - // * + // * // arn:aws:kms:us-east-2:123456789012:key/12345678-1234-1234-1234-123456789012 // - // - // * 12345678-1234-1234-1234-123456789012 + // * + // 12345678-1234-1234-1234-123456789012 KmsKeyId *string // Specifies the Amazon S3 key prefix that comes after the name of the bucket you diff --git a/service/cloudtrail/api_op_DescribeTrails.go b/service/cloudtrail/api_op_DescribeTrails.go index babd1466829..565dbc092ca 100644 --- a/service/cloudtrail/api_op_DescribeTrails.go +++ b/service/cloudtrail/api_op_DescribeTrails.go @@ -44,14 +44,14 @@ type DescribeTrailsInput struct { // arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail If an empty list is // specified, information for the trail in the current region is returned. // - // * - // If an empty list is specified and IncludeShadowTrails is false, then information - // for all trails in the current region is returned. + // * If an + // empty list is specified and IncludeShadowTrails is false, then information for + // all trails in the current region is returned. // - // * If an empty list is - // specified and IncludeShadowTrails is null or true, then information for all - // trails in the current region and any associated shadow trails in other regions - // is returned. + // * If an empty list is specified + // and IncludeShadowTrails is null or true, then information for all trails in the + // current region and any associated shadow trails in other regions is + // returned. // // If one or more trail names are specified, information is returned // only if the names match the names of trails belonging only to the current diff --git a/service/cloudtrail/api_op_GetEventSelectors.go b/service/cloudtrail/api_op_GetEventSelectors.go index 6eb0cec33f0..3b42231ce98 100644 --- a/service/cloudtrail/api_op_GetEventSelectors.go +++ b/service/cloudtrail/api_op_GetEventSelectors.go @@ -15,13 +15,13 @@ import ( // trail. The information returned for your event selectors includes the // following: // -// * If your event selector includes read-only events, write-only -// events, or all events. This applies to both management events and data events. -// +// * If your event selector includes read-only events, write-only +// events, or all events. This applies to both management events and data +// events. // // * If your event selector includes management events. 
// -// * If your event +// * If your event // selector includes data events, the Amazon S3 objects or AWS Lambda functions // that you are logging for data events. // @@ -49,23 +49,23 @@ type GetEventSelectorsInput struct { // Specifies the name of the trail or trail ARN. If you specify a trail name, the // string must meet the following requirements: // - // * Contain only ASCII letters - // (a-z, A-Z), numbers (0-9), periods (.), underscores (_), or dashes (-) + // * Contain only ASCII letters (a-z, + // A-Z), numbers (0-9), periods (.), underscores (_), or dashes (-) // - // * - // Start with a letter or number, and end with a letter or number + // * Start with a + // letter or number, and end with a letter or number // - // * Be between - // 3 and 128 characters + // * Be between 3 and 128 + // characters // - // * Have no adjacent periods, underscores or dashes. - // Names like my-_namespace and my--namespace are not valid. + // * Have no adjacent periods, underscores or dashes. Names like + // my-_namespace and my--namespace are not valid. // - // * Not be in IP - // address format (for example, 192.168.5.4) + // * Not be in IP address format + // (for example, 192.168.5.4) // - // If you specify a trail ARN, it must - // be in the format: arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail + // If you specify a trail ARN, it must be in the + // format: arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail // // This member is required. TrailName *string diff --git a/service/cloudtrail/api_op_GetInsightSelectors.go b/service/cloudtrail/api_op_GetInsightSelectors.go index 540ccb65b9a..6b2dd14f25e 100644 --- a/service/cloudtrail/api_op_GetInsightSelectors.go +++ b/service/cloudtrail/api_op_GetInsightSelectors.go @@ -39,23 +39,23 @@ type GetInsightSelectorsInput struct { // Specifies the name of the trail or trail ARN. If you specify a trail name, the // string must meet the following requirements: // - // * Contain only ASCII letters - // (a-z, A-Z), numbers (0-9), periods (.), underscores (_), or dashes (-) + // * Contain only ASCII letters (a-z, + // A-Z), numbers (0-9), periods (.), underscores (_), or dashes (-) // - // * - // Start with a letter or number, and end with a letter or number + // * Start with a + // letter or number, and end with a letter or number // - // * Be between - // 3 and 128 characters + // * Be between 3 and 128 + // characters // - // * Have no adjacent periods, underscores or dashes. - // Names like my-_namespace and my--namespace are not valid. + // * Have no adjacent periods, underscores or dashes. Names like + // my-_namespace and my--namespace are not valid. // - // * Not be in IP - // address format (for example, 192.168.5.4) + // * Not be in IP address format + // (for example, 192.168.5.4) // - // If you specify a trail ARN, it must - // be in the format: arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail + // If you specify a trail ARN, it must be in the + // format: arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail // // This member is required. TrailName *string diff --git a/service/cloudtrail/api_op_LookupEvents.go b/service/cloudtrail/api_op_LookupEvents.go index 36871c354d6..25db9ffdbe8 100644 --- a/service/cloudtrail/api_op_LookupEvents.go +++ b/service/cloudtrail/api_op_LookupEvents.go @@ -20,38 +20,38 @@ import ( // region within the last 90 days. 
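The naming requirements repeated across CreateTrail, GetEventSelectors, GetInsightSelectors, and UpdateTrail above are mechanical enough to check client-side before calling the service. A small standalone helper is sketched below; the function name is illustrative, the rules are taken from the comments above, and the service still performs its own validation.

package main

import (
	"fmt"
	"net"
	"strings"
)

// isValidTrailName checks the documented rules: 3-128 characters; only ASCII
// letters, digits, periods, underscores, or dashes; starts and ends with a
// letter or digit; no adjacent '.', '_', or '-'; and not an IP address.
func isValidTrailName(name string) bool {
	if len(name) < 3 || len(name) > 128 {
		return false
	}
	if net.ParseIP(name) != nil {
		return false // e.g. 192.168.5.4 is rejected
	}
	isAlnum := func(c byte) bool {
		return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9')
	}
	if !isAlnum(name[0]) || !isAlnum(name[len(name)-1]) {
		return false
	}
	for i := 0; i < len(name); i++ {
		c := name[i]
		if isAlnum(c) {
			continue
		}
		if !strings.ContainsRune("._-", rune(c)) {
			return false // character outside the allowed set
		}
		if i+1 < len(name) && strings.ContainsRune("._-", rune(name[i+1])) {
			return false // adjacent punctuation, e.g. my-_namespace
		}
	}
	return true
}

func main() {
	fmt.Println(isValidTrailName("MyTrail"))       // true
	fmt.Println(isValidTrailName("my-_namespace")) // false
	fmt.Println(isValidTrailName("192.168.5.4"))   // false
}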
Lookup supports the following attributes for // management events: // -// * AWS access key +// * AWS access key // -// * Event ID +// * Event ID // -// * Event name +// * Event name // +// * Event +// source // -// * Event source -// -// * Read only +// * Read only // -// * Resource name +// * Resource name // -// * Resource type +// * Resource type // -// * -// User name +// * User name // -// Lookup supports the following attributes for Insights events: +// Lookup +// supports the following attributes for Insights events: // -// * -// Event ID +// * Event ID // -// * Event name +// * Event +// name // -// * Event source +// * Event source // -// All attributes are optional. The -// default number of results returned is 50, with a maximum of 50 possible. The -// response includes a token that you can use to get the next page of results. The -// rate of lookup requests is limited to two per second per account. If this limit -// is exceeded, a throttling error occurs. +// All attributes are optional. The default number of results +// returned is 50, with a maximum of 50 possible. The response includes a token +// that you can use to get the next page of results. The rate of lookup requests is +// limited to two per second per account. If this limit is exceeded, a throttling +// error occurs. func (c *Client) LookupEvents(ctx context.Context, params *LookupEventsInput, optFns ...func(*Options)) (*LookupEventsOutput, error) { if params == nil { params = &LookupEventsInput{} diff --git a/service/cloudtrail/api_op_PutEventSelectors.go b/service/cloudtrail/api_op_PutEventSelectors.go index b2eded08264..87f2cce1e3c 100644 --- a/service/cloudtrail/api_op_PutEventSelectors.go +++ b/service/cloudtrail/api_op_PutEventSelectors.go @@ -20,27 +20,26 @@ import ( // event. If the event doesn't match any event selector, the trail doesn't log the // event. Example // -// * You create an event selector for a trail and specify that -// you want write-only events. +// * You create an event selector for a trail and specify that you +// want write-only events. // -// * The EC2 GetConsoleOutput and RunInstances API +// * The EC2 GetConsoleOutput and RunInstances API // operations occur in your account. // -// * CloudTrail evaluates whether the events +// * CloudTrail evaluates whether the events // match your event selectors. // -// * The RunInstances is a write-only event and it +// * The RunInstances is a write-only event and it // matches your event selector. The trail logs the event. // -// * The -// GetConsoleOutput is a read-only event but it doesn't match your event selector. -// The trail doesn't log the event. +// * The GetConsoleOutput +// is a read-only event but it doesn't match your event selector. The trail doesn't +// log the event. // -// The PutEventSelectors operation must be called -// from the region in which the trail was created; otherwise, an -// InvalidHomeRegionException is thrown. You can configure up to five event -// selectors for each trail. For more information, see Logging Data and Management -// Events for Trails +// The PutEventSelectors operation must be called from the region +// in which the trail was created; otherwise, an InvalidHomeRegionException is +// thrown. You can configure up to five event selectors for each trail. 
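Assuming an already configured cloudtrail.Client, and assuming the EventSelector shape with ReadWriteType and IncludeManagementEvents members (this hunk only shows TrailName as required), the write-only walk-through above could be applied roughly as in the following sketch.

package cloudtrailexamples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/cloudtrail"
	"github.com/aws/aws-sdk-go-v2/service/cloudtrail/types"
)

// applyWriteOnlySelector configures a single event selector that logs
// write-only management events, mirroring the evaluation example above:
// RunInstances would be logged, GetConsoleOutput would not.
func applyWriteOnlySelector(ctx context.Context, client *cloudtrail.Client, trailName string) error {
	_, err := client.PutEventSelectors(ctx, &cloudtrail.PutEventSelectorsInput{
		TrailName: aws.String(trailName),
		EventSelectors: []*types.EventSelector{
			{
				// Matches write-only events such as RunInstances.
				ReadWriteType:           types.ReadWriteType("WriteOnly"),
				IncludeManagementEvents: aws.Bool(true),
			},
		},
	})
	return err
}

As noted above, the same call would fail with InvalidHomeRegionException if it were issued outside the region in which the trail was created.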
For more +// information, see Logging Data and Management Events for Trails // (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-management-and-data-events-with-cloudtrail.html) // and Limits in AWS CloudTrail // (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/WhatIsCloudTrail-Limits.html) @@ -71,23 +70,23 @@ type PutEventSelectorsInput struct { // Specifies the name of the trail or trail ARN. If you specify a trail name, the // string must meet the following requirements: // - // * Contain only ASCII letters - // (a-z, A-Z), numbers (0-9), periods (.), underscores (_), or dashes (-) + // * Contain only ASCII letters (a-z, + // A-Z), numbers (0-9), periods (.), underscores (_), or dashes (-) // - // * - // Start with a letter or number, and end with a letter or number + // * Start with a + // letter or number, and end with a letter or number // - // * Be between - // 3 and 128 characters + // * Be between 3 and 128 + // characters // - // * Have no adjacent periods, underscores or dashes. - // Names like my-_namespace and my--namespace are invalid. + // * Have no adjacent periods, underscores or dashes. Names like + // my-_namespace and my--namespace are invalid. // - // * Not be in IP - // address format (for example, 192.168.5.4) + // * Not be in IP address format (for + // example, 192.168.5.4) // - // If you specify a trail ARN, it must - // be in the format: arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail + // If you specify a trail ARN, it must be in the format: + // arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail // // This member is required. TrailName *string diff --git a/service/cloudtrail/api_op_UpdateTrail.go b/service/cloudtrail/api_op_UpdateTrail.go index 81647892844..f989d57cc6e 100644 --- a/service/cloudtrail/api_op_UpdateTrail.go +++ b/service/cloudtrail/api_op_UpdateTrail.go @@ -37,23 +37,23 @@ type UpdateTrailInput struct { // Specifies the name of the trail or trail ARN. If Name is a trail name, the // string must meet the following requirements: // - // * Contain only ASCII letters - // (a-z, A-Z), numbers (0-9), periods (.), underscores (_), or dashes (-) + // * Contain only ASCII letters (a-z, + // A-Z), numbers (0-9), periods (.), underscores (_), or dashes (-) // - // * - // Start with a letter or number, and end with a letter or number + // * Start with a + // letter or number, and end with a letter or number // - // * Be between - // 3 and 128 characters + // * Be between 3 and 128 + // characters // - // * Have no adjacent periods, underscores or dashes. - // Names like my-_namespace and my--namespace are invalid. + // * Have no adjacent periods, underscores or dashes. Names like + // my-_namespace and my--namespace are invalid. // - // * Not be in IP - // address format (for example, 192.168.5.4) + // * Not be in IP address format (for + // example, 192.168.5.4) // - // If Name is a trail ARN, it must be in - // the format: arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail + // If Name is a trail ARN, it must be in the format: + // arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail // // This member is required. Name *string @@ -106,16 +106,16 @@ type UpdateTrailInput struct { // alias, a fully specified ARN to a key, or a globally unique identifier. 
// Examples: // - // * alias/MyAliasName + // * alias/MyAliasName // - // * + // * // arn:aws:kms:us-east-2:123456789012:alias/MyAliasName // - // * + // * // arn:aws:kms:us-east-2:123456789012:key/12345678-1234-1234-1234-123456789012 // - // - // * 12345678-1234-1234-1234-123456789012 + // * + // 12345678-1234-1234-1234-123456789012 KmsKeyId *string // Specifies the name of the Amazon S3 bucket designated for publishing log files. diff --git a/service/cloudtrail/types/enums.go b/service/cloudtrail/types/enums.go index 5062b1af047..d53a5502c78 100644 --- a/service/cloudtrail/types/enums.go +++ b/service/cloudtrail/types/enums.go @@ -38,14 +38,14 @@ type LookupAttributeKey string // Enum values for LookupAttributeKey const ( - LookupAttributeKeyEvent_id LookupAttributeKey = "EventId" - LookupAttributeKeyEvent_name LookupAttributeKey = "EventName" - LookupAttributeKeyRead_only LookupAttributeKey = "ReadOnly" - LookupAttributeKeyUsername LookupAttributeKey = "Username" - LookupAttributeKeyResource_type LookupAttributeKey = "ResourceType" - LookupAttributeKeyResource_name LookupAttributeKey = "ResourceName" - LookupAttributeKeyEvent_source LookupAttributeKey = "EventSource" - LookupAttributeKeyAccess_key_id LookupAttributeKey = "AccessKeyId" + LookupAttributeKeyEventId LookupAttributeKey = "EventId" + LookupAttributeKeyEventName LookupAttributeKey = "EventName" + LookupAttributeKeyReadOnly LookupAttributeKey = "ReadOnly" + LookupAttributeKeyUsername LookupAttributeKey = "Username" + LookupAttributeKeyResourceType LookupAttributeKey = "ResourceType" + LookupAttributeKeyResourceName LookupAttributeKey = "ResourceName" + LookupAttributeKeyEventSource LookupAttributeKey = "EventSource" + LookupAttributeKeyAccessKeyId LookupAttributeKey = "AccessKeyId" ) // Values returns all known values for LookupAttributeKey. Note that this can be diff --git a/service/cloudtrail/types/errors.go b/service/cloudtrail/types/errors.go index 9e5af67469d..b3378cbbd26 100644 --- a/service/cloudtrail/types/errors.go +++ b/service/cloudtrail/types/errors.go @@ -247,16 +247,16 @@ func (e *InvalidEventCategoryException) ErrorFault() smithy.ErrorFault { return // can be distributed across event selectors, but the overall total cannot exceed // 250. You can: // -// * Specify a valid number of event selectors (1 to 5) for a +// * Specify a valid number of event selectors (1 to 5) for a // trail. // -// * Specify a valid number of data resources (1 to 250) for an event +// * Specify a valid number of data resources (1 to 250) for an event // selector. The limit of number of resources on an individual event selector is // configurable up to 250. However, this upper limit is allowed only if the total // number of data resources does not exceed 250 across all event selectors for a // trail. // -// * Specify a valid value for a parameter. For example, specifying the +// * Specify a valid value for a parameter. For example, specifying the // ReadWriteType parameter with a value of read-only is invalid. type InvalidEventSelectorsException struct { Message *string @@ -514,20 +514,20 @@ func (e *InvalidTokenException) ErrorFault() smithy.ErrorFault { return smithy.F // This exception is thrown when the provided trail name is not valid. 
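With the renamed LookupAttributeKey constants above, a lookup filtered by event name might look like the sketch below. The LookupAttributes field and the LookupAttribute member names are assumptions drawn from the underlying CloudTrail API rather than from this hunk.

package cloudtrailexamples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/cloudtrail"
	"github.com/aws/aws-sdk-go-v2/service/cloudtrail/types"
)

// lookupRunInstances fetches up to one page of RunInstances events from the
// current region using the renamed LookupAttributeKeyEventName constant.
func lookupRunInstances(ctx context.Context, client *cloudtrail.Client) ([]*types.Event, error) {
	out, err := client.LookupEvents(ctx, &cloudtrail.LookupEventsInput{
		LookupAttributes: []*types.LookupAttribute{
			{
				AttributeKey:   types.LookupAttributeKeyEventName,
				AttributeValue: aws.String("RunInstances"),
			},
		},
		MaxResults: aws.Int32(50), // the documented default and maximum page size
	})
	if err != nil {
		return nil, err
	}
	return out.Events, nil
}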
Trail names // must meet the following requirements: // -// * Contain only ASCII letters (a-z, -// A-Z), numbers (0-9), periods (.), underscores (_), or dashes (-) +// * Contain only ASCII letters (a-z, A-Z), +// numbers (0-9), periods (.), underscores (_), or dashes (-) // -// * Start -// with a letter or number, and end with a letter or number +// * Start with a +// letter or number, and end with a letter or number // -// * Be between 3 and -// 128 characters +// * Be between 3 and 128 +// characters // -// * Have no adjacent periods, underscores or dashes. Names -// like my-_namespace and my--namespace are invalid. +// * Have no adjacent periods, underscores or dashes. Names like +// my-_namespace and my--namespace are invalid. // -// * Not be in IP address -// format (for example, 192.168.5.4) +// * Not be in IP address format (for +// example, 192.168.5.4) type InvalidTrailNameException struct { Message *string } diff --git a/service/cloudtrail/types/types.go b/service/cloudtrail/types/types.go index 5f658e3a031..a569a34d1d3 100644 --- a/service/cloudtrail/types/types.go +++ b/service/cloudtrail/types/types.go @@ -17,37 +17,36 @@ import ( // bucket-1. In this example, the CloudTrail user specified an empty prefix, and // the option to log both Read and Write data events. // -// * A user uploads an -// image file to bucket-1. +// * A user uploads an image +// file to bucket-1. // -// * The PutObject API operation is an Amazon S3 -// object-level API. It is recorded as a data event in CloudTrail. Because the -// CloudTrail user specified an S3 bucket with an empty prefix, events that occur -// on any object in that bucket are logged. The trail processes and logs the -// event. +// * The PutObject API operation is an Amazon S3 object-level +// API. It is recorded as a data event in CloudTrail. Because the CloudTrail user +// specified an S3 bucket with an empty prefix, events that occur on any object in +// that bucket are logged. The trail processes and logs the event. // -// * A user uploads an object to an Amazon S3 bucket named -// arn:aws:s3:::bucket-2. +// * A user +// uploads an object to an Amazon S3 bucket named arn:aws:s3:::bucket-2. // -// * The PutObject API operation occurred for an object -// in an S3 bucket that the CloudTrail user didn't specify for the trail. The trail -// doesn’t log the event. +// * The +// PutObject API operation occurred for an object in an S3 bucket that the +// CloudTrail user didn't specify for the trail. The trail doesn’t log the +// event. // -// The following example demonstrates how logging works -// when you configure logging of AWS Lambda data events for a Lambda function named -// MyLambdaFunction, but not for all AWS Lambda functions. +// The following example demonstrates how logging works when you configure +// logging of AWS Lambda data events for a Lambda function named MyLambdaFunction, +// but not for all AWS Lambda functions. // -// * A user runs a -// script that includes a call to the MyLambdaFunction function and the -// MyOtherLambdaFunction function. +// * A user runs a script that includes a +// call to the MyLambdaFunction function and the MyOtherLambdaFunction function. // -// * The Invoke API operation on -// MyLambdaFunction is an AWS Lambda API. It is recorded as a data event in -// CloudTrail. Because the CloudTrail user specified logging data events for -// MyLambdaFunction, any invocations of that function are logged. The trail -// processes and logs the event. 
+// * +// The Invoke API operation on MyLambdaFunction is an AWS Lambda API. It is +// recorded as a data event in CloudTrail. Because the CloudTrail user specified +// logging data events for MyLambdaFunction, any invocations of that function are +// logged. The trail processes and logs the event. // -// * The Invoke API operation on +// * The Invoke API operation on // MyOtherLambdaFunction is an AWS Lambda API. Because the CloudTrail user did not // specify logging data events for all Lambda functions, the Invoke operation for // MyOtherLambdaFunction does not match the function specified for the trail. The @@ -61,34 +60,33 @@ type DataResource struct { // An array of Amazon Resource Name (ARN) strings or partial ARN strings for the // specified objects. // - // * To log data events for all objects in all S3 buckets - // in your AWS account, specify the prefix as arn:aws:s3:::. This will also enable + // * To log data events for all objects in all S3 buckets in + // your AWS account, specify the prefix as arn:aws:s3:::. This will also enable // logging of data event activity performed by any user or role in your AWS // account, even if that activity is performed on a bucket that belongs to another // AWS account. // - // * To log data events for all objects in an S3 bucket, specify - // the bucket and an empty object prefix such as arn:aws:s3:::bucket-1/. The trail - // logs data events for all objects in this S3 bucket. + // * To log data events for all objects in an S3 bucket, specify the + // bucket and an empty object prefix such as arn:aws:s3:::bucket-1/. The trail logs + // data events for all objects in this S3 bucket. // - // * To log data events - // for specific objects, specify the S3 bucket and object prefix such as + // * To log data events for + // specific objects, specify the S3 bucket and object prefix such as // arn:aws:s3:::bucket-1/example-images. The trail logs data events for objects in // this S3 bucket that match the prefix. // - // * To log data events for all - // functions in your AWS account, specify the prefix as arn:aws:lambda. This will - // also enable logging of Invoke activity performed by any user or role in your AWS - // account, even if that activity is performed on a function that belongs to - // another AWS account. + // * To log data events for all functions in + // your AWS account, specify the prefix as arn:aws:lambda. This will also enable + // logging of Invoke activity performed by any user or role in your AWS account, + // even if that activity is performed on a function that belongs to another AWS + // account. // - // * To log data events for a specific Lambda function, - // specify the function ARN. Lambda function ARNs are exact. For example, if you - // specify a function ARN - // arn:aws:lambda:us-west-2:111111111111:function:helloworld, data events will only - // be logged for arn:aws:lambda:us-west-2:111111111111:function:helloworld. They - // will not be logged for - // arn:aws:lambda:us-west-2:111111111111:function:helloworld2. + // * To log data events for a specific Lambda function, specify the + // function ARN. Lambda function ARNs are exact. For example, if you specify a + // function ARN arn:aws:lambda:us-west-2:111111111111:function:helloworld, data + // events will only be logged for + // arn:aws:lambda:us-west-2:111111111111:function:helloworld. They will not be + // logged for arn:aws:lambda:us-west-2:111111111111:function:helloworld2. 
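The ARN forms above translate into data-event selectors along the following lines. The Type member on DataResource and its string values are assumptions based on the CloudTrail API (only Values appears in this hunk); the ARNs reuse the examples from the comment.

package cloudtrailexamples

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/cloudtrail/types"
)

// dataEventResources builds selectors that log data events for every object in
// bucket-1 and for a single Lambda function, following the ARN forms above.
func dataEventResources() []*types.DataResource {
	return []*types.DataResource{
		{
			Type:   aws.String("AWS::S3::Object"),
			Values: []*string{aws.String("arn:aws:s3:::bucket-1/")},
		},
		{
			Type:   aws.String("AWS::Lambda::Function"),
			Values: []*string{aws.String("arn:aws:lambda:us-west-2:111111111111:function:helloworld")},
		},
	}
}

These would be attached to an event selector and applied with PutEventSelectors, as in the earlier sketch; the DataResources member name on EventSelector is likewise an assumption.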
Values []*string } diff --git a/service/cloudwatch/api_op_GetInsightRuleReport.go b/service/cloudwatch/api_op_GetInsightRuleReport.go index 243de924db0..da9c6654c96 100644 --- a/service/cloudwatch/api_op_GetInsightRuleReport.go +++ b/service/cloudwatch/api_op_GetInsightRuleReport.go @@ -17,33 +17,33 @@ import ( // group. You can also optionally return one or more statistics about each data // point in the time series. These statistics can include the following: // -// * +// * // UniqueContributors -- the number of unique contributors for each data point. // -// -// * MaxContributorValue -- the value of the top contributor for each data point. -// The identity of the contributor might change for each data point in the graph. -// If this rule aggregates by COUNT, the top contributor for each data point is the +// * +// MaxContributorValue -- the value of the top contributor for each data point. The +// identity of the contributor might change for each data point in the graph. If +// this rule aggregates by COUNT, the top contributor for each data point is the // contributor with the most occurrences in that period. If the rule aggregates by // SUM, the top contributor is the contributor with the highest sum in the log // field specified by the rule's Value, during that period. // -// * SampleCount -- -// the number of data points matched by the rule. +// * SampleCount -- the +// number of data points matched by the rule. // -// * Sum -- the sum of the -// values from all contributors during the time period represented by that data -// point. +// * Sum -- the sum of the values from +// all contributors during the time period represented by that data point. // -// * Minimum -- the minimum value from a single observation during the -// time period represented by that data point. +// * +// Minimum -- the minimum value from a single observation during the time period +// represented by that data point. // -// * Maximum -- the maximum value -// from a single observation during the time period represented by that data -// point. +// * Maximum -- the maximum value from a single +// observation during the time period represented by that data point. // -// * Average -- the average value from all contributors during the time -// period represented by that data point. +// * Average -- +// the average value from all contributors during the time period represented by +// that data point. func (c *Client) GetInsightRuleReport(ctx context.Context, params *GetInsightRuleReportInput, optFns ...func(*Options)) (*GetInsightRuleReportOutput, error) { if params == nil { params = &GetInsightRuleReportInput{} @@ -91,33 +91,33 @@ type GetInsightRuleReportInput struct { // Specifies which metrics to use for aggregation of contributor values for the // report. You can specify one or more of the following metrics: // - // * + // * // UniqueContributors -- the number of unique contributors for each data point. // - // - // * MaxContributorValue -- the value of the top contributor for each data point. - // The identity of the contributor might change for each data point in the graph. - // If this rule aggregates by COUNT, the top contributor for each data point is the + // * + // MaxContributorValue -- the value of the top contributor for each data point. The + // identity of the contributor might change for each data point in the graph. If + // this rule aggregates by COUNT, the top contributor for each data point is the // contributor with the most occurrences in that period. 
If the rule aggregates by // SUM, the top contributor is the contributor with the highest sum in the log // field specified by the rule's Value, during that period. // - // * SampleCount -- - // the number of data points matched by the rule. + // * SampleCount -- the + // number of data points matched by the rule. // - // * Sum -- the sum of the - // values from all contributors during the time period represented by that data - // point. + // * Sum -- the sum of the values from + // all contributors during the time period represented by that data point. // - // * Minimum -- the minimum value from a single observation during the - // time period represented by that data point. + // * + // Minimum -- the minimum value from a single observation during the time period + // represented by that data point. // - // * Maximum -- the maximum value - // from a single observation during the time period represented by that data - // point. + // * Maximum -- the maximum value from a single + // observation during the time period represented by that data point. // - // * Average -- the average value from all contributors during the time - // period represented by that data point. + // * Average -- + // the average value from all contributors during the time period represented by + // that data point. Metrics []*string // Determines what statistic to use to rank the contributors. Valid values are SUM diff --git a/service/cloudwatch/api_op_GetMetricData.go b/service/cloudwatch/api_op_GetMetricData.go index 29112a44349..38f2df93169 100644 --- a/service/cloudwatch/api_op_GetMetricData.go +++ b/service/cloudwatch/api_op_GetMetricData.go @@ -26,33 +26,32 @@ import ( // (https://aws.amazon.com/cloudwatch/pricing/). Amazon CloudWatch retains metric // data as follows: // -// * Data points with a period of less than 60 seconds are +// * Data points with a period of less than 60 seconds are // available for 3 hours. These data points are high-resolution metrics and are // available only for custom metrics that have been defined with a // StorageResolution of 1. // -// * Data points with a period of 60 seconds -// (1-minute) are available for 15 days. +// * Data points with a period of 60 seconds (1-minute) +// are available for 15 days. // -// * Data points with a period of 300 -// seconds (5-minute) are available for 63 days. +// * Data points with a period of 300 seconds +// (5-minute) are available for 63 days. // -// * Data points with a period -// of 3600 seconds (1 hour) are available for 455 days (15 months). +// * Data points with a period of 3600 +// seconds (1 hour) are available for 455 days (15 months). // -// Data points -// that are initially published with a shorter period are aggregated together for -// long-term storage. For example, if you collect data using a period of 1 minute, -// the data remains available for 15 days with 1-minute resolution. After 15 days, -// this data is still available, but is aggregated and retrievable only with a -// resolution of 5 minutes. After 63 days, the data is further aggregated and is -// available with a resolution of 1 hour. If you omit Unit in your request, all -// data that was collected with any unit is returned, along with the corresponding -// units that were specified when the data was reported to CloudWatch. If you -// specify a unit, the operation returns only data that was collected with that -// unit specified. If you specify a unit that does not match the data collected, -// the results of the operation are null. 
CloudWatch does not perform unit -// conversions. +// Data points that are +// initially published with a shorter period are aggregated together for long-term +// storage. For example, if you collect data using a period of 1 minute, the data +// remains available for 15 days with 1-minute resolution. After 15 days, this data +// is still available, but is aggregated and retrievable only with a resolution of +// 5 minutes. After 63 days, the data is further aggregated and is available with a +// resolution of 1 hour. If you omit Unit in your request, all data that was +// collected with any unit is returned, along with the corresponding units that +// were specified when the data was reported to CloudWatch. If you specify a unit, +// the operation returns only data that was collected with that unit specified. If +// you specify a unit that does not match the data collected, the results of the +// operation are null. CloudWatch does not perform unit conversions. func (c *Client) GetMetricData(ctx context.Context, params *GetMetricDataInput, optFns ...func(*Options)) (*GetMetricDataOutput, error) { if params == nil { params = &GetMetricDataInput{} @@ -92,29 +91,29 @@ type GetMetricDataInput struct { // is inclusive; results include data points with the specified time stamp. // CloudWatch rounds the specified time stamp as follows: // - // * Start time less - // than 15 days ago - Round down to the nearest whole minute. For example, 12:32:34 - // is rounded down to 12:32:00. + // * Start time less than + // 15 days ago - Round down to the nearest whole minute. For example, 12:32:34 is + // rounded down to 12:32:00. // - // * Start time between 15 and 63 days ago - - // Round down to the nearest 5-minute clock interval. For example, 12:32:34 is - // rounded down to 12:30:00. + // * Start time between 15 and 63 days ago - Round down + // to the nearest 5-minute clock interval. For example, 12:32:34 is rounded down to + // 12:30:00. // - // * Start time greater than 63 days ago - Round - // down to the nearest 1-hour clock interval. For example, 12:32:34 is rounded down - // to 12:00:00. + // * Start time greater than 63 days ago - Round down to the nearest + // 1-hour clock interval. For example, 12:32:34 is rounded down to 12:00:00. // - // If you set Period to 5, 10, or 30, the start time of your request - // is rounded down to the nearest time that corresponds to even 5-, 10-, or - // 30-second divisions of a minute. For example, if you make a query at (HH:mm:ss) - // 01:05:23 for the previous 10-second period, the start time of your request is - // rounded down and you receive data from 01:05:10 to 01:05:20. If you make a query - // at 15:07:17 for the previous 5 minutes of data, using a period of 5 seconds, you - // receive data timestamped between 15:02:15 and 15:07:15. For better performance, - // specify StartTime and EndTime values that align with the value of the metric's - // Period and sync up with the beginning and end of an hour. For example, if the - // Period of a metric is 5 minutes, specifying 12:05 or 12:30 as StartTime can get - // a faster response from CloudWatch than setting 12:07 or 12:29 as the StartTime. + // If + // you set Period to 5, 10, or 30, the start time of your request is rounded down + // to the nearest time that corresponds to even 5-, 10-, or 30-second divisions of + // a minute. 
For example, if you make a query at (HH:mm:ss) 01:05:23 for the + // previous 10-second period, the start time of your request is rounded down and + // you receive data from 01:05:10 to 01:05:20. If you make a query at 15:07:17 for + // the previous 5 minutes of data, using a period of 5 seconds, you receive data + // timestamped between 15:02:15 and 15:07:15. For better performance, specify + // StartTime and EndTime values that align with the value of the metric's Period + // and sync up with the beginning and end of an hour. For example, if the Period of + // a metric is 5 minutes, specifying 12:05 or 12:30 as StartTime can get a faster + // response from CloudWatch than setting 12:07 or 12:29 as the StartTime. // // This member is required. StartTime *time.Time diff --git a/service/cloudwatch/api_op_GetMetricStatistics.go b/service/cloudwatch/api_op_GetMetricStatistics.go index dacf189cff4..50e72dfb13b 100644 --- a/service/cloudwatch/api_op_GetMetricStatistics.go +++ b/service/cloudwatch/api_op_GetMetricStatistics.go @@ -28,40 +28,39 @@ import ( // publish data using a statistic set instead, you can only retrieve percentile // statistics for this data if one of the following conditions is true: // -// * The +// * The // SampleCount value of the statistic set is 1. // -// * The Min and the Max values -// of the statistic set are equal. +// * The Min and the Max values of +// the statistic set are equal. // // Percentile statistics are not available for // metrics when any of the metric values are negative numbers. Amazon CloudWatch // retains metric data as follows: // -// * Data points with a period of less than 60 +// * Data points with a period of less than 60 // seconds are available for 3 hours. These data points are high-resolution metrics // and are available only for custom metrics that have been defined with a // StorageResolution of 1. // -// * Data points with a period of 60 seconds -// (1-minute) are available for 15 days. +// * Data points with a period of 60 seconds (1-minute) +// are available for 15 days. // -// * Data points with a period of 300 -// seconds (5-minute) are available for 63 days. +// * Data points with a period of 300 seconds +// (5-minute) are available for 63 days. // -// * Data points with a period -// of 3600 seconds (1 hour) are available for 455 days (15 months). +// * Data points with a period of 3600 +// seconds (1 hour) are available for 455 days (15 months). // -// Data points -// that are initially published with a shorter period are aggregated together for -// long-term storage. For example, if you collect data using a period of 1 minute, -// the data remains available for 15 days with 1-minute resolution. After 15 days, -// this data is still available, but is aggregated and retrievable only with a -// resolution of 5 minutes. After 63 days, the data is further aggregated and is -// available with a resolution of 1 hour. CloudWatch started retaining 5-minute and -// 1-hour metric data as of July 9, 2016. For information about metrics and -// dimensions supported by AWS services, see the Amazon CloudWatch Metrics and -// Dimensions Reference +// Data points that are +// initially published with a shorter period are aggregated together for long-term +// storage. For example, if you collect data using a period of 1 minute, the data +// remains available for 15 days with 1-minute resolution. After 15 days, this data +// is still available, but is aggregated and retrievable only with a resolution of +// 5 minutes. 
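As a rough illustration of the retention, period, and rounding guidance above, the sketch below queries five-minute averages with StartTime and EndTime truncated to whole hours. The MetricDataQueries, MetricStat, and Metric shapes are assumptions based on the CloudWatch API; only StartTime and MetricStat.Period appear in this patch.

package cloudwatchexamples

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/cloudwatch"
	"github.com/aws/aws-sdk-go-v2/service/cloudwatch/types"
)

// cpuAverageLastThreeHours queries 5-minute average CPU for one instance,
// aligning StartTime and EndTime to whole hours as the guidance above
// recommends for faster responses.
func cpuAverageLastThreeHours(ctx context.Context, client *cloudwatch.Client, instanceID string) (*cloudwatch.GetMetricDataOutput, error) {
	end := time.Now().UTC().Truncate(time.Hour)
	start := end.Add(-3 * time.Hour)
	return client.GetMetricData(ctx, &cloudwatch.GetMetricDataInput{
		StartTime: &start,
		EndTime:   &end,
		MetricDataQueries: []*types.MetricDataQuery{
			{
				Id: aws.String("cpu"),
				MetricStat: &types.MetricStat{
					Metric: &types.Metric{
						Namespace:  aws.String("AWS/EC2"),
						MetricName: aws.String("CPUUtilization"),
						Dimensions: []*types.Dimension{
							{Name: aws.String("InstanceId"), Value: aws.String(instanceID)},
						},
					},
					Period: aws.Int32(300), // 5 minutes, a valid multiple for data 3 hours to 15 days old
					Stat:   aws.String("Average"),
				},
			},
		},
	})
}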
After 63 days, the data is further aggregated and is available with a +// resolution of 1 hour. CloudWatch started retaining 5-minute and 1-hour metric +// data as of July 9, 2016. For information about metrics and dimensions supported +// by AWS services, see the Amazon CloudWatch Metrics and Dimensions Reference // (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CW_Support_For_AWS.html) // in the Amazon CloudWatch User Guide. func (c *Client) GetMetricStatistics(ctx context.Context, params *GetMetricStatisticsInput, optFns ...func(*Options)) (*GetMetricStatisticsOutput, error) { @@ -108,15 +107,15 @@ type GetMetricStatisticsInput struct { // specifies a time stamp that is greater than 3 hours ago, you must specify the // period as follows or no data points in that time range is returned: // - // * Start + // * Start // time between 3 hours and 15 days ago - Use a multiple of 60 seconds (1 // minute). // - // * Start time between 15 and 63 days ago - Use a multiple of 300 + // * Start time between 15 and 63 days ago - Use a multiple of 300 // seconds (5 minutes). // - // * Start time greater than 63 days ago - Use a multiple - // of 3600 seconds (1 hour). + // * Start time greater than 63 days ago - Use a multiple of + // 3600 seconds (1 hour). // // This member is required. Period *int32 @@ -128,25 +127,24 @@ type GetMetricStatisticsInput struct { // example, 2016-10-03T23:00:00Z). CloudWatch rounds the specified time stamp as // follows: // - // * Start time less than 15 days ago - Round down to the nearest - // whole minute. For example, 12:32:34 is rounded down to 12:32:00. + // * Start time less than 15 days ago - Round down to the nearest whole + // minute. For example, 12:32:34 is rounded down to 12:32:00. // - // * Start - // time between 15 and 63 days ago - Round down to the nearest 5-minute clock - // interval. For example, 12:32:34 is rounded down to 12:30:00. + // * Start time between + // 15 and 63 days ago - Round down to the nearest 5-minute clock interval. For + // example, 12:32:34 is rounded down to 12:30:00. // - // * Start time - // greater than 63 days ago - Round down to the nearest 1-hour clock interval. For - // example, 12:32:34 is rounded down to 12:00:00. + // * Start time greater than 63 + // days ago - Round down to the nearest 1-hour clock interval. For example, + // 12:32:34 is rounded down to 12:00:00. // - // If you set Period to 5, 10, or - // 30, the start time of your request is rounded down to the nearest time that - // corresponds to even 5-, 10-, or 30-second divisions of a minute. For example, if - // you make a query at (HH:mm:ss) 01:05:23 for the previous 10-second period, the - // start time of your request is rounded down and you receive data from 01:05:10 to - // 01:05:20. If you make a query at 15:07:17 for the previous 5 minutes of data, - // using a period of 5 seconds, you receive data timestamped between 15:02:15 and - // 15:07:15. + // If you set Period to 5, 10, or 30, the + // start time of your request is rounded down to the nearest time that corresponds + // to even 5-, 10-, or 30-second divisions of a minute. For example, if you make a + // query at (HH:mm:ss) 01:05:23 for the previous 10-second period, the start time + // of your request is rounded down and you receive data from 01:05:10 to 01:05:20. + // If you make a query at 15:07:17 for the previous 5 minutes of data, using a + // period of 5 seconds, you receive data timestamped between 15:02:15 and 15:07:15. // // This member is required. 
StartTime *time.Time diff --git a/service/cloudwatch/api_op_GetMetricWidgetImage.go b/service/cloudwatch/api_op_GetMetricWidgetImage.go index 633e080f617..a9442601068 100644 --- a/service/cloudwatch/api_op_GetMetricWidgetImage.go +++ b/service/cloudwatch/api_op_GetMetricWidgetImage.go @@ -19,10 +19,10 @@ import ( // There is a limit of 20 transactions per second for this API. Each // GetMetricWidgetImage action has the following limits: // -// * As many as 100 -// metrics in the graph. +// * As many as 100 metrics +// in the graph. // -// * Up to 100 KB uncompressed payload. +// * Up to 100 KB uncompressed payload. func (c *Client) GetMetricWidgetImage(ctx context.Context, params *GetMetricWidgetImageInput, optFns ...func(*Options)) (*GetMetricWidgetImageOutput, error) { if params == nil { params = &GetMetricWidgetImageInput{} diff --git a/service/cloudwatch/api_op_PutCompositeAlarm.go b/service/cloudwatch/api_op_PutCompositeAlarm.go index 574c238a61b..2ca991ab017 100644 --- a/service/cloudwatch/api_op_PutCompositeAlarm.go +++ b/service/cloudwatch/api_op_PutCompositeAlarm.go @@ -68,47 +68,47 @@ type PutCompositeAlarmInput struct { // to reference the other alarms that are to be evaluated. Functions can include // the following: // - // * ALARM("alarm-name or alarm-ARN") is TRUE if the named - // alarm is in ALARM state. + // * ALARM("alarm-name or alarm-ARN") is TRUE if the named alarm is + // in ALARM state. // - // * OK("alarm-name or alarm-ARN") is TRUE if the - // named alarm is in OK state. + // * OK("alarm-name or alarm-ARN") is TRUE if the named alarm is + // in OK state. // - // * INSUFFICIENT_DATA("alarm-name or alarm-ARN") - // is TRUE if the named alarm is in INSUFFICIENT_DATA state. + // * INSUFFICIENT_DATA("alarm-name or alarm-ARN") is TRUE if the + // named alarm is in INSUFFICIENT_DATA state. // - // * TRUE always - // evaluates to TRUE. + // * TRUE always evaluates to TRUE. // - // * FALSE always evaluates to FALSE. + // * + // FALSE always evaluates to FALSE. // - // TRUE and FALSE are - // useful for testing a complex AlarmRule structure, and for testing your alarm - // actions. Alarm names specified in AlarmRule can be surrounded with double-quotes - // ("), but do not have to be. The following are some examples of AlarmRule: + // TRUE and FALSE are useful for testing a + // complex AlarmRule structure, and for testing your alarm actions. Alarm names + // specified in AlarmRule can be surrounded with double-quotes ("), but do not have + // to be. The following are some examples of AlarmRule: // - // * + // * // ALARM(CPUUtilizationTooHigh) AND ALARM(DiskReadOpsTooHigh) specifies that the // composite alarm goes into ALARM state only if both CPUUtilizationTooHigh and // DiskReadOpsTooHigh alarms are in ALARM state. // - // * - // ALARM(CPUUtilizationTooHigh) AND NOT ALARM(DeploymentInProgress) specifies that - // the alarm goes to ALARM state if CPUUtilizationTooHigh is in ALARM state and - // DeploymentInProgress is not in ALARM state. This example reduces alarm noise - // during a known deployment window. + // * ALARM(CPUUtilizationTooHigh) + // AND NOT ALARM(DeploymentInProgress) specifies that the alarm goes to ALARM state + // if CPUUtilizationTooHigh is in ALARM state and DeploymentInProgress is not in + // ALARM state. This example reduces alarm noise during a known deployment + // window. 
// - // * (ALARM(CPUUtilizationTooHigh) OR - // ALARM(DiskReadOpsTooHigh)) AND OK(NetworkOutTooHigh) goes into ALARM state if - // CPUUtilizationTooHigh OR DiskReadOpsTooHigh is in ALARM state, and if - // NetworkOutTooHigh is in OK state. This provides another example of using a - // composite alarm to prevent noise. This rule ensures that you are not notified - // with an alarm action on high CPU or disk usage if a known network problem is - // also occurring. + // * (ALARM(CPUUtilizationTooHigh) OR ALARM(DiskReadOpsTooHigh)) AND + // OK(NetworkOutTooHigh) goes into ALARM state if CPUUtilizationTooHigh OR + // DiskReadOpsTooHigh is in ALARM state, and if NetworkOutTooHigh is in OK state. + // This provides another example of using a composite alarm to prevent noise. This + // rule ensures that you are not notified with an alarm action on high CPU or disk + // usage if a known network problem is also occurring. // - // The AlarmRule can specify as many as 100 "children" alarms. The - // AlarmRule expression can have as many as 500 elements. Elements are child - // alarms, TRUE or FALSE statements, and parentheses. + // The AlarmRule can specify + // as many as 100 "children" alarms. The AlarmRule expression can have as many as + // 500 elements. Elements are child alarms, TRUE or FALSE statements, and + // parentheses. // // This member is required. AlarmRule *string diff --git a/service/cloudwatch/api_op_PutMetricAlarm.go b/service/cloudwatch/api_op_PutMetricAlarm.go index 8adad00c881..5edce246c6b 100644 --- a/service/cloudwatch/api_op_PutMetricAlarm.go +++ b/service/cloudwatch/api_op_PutMetricAlarm.go @@ -21,23 +21,23 @@ import ( // of the alarm. If you are an IAM user, you must have Amazon EC2 permissions for // some alarm operations: // -// * iam:CreateServiceLinkedRole for all alarms with -// EC2 actions +// * iam:CreateServiceLinkedRole for all alarms with EC2 +// actions // -// * ec2:DescribeInstanceStatus and ec2:DescribeInstances for all -// alarms on EC2 instance status metrics +// * ec2:DescribeInstanceStatus and ec2:DescribeInstances for all alarms +// on EC2 instance status metrics // -// * ec2:StopInstances for alarms with -// stop actions +// * ec2:StopInstances for alarms with stop +// actions // -// * ec2:TerminateInstances for alarms with terminate actions +// * ec2:TerminateInstances for alarms with terminate actions // +// * No +// specific permissions are needed for alarms with recover actions // -// * No specific permissions are needed for alarms with recover actions -// -// If you -// have read/write permissions for Amazon CloudWatch but not for Amazon EC2, you -// can still create an alarm, but the stop or terminate actions are not performed. +// If you have +// read/write permissions for Amazon CloudWatch but not for Amazon EC2, you can +// still create an alarm, but the stop or terminate actions are not performed. // However, if you are later granted the required permissions, the alarm actions // that you created earlier are performed. 
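Assuming AlarmName is the only other required member (this hunk shows just AlarmRule), the deployment-window rule from the examples above could be created roughly as follows.

package cloudwatchexamples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/cloudwatch"
)

// createDeploymentAwareAlarm combines two child alarms so that high CPU only
// alarms outside a known deployment window, as in the AlarmRule examples above.
func createDeploymentAwareAlarm(ctx context.Context, client *cloudwatch.Client) error {
	_, err := client.PutCompositeAlarm(ctx, &cloudwatch.PutCompositeAlarmInput{
		AlarmName: aws.String("CPUHighOutsideDeployments"),
		AlarmRule: aws.String(`ALARM("CPUUtilizationTooHigh") AND NOT ALARM("DeploymentInProgress")`),
	})
	return err
}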
If you are using an IAM role (for // example, an EC2 instance profile), you cannot stop or terminate the instance diff --git a/service/cloudwatch/api_op_PutMetricData.go b/service/cloudwatch/api_op_PutMetricData.go index e2169bf71eb..80e450421b4 100644 --- a/service/cloudwatch/api_op_PutMetricData.go +++ b/service/cloudwatch/api_op_PutMetricData.go @@ -45,11 +45,11 @@ import ( // publish data using a statistic set instead, you can only retrieve percentile // statistics for this data if one of the following conditions is true: // -// * The +// * The // SampleCount value of the statistic set is 1 and Min, Max, and Sum are all // equal. // -// * The Min and Max are equal, and Sum is equal to Min multiplied by +// * The Min and Max are equal, and Sum is equal to Min multiplied by // SampleCount. func (c *Client) PutMetricData(ctx context.Context, params *PutMetricDataInput, optFns ...func(*Options)) (*PutMetricDataOutput, error) { if params == nil { diff --git a/service/cloudwatch/types/enums.go b/service/cloudwatch/types/enums.go index cee6f4fbddc..5a0fc1c6999 100644 --- a/service/cloudwatch/types/enums.go +++ b/service/cloudwatch/types/enums.go @@ -24,9 +24,9 @@ type AnomalyDetectorStateValue string // Enum values for AnomalyDetectorStateValue const ( - AnomalyDetectorStateValuePending_training AnomalyDetectorStateValue = "PENDING_TRAINING" - AnomalyDetectorStateValueTrained_insufficient_data AnomalyDetectorStateValue = "TRAINED_INSUFFICIENT_DATA" - AnomalyDetectorStateValueTrained AnomalyDetectorStateValue = "TRAINED" + AnomalyDetectorStateValuePendingTraining AnomalyDetectorStateValue = "PENDING_TRAINING" + AnomalyDetectorStateValueTrainedInsufficientData AnomalyDetectorStateValue = "TRAINED_INSUFFICIENT_DATA" + AnomalyDetectorStateValueTrained AnomalyDetectorStateValue = "TRAINED" ) // Values returns all known values for AnomalyDetectorStateValue. Note that this @@ -108,8 +108,8 @@ type ScanBy string // Enum values for ScanBy const ( - ScanByTimestamp_descending ScanBy = "TimestampDescending" - ScanByTimestamp_ascending ScanBy = "TimestampAscending" + ScanByTimestampDescending ScanBy = "TimestampDescending" + ScanByTimestampAscending ScanBy = "TimestampAscending" ) // Values returns all known values for ScanBy. Note that this can be expanded in @@ -194,9 +194,9 @@ type StateValue string // Enum values for StateValue const ( - StateValueOk StateValue = "OK" - StateValueAlarm StateValue = "ALARM" - StateValueInsufficient_data StateValue = "INSUFFICIENT_DATA" + StateValueOk StateValue = "OK" + StateValueAlarm StateValue = "ALARM" + StateValueInsufficientData StateValue = "INSUFFICIENT_DATA" ) // Values returns all known values for StateValue. Note that this can be expanded @@ -238,9 +238,9 @@ type StatusCode string // Enum values for StatusCode const ( - StatusCodeComplete StatusCode = "Complete" - StatusCodeInternal_error StatusCode = "InternalError" - StatusCodePartial_data StatusCode = "PartialData" + StatusCodeComplete StatusCode = "Complete" + StatusCodeInternalError StatusCode = "InternalError" + StatusCodePartialData StatusCode = "PartialData" ) // Values returns all known values for StatusCode. 
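A statistic set that satisfies the first percentile condition described in the PutMetricData hunk above (a SampleCount of 1 with equal Min, Max, and Sum) could be published as in the sketch below; the MetricDatum and StatisticSet member names are assumptions based on the CloudWatch API rather than this patch.

package cloudwatchexamples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/cloudwatch"
	"github.com/aws/aws-sdk-go-v2/service/cloudwatch/types"
)

// putSingleObservation publishes one observation as a statistic set with
// SampleCount 1 and equal Minimum, Maximum, and Sum, so percentile statistics
// remain retrievable for the metric.
func putSingleObservation(ctx context.Context, client *cloudwatch.Client, value float64) error {
	_, err := client.PutMetricData(ctx, &cloudwatch.PutMetricDataInput{
		Namespace: aws.String("MyApp"),
		MetricData: []*types.MetricDatum{
			{
				MetricName: aws.String("RequestLatency"),
				StatisticValues: &types.StatisticSet{
					SampleCount: aws.Float64(1),
					Sum:         aws.Float64(value),
					Minimum:     aws.Float64(value),
					Maximum:     aws.Float64(value),
				},
			},
		},
	})
	return err
}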
Note that this can be expanded diff --git a/service/cloudwatch/types/types.go b/service/cloudwatch/types/types.go index 6a799510864..54a0a37d8ae 100644 --- a/service/cloudwatch/types/types.go +++ b/service/cloudwatch/types/types.go @@ -636,15 +636,15 @@ type MetricStat struct { // specifies a time stamp that is greater than 3 hours ago, you must specify the // period as follows or no data points in that time range is returned: // - // * Start + // * Start // time between 3 hours and 15 days ago - Use a multiple of 60 seconds (1 // minute). // - // * Start time between 15 and 63 days ago - Use a multiple of 300 + // * Start time between 15 and 63 days ago - Use a multiple of 300 // seconds (5 minutes). // - // * Start time greater than 63 days ago - Use a multiple - // of 3600 seconds (1 hour). + // * Start time greater than 63 days ago - Use a multiple of + // 3600 seconds (1 hour). // // This member is required. Period *int32 diff --git a/service/cloudwatchevents/api_op_PutTargets.go b/service/cloudwatchevents/api_op_PutTargets.go index 158567d3725..7c346ef053f 100644 --- a/service/cloudwatchevents/api_op_PutTargets.go +++ b/service/cloudwatchevents/api_op_PutTargets.go @@ -16,62 +16,61 @@ import ( // when a rule is triggered. You can configure the following as targets for // Events: // -// * EC2 instances +// * EC2 instances // -// * SSM Run Command +// * SSM Run Command // -// * SSM Automation +// * SSM Automation // -// * -// AWS Lambda functions +// * AWS Lambda +// functions // -// * Data streams in Amazon Kinesis Data Streams +// * Data streams in Amazon Kinesis Data Streams // -// * -// Data delivery streams in Amazon Kinesis Data Firehose +// * Data delivery +// streams in Amazon Kinesis Data Firehose // -// * Amazon ECS tasks +// * Amazon ECS tasks // +// * AWS Step +// Functions state machines // -// * AWS Step Functions state machines +// * AWS Batch jobs // -// * AWS Batch jobs +// * AWS CodeBuild projects // -// * AWS CodeBuild -// projects +// * +// Pipelines in AWS CodePipeline // -// * Pipelines in AWS CodePipeline +// * Amazon Inspector assessment templates // -// * Amazon Inspector assessment -// templates +// * Amazon +// SNS topics // -// * Amazon SNS topics +// * Amazon SQS queues, including FIFO queues // -// * Amazon SQS queues, including FIFO -// queues +// * The default event bus +// of another AWS account // -// * The default event bus of another AWS account +// * Amazon API Gateway REST APIs // -// * Amazon API -// Gateway REST APIs +// * Redshift Clusters to +// invoke Data API ExecuteStatement on // -// * Redshift Clusters to invoke Data API ExecuteStatement -// on -// -// Creating rules with built-in targets is supported only in the AWS Management -// Console. The built-in targets are EC2 CreateSnapshot API call, EC2 -// RebootInstances API call, EC2 StopInstances API call, and EC2 TerminateInstances -// API call. For some target types, PutTargets provides target-specific parameters. -// If the target is a Kinesis data stream, you can optionally specify which shard -// the event goes to by using the KinesisParameters argument. To invoke a command -// on multiple EC2 instances with one rule, you can use the RunCommandParameters -// field. To be able to make API calls against the resources that you own, Amazon -// EventBridge (CloudWatch Events) needs the appropriate permissions. For AWS -// Lambda and Amazon SNS resources, EventBridge relies on resource-based policies. 
-// For EC2 instances, Kinesis data streams, AWS Step Functions state machines and -// API Gateway REST APIs, EventBridge relies on IAM roles that you specify in the -// RoleARN argument in PutTargets. For more information, see Authentication and -// Access Control +// Creating rules with built-in targets is +// supported only in the AWS Management Console. The built-in targets are EC2 +// CreateSnapshot API call, EC2 RebootInstances API call, EC2 StopInstances API +// call, and EC2 TerminateInstances API call. For some target types, PutTargets +// provides target-specific parameters. If the target is a Kinesis data stream, you +// can optionally specify which shard the event goes to by using the +// KinesisParameters argument. To invoke a command on multiple EC2 instances with +// one rule, you can use the RunCommandParameters field. To be able to make API +// calls against the resources that you own, Amazon EventBridge (CloudWatch Events) +// needs the appropriate permissions. For AWS Lambda and Amazon SNS resources, +// EventBridge relies on resource-based policies. For EC2 instances, Kinesis data +// streams, AWS Step Functions state machines and API Gateway REST APIs, +// EventBridge relies on IAM roles that you specify in the RoleARN argument in +// PutTargets. For more information, see Authentication and Access Control // (https://docs.aws.amazon.com/eventbridge/latest/userguide/auth-and-access-control-eventbridge.html) // in the Amazon EventBridge User Guide. If another AWS account is in the same // region and has granted you permission (using PutPermission), you can send events @@ -94,32 +93,31 @@ import ( // are mutually exclusive and optional parameters of a target. When a rule is // triggered due to a matched event: // -// * If none of the following arguments are +// * If none of the following arguments are // specified for a target, then the entire event is passed to the target in JSON // format (unless the target is Amazon EC2 Run Command or Amazon ECS task, in which // case nothing from the event is passed to the target). // -// * If Input is -// specified in the form of valid JSON, then the matched event is overridden with -// this constant. -// -// * If InputPath is specified in the form of JSONPath (for -// example, $.detail), then only the part of the event specified in the path is -// passed to the target (for example, only the detail part of the event is -// passed). -// -// * If InputTransformer is specified, then one or more specified -// JSONPaths are extracted from the event and used as values in a template that you -// specify as the input to the target. -// -// When you specify InputPath or -// InputTransformer, you must use JSON dot notation, not bracket notation. When you -// add targets to a rule and the associated rule triggers soon after, new or -// updated targets might not be immediately invoked. Allow a short period of time -// for changes to take effect. This action can partially fail if too many requests -// are made at the same time. If that happens, FailedEntryCount is non-zero in the -// response and each entry in FailedEntries provides the ID of the failed target -// and the error code. +// * If Input is specified +// in the form of valid JSON, then the matched event is overridden with this +// constant. +// +// * If InputPath is specified in the form of JSONPath (for example, +// $.detail), then only the part of the event specified in the path is passed to +// the target (for example, only the detail part of the event is passed). 
+// +// * If +// InputTransformer is specified, then one or more specified JSONPaths are +// extracted from the event and used as values in a template that you specify as +// the input to the target. +// +// When you specify InputPath or InputTransformer, you +// must use JSON dot notation, not bracket notation. When you add targets to a rule +// and the associated rule triggers soon after, new or updated targets might not be +// immediately invoked. Allow a short period of time for changes to take effect. +// This action can partially fail if too many requests are made at the same time. +// If that happens, FailedEntryCount is non-zero in the response and each entry in +// FailedEntries provides the ID of the failed target and the error code. func (c *Client) PutTargets(ctx context.Context, params *PutTargetsInput, optFns ...func(*Options)) (*PutTargetsOutput, error) { if params == nil { params = &PutTargetsInput{} diff --git a/service/cloudwatchevents/doc.go b/service/cloudwatchevents/doc.go index 6c99936769a..22fb9e752f2 100644 --- a/service/cloudwatchevents/doc.go +++ b/service/cloudwatchevents/doc.go @@ -9,19 +9,18 @@ // them to targets to take action. You can also use rules to take action on a // predetermined schedule. For example, you can configure rules to: // -// * +// * // Automatically invoke an AWS Lambda function to update DNS entries when an event // notifies you that Amazon EC2 instance enters the running state. // -// * Direct +// * Direct // specific API records from AWS CloudTrail to an Amazon Kinesis data stream for // detailed analysis of potential security or availability risks. // -// * -// Periodically invoke a built-in target to create a snapshot of an Amazon EBS -// volume. +// * Periodically +// invoke a built-in target to create a snapshot of an Amazon EBS volume. // -// For more information about the features of Amazon EventBridge, see the -// Amazon EventBridge User Guide -// (https://docs.aws.amazon.com/eventbridge/latest/userguide). +// For more +// information about the features of Amazon EventBridge, see the Amazon EventBridge +// User Guide (https://docs.aws.amazon.com/eventbridge/latest/userguide). package cloudwatchevents diff --git a/service/cloudwatchevents/types/types.go b/service/cloudwatchevents/types/types.go index 53b0457f91d..f29410ea81a 100644 --- a/service/cloudwatchevents/types/types.go +++ b/service/cloudwatchevents/types/types.go @@ -228,32 +228,31 @@ type InputTransformer struct { // valid JSON. If InputTemplate is a JSON object (surrounded by curly braces), the // following restrictions apply: // - // * The placeholder cannot be used as an object + // * The placeholder cannot be used as an object // key. // - // * Object values cannot include quote marks. + // * Object values cannot include quote marks. // - // The following example - // shows the syntax for using InputPathsMap and InputTemplate. - // "InputTransformer": - // { + // The following example shows + // the syntax for using InputPathsMap and InputTemplate. 
"InputTransformer": + // + // { // - // "InputPathsMap": {"instance": - // "$.detail.instance","status": "$.detail.status"}, + // "InputPathsMap": {"instance": "$.detail.instance","status": + // "$.detail.status"}, // - // "InputTemplate": " is in - // state " + // "InputTemplate": " is in state " // - // } To have the InputTemplate include quote marks within a JSON string, - // escape each quote marks with a slash, as in the following example: - // "InputTransformer": + // } To have the + // InputTemplate include quote marks within a JSON string, escape each quote marks + // with a slash, as in the following example: "InputTransformer": // { // - // "InputPathsMap": {"instance": - // "$.detail.instance","status": "$.detail.status"}, // - // "InputTemplate": " is in - // state """ + // "InputPathsMap": {"instance": "$.detail.instance","status": + // "$.detail.status"}, + // + // "InputTemplate": " is in state """ // // } // diff --git a/service/cloudwatchlogs/api_op_CreateLogGroup.go b/service/cloudwatchlogs/api_op_CreateLogGroup.go index c097b27df99..7e624b52aae 100644 --- a/service/cloudwatchlogs/api_op_CreateLogGroup.go +++ b/service/cloudwatchlogs/api_op_CreateLogGroup.go @@ -14,19 +14,19 @@ import ( // groups per account. You must use the following guidelines when naming a log // group: // -// * Log group names must be unique within a region for an AWS -// account. +// * Log group names must be unique within a region for an AWS account. // -// * Log group names can be between 1 and 512 characters long. +// * +// Log group names can be between 1 and 512 characters long. // -// * -// Log group names consist of the following characters: a-z, A-Z, 0-9, '_' -// (underscore), '-' (hyphen), '/' (forward slash), '.' (period), and '#' (number -// sign) +// * Log group names +// consist of the following characters: a-z, A-Z, 0-9, '_' (underscore), '-' +// (hyphen), '/' (forward slash), '.' (period), and '#' (number sign) // -// When you create a log group, by default the log events in the log group -// never expire. To set a retention policy so that events expire and are deleted -// after a specified time, use PutRetentionPolicy +// When you +// create a log group, by default the log events in the log group never expire. To +// set a retention policy so that events expire and are deleted after a specified +// time, use PutRetentionPolicy // (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutRetentionPolicy.html). // If you associate a AWS Key Management Service (AWS KMS) customer master key // (CMK) with the log group, ingested data is encrypted using the CMK. This diff --git a/service/cloudwatchlogs/api_op_CreateLogStream.go b/service/cloudwatchlogs/api_op_CreateLogStream.go index d56a1e17de4..16c3a225aa8 100644 --- a/service/cloudwatchlogs/api_op_CreateLogStream.go +++ b/service/cloudwatchlogs/api_op_CreateLogStream.go @@ -17,13 +17,13 @@ import ( // CreateLogStream operations, after which transactions are throttled. You must use // the following guidelines when naming a log stream: // -// * Log stream names must -// be unique within the log group. +// * Log stream names must be +// unique within the log group. // -// * Log stream names can be between 1 and 512 +// * Log stream names can be between 1 and 512 // characters long. // -// * The ':' (colon) and '*' (asterisk) characters are not +// * The ':' (colon) and '*' (asterisk) characters are not // allowed. 
func (c *Client) CreateLogStream(ctx context.Context, params *CreateLogStreamInput, optFns ...func(*Options)) (*CreateLogStreamOutput, error) { if params == nil { diff --git a/service/cloudwatchlogs/api_op_PutLogEvents.go b/service/cloudwatchlogs/api_op_PutLogEvents.go index b9358a6d8b8..735fc379b94 100644 --- a/service/cloudwatchlogs/api_op_PutLogEvents.go +++ b/service/cloudwatchlogs/api_op_PutLogEvents.go @@ -20,36 +20,35 @@ import ( // successful or one might be rejected. The batch of events must satisfy the // following constraints: // -// * The maximum batch size is 1,048,576 bytes. This -// size is calculated as the sum of all event messages in UTF-8, plus 26 bytes for -// each log event. +// * The maximum batch size is 1,048,576 bytes. This size +// is calculated as the sum of all event messages in UTF-8, plus 26 bytes for each +// log event. // -// * None of the log events in the batch can be more than 2 -// hours in the future. +// * None of the log events in the batch can be more than 2 hours in +// the future. // -// * None of the log events in the batch can be older -// than 14 days or older than the retention period of the log group. +// * None of the log events in the batch can be older than 14 days or +// older than the retention period of the log group. // -// * The log -// events in the batch must be in chronological order by their timestamp. The -// timestamp is the time the event occurred, expressed as the number of -// milliseconds after Jan 1, 1970 00:00:00 UTC. (In AWS Tools for PowerShell and -// the AWS SDK for .NET, the timestamp is specified in .NET format: -// yyyy-mm-ddThh:mm:ss. For example, 2017-09-15T13:45:30.) +// * The log events in the batch +// must be in chronological order by their timestamp. The timestamp is the time the +// event occurred, expressed as the number of milliseconds after Jan 1, 1970 +// 00:00:00 UTC. (In AWS Tools for PowerShell and the AWS SDK for .NET, the +// timestamp is specified in .NET format: yyyy-mm-ddThh:mm:ss. For example, +// 2017-09-15T13:45:30.) // -// * A batch of log -// events in a single request cannot span more than 24 hours. Otherwise, the -// operation fails. +// * A batch of log events in a single request cannot span +// more than 24 hours. Otherwise, the operation fails. // -// * The maximum number of log events in a batch is 10,000. +// * The maximum number of log +// events in a batch is 10,000. // +// * There is a quota of 5 requests per second per +// log stream. Additional requests are throttled. This quota can't be changed. // -// * There is a quota of 5 requests per second per log stream. Additional requests -// are throttled. This quota can't be changed. -// -// If a call to PutLogEvents returns -// "UnrecognizedClientException" the most likely cause is an invalid AWS access key -// ID or secret key. +// If +// a call to PutLogEvents returns "UnrecognizedClientException" the most likely +// cause is an invalid AWS access key ID or secret key. 
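Reviewer note: the PutLogEvents batching constraints above (size, chronological ordering, 24-hour span, sequence token handling) are easiest to see in code. This is a minimal sketch, reusing the hypothetical group and stream names from the sketch above; the event payloads are invented, and the pointer-heavy slice and field shapes follow this snapshot of the generated code.

package examples

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs"
	"github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs/types"
)

// putTwoEvents uploads a small batch that satisfies the constraints above:
// events are in chronological order, well under the size and count limits,
// and span far less than 24 hours.
func putTwoEvents(ctx context.Context, svc *cloudwatchlogs.Client, seqToken *string) (*string, error) {
	// Timestamps are milliseconds since Jan 1, 1970 00:00:00 UTC.
	now := time.Now().UnixNano() / int64(time.Millisecond)

	out, err := svc.PutLogEvents(ctx, &cloudwatchlogs.PutLogEventsInput{
		LogGroupName:  aws.String("/my-app/web"), // hypothetical
		LogStreamName: aws.String("instance-1"),  // hypothetical
		SequenceToken: seqToken,                  // nil for the first call on a new stream
		LogEvents: []*types.InputLogEvent{
			{Timestamp: aws.Int64(now), Message: aws.String("request started")},
			{Timestamp: aws.Int64(now + 1), Message: aws.String("request finished")},
		},
	})
	if err != nil {
		return nil, err
	}
	// The returned token must be passed to the next PutLogEvents call on this stream.
	return out.NextSequenceToken, nil
}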
func (c *Client) PutLogEvents(ctx context.Context, params *PutLogEventsInput, optFns ...func(*Options)) (*PutLogEventsOutput, error) { if params == nil { params = &PutLogEventsInput{} diff --git a/service/cloudwatchlogs/api_op_PutSubscriptionFilter.go b/service/cloudwatchlogs/api_op_PutSubscriptionFilter.go index 263e82b0e7c..9f0e9bad239 100644 --- a/service/cloudwatchlogs/api_op_PutSubscriptionFilter.go +++ b/service/cloudwatchlogs/api_op_PutSubscriptionFilter.go @@ -19,26 +19,26 @@ import ( // the receiving service, they are Base64 encoded and compressed with the gzip // format. The following destinations are supported for subscription filters: // +// * An +// Amazon Kinesis stream belonging to the same account as the subscription filter, +// for same-account delivery. // -// * An Amazon Kinesis stream belonging to the same account as the subscription -// filter, for same-account delivery. +// * A logical destination that belongs to a different +// account, for cross-account delivery. // -// * A logical destination that belongs to -// a different account, for cross-account delivery. +// * An Amazon Kinesis Firehose delivery +// stream that belongs to the same account as the subscription filter, for +// same-account delivery. // -// * An Amazon Kinesis -// Firehose delivery stream that belongs to the same account as the subscription -// filter, for same-account delivery. +// * An AWS Lambda function that belongs to the same +// account as the subscription filter, for same-account delivery. // -// * An AWS Lambda function that belongs to -// the same account as the subscription filter, for same-account delivery. -// -// There -// can only be one subscription filter associated with a log group. If you are -// updating an existing filter, you must specify the correct name in filterName. -// Otherwise, the call fails because you cannot associate a second filter with a -// log group. To perform a PutSubscriptionFilter operation, you must also have the -// iam:PassRole permission. +// There can only +// be one subscription filter associated with a log group. If you are updating an +// existing filter, you must specify the correct name in filterName. Otherwise, the +// call fails because you cannot associate a second filter with a log group. To +// perform a PutSubscriptionFilter operation, you must also have the iam:PassRole +// permission. func (c *Client) PutSubscriptionFilter(ctx context.Context, params *PutSubscriptionFilterInput, optFns ...func(*Options)) (*PutSubscriptionFilterOutput, error) { if params == nil { params = &PutSubscriptionFilterInput{} @@ -59,19 +59,19 @@ type PutSubscriptionFilterInput struct { // The ARN of the destination to deliver matching log events to. Currently, the // supported destinations are: // - // * An Amazon Kinesis stream belonging to the - // same account as the subscription filter, for same-account delivery. + // * An Amazon Kinesis stream belonging to the same + // account as the subscription filter, for same-account delivery. // - // * A - // logical destination (specified using an ARN) belonging to a different account, - // for cross-account delivery. + // * A logical + // destination (specified using an ARN) belonging to a different account, for + // cross-account delivery. // - // * An Amazon Kinesis Firehose delivery stream - // belonging to the same account as the subscription filter, for same-account - // delivery. 
+ // * An Amazon Kinesis Firehose delivery stream belonging + // to the same account as the subscription filter, for same-account delivery. // - // * An AWS Lambda function belonging to the same account as the - // subscription filter, for same-account delivery. + // * An + // AWS Lambda function belonging to the same account as the subscription filter, + // for same-account delivery. // // This member is required. DestinationArn *string diff --git a/service/cloudwatchlogs/doc.go b/service/cloudwatchlogs/doc.go index f8d09c99c47..7d764700549 100644 --- a/service/cloudwatchlogs/doc.go +++ b/service/cloudwatchlogs/doc.go @@ -9,7 +9,7 @@ // CloudWatch Logs commands in the AWS CLI, CloudWatch Logs API, or CloudWatch Logs // SDK. You can use CloudWatch Logs to: // -// * Monitor logs from EC2 instances in +// * Monitor logs from EC2 instances in // real-time: You can use CloudWatch Logs to monitor applications and systems using // log data. For example, CloudWatch Logs can track the number of errors that occur // in your application logs and send you a notification whenever the rate of errors @@ -21,15 +21,15 @@ // are searching for is found, CloudWatch Logs reports the data to a CloudWatch // metric that you specify. // -// * Monitor AWS CloudTrail logged events: You can -// create alarms in CloudWatch and receive notifications of particular API activity -// as captured by CloudTrail. You can use the notification to perform +// * Monitor AWS CloudTrail logged events: You can create +// alarms in CloudWatch and receive notifications of particular API activity as +// captured by CloudTrail. You can use the notification to perform // troubleshooting. // -// * Archive log data: You can use CloudWatch Logs to store -// your log data in highly durable storage. You can change the log retention -// setting so that any log events older than this setting are automatically -// deleted. The CloudWatch Logs agent makes it easy to quickly send both rotated -// and non-rotated log data off of a host and into the log service. You can then -// access the raw log data when you need it. +// * Archive log data: You can use CloudWatch Logs to store your +// log data in highly durable storage. You can change the log retention setting so +// that any log events older than this setting are automatically deleted. The +// CloudWatch Logs agent makes it easy to quickly send both rotated and non-rotated +// log data off of a host and into the log service. You can then access the raw log +// data when you need it. 
package cloudwatchlogs diff --git a/service/cloudwatchlogs/types/enums.go b/service/cloudwatchlogs/types/enums.go index 46fd35455d0..338eb5bd51f 100644 --- a/service/cloudwatchlogs/types/enums.go +++ b/service/cloudwatchlogs/types/enums.go @@ -24,12 +24,12 @@ type ExportTaskStatusCode string // Enum values for ExportTaskStatusCode const ( - ExportTaskStatusCodeCancelled ExportTaskStatusCode = "CANCELLED" - ExportTaskStatusCodeCompleted ExportTaskStatusCode = "COMPLETED" - ExportTaskStatusCodeFailed ExportTaskStatusCode = "FAILED" - ExportTaskStatusCodePending ExportTaskStatusCode = "PENDING" - ExportTaskStatusCodePending_cancel ExportTaskStatusCode = "PENDING_CANCEL" - ExportTaskStatusCodeRunning ExportTaskStatusCode = "RUNNING" + ExportTaskStatusCodeCancelled ExportTaskStatusCode = "CANCELLED" + ExportTaskStatusCodeCompleted ExportTaskStatusCode = "COMPLETED" + ExportTaskStatusCodeFailed ExportTaskStatusCode = "FAILED" + ExportTaskStatusCodePending ExportTaskStatusCode = "PENDING" + ExportTaskStatusCodePendingCancel ExportTaskStatusCode = "PENDING_CANCEL" + ExportTaskStatusCodeRunning ExportTaskStatusCode = "RUNNING" ) // Values returns all known values for ExportTaskStatusCode. Note that this can be diff --git a/service/codeartifact/api_op_AssociateExternalConnection.go b/service/codeartifact/api_op_AssociateExternalConnection.go index 10a63a941ac..01f56b54044 100644 --- a/service/codeartifact/api_op_AssociateExternalConnection.go +++ b/service/codeartifact/api_op_AssociateExternalConnection.go @@ -39,22 +39,21 @@ type AssociateExternalConnectionInput struct { // The name of the external connection to add to the repository. The following // values are supported: // - // * public:npmjs - for the npm public repository. + // * public:npmjs - for the npm public repository. // + // * + // public:pypi - for the Python Package Index. // - // * public:pypi - for the Python Package Index. + // * public:maven-central - for Maven + // Central. // - // * public:maven-central - for - // Maven Central. + // * public:maven-googleandroid - for the Google Android repository. // - // * public:maven-googleandroid - for the Google Android - // repository. + // * + // public:maven-gradleplugins - for the Gradle plugins repository. // - // * public:maven-gradleplugins - for the Gradle plugins - // repository. - // - // * public:maven-commonsware - for the CommonsWare Android - // repository. + // * + // public:maven-commonsware - for the CommonsWare Android repository. // // This member is required. ExternalConnection *string diff --git a/service/codeartifact/api_op_CopyPackageVersions.go b/service/codeartifact/api_op_CopyPackageVersions.go index a69be08c641..1280425c478 100644 --- a/service/codeartifact/api_op_CopyPackageVersions.go +++ b/service/codeartifact/api_op_CopyPackageVersions.go @@ -42,13 +42,13 @@ type CopyPackageVersionsInput struct { // The format of the package that is copied. The valid package types are: // - // * - // npm: A Node Package Manager (npm) package. + // * npm: A + // Node Package Manager (npm) package. // - // * pypi: A Python Package Index - // (PyPI) package. + // * pypi: A Python Package Index (PyPI) + // package. // - // * maven: A Maven package that contains compiled code in a + // * maven: A Maven package that contains compiled code in a // distributable format, such as a JAR file. // // This member is required. @@ -83,14 +83,14 @@ type CopyPackageVersionsInput struct { // The namespace of the package. 
The package component that specifies its namespace // depends on its type. For example: // - // * The namespace of a Maven package is its + // * The namespace of a Maven package is its // groupId. // - // * The namespace of an npm package is its scope. + // * The namespace of an npm package is its scope. // - // * A Python - // package does not contain a corresponding component, so Python packages do not - // have a namespace. + // * A Python package + // does not contain a corresponding component, so Python packages do not have a + // namespace. Namespace *string // A list of key-value pairs. The keys are package versions and the values are @@ -110,19 +110,19 @@ type CopyPackageVersionsOutput struct { // A map of package versions that failed to copy and their error codes. The // possible error codes are in the PackageVersionError data type. They are: // - // * + // * // ALREADY_EXISTS // - // * MISMATCHED_REVISION + // * MISMATCHED_REVISION // - // * MISMATCHED_STATUS + // * MISMATCHED_STATUS // - // * - // NOT_ALLOWED + // * NOT_ALLOWED // - // * NOT_FOUND + // * + // NOT_FOUND // - // * SKIPPED + // * SKIPPED FailedVersions map[string]*types.PackageVersionError // A list of the package versions that were successfully copied to your repository. diff --git a/service/codeartifact/api_op_DeletePackageVersions.go b/service/codeartifact/api_op_DeletePackageVersions.go index f130f9c9cb5..58f03b6200e 100644 --- a/service/codeartifact/api_op_DeletePackageVersions.go +++ b/service/codeartifact/api_op_DeletePackageVersions.go @@ -43,12 +43,12 @@ type DeletePackageVersionsInput struct { // The format of the package versions to delete. The valid values are: // - // * npm + // * npm // + // * + // pypi // - // * pypi - // - // * maven + // * maven // // This member is required. Format types.PackageFormat @@ -74,29 +74,29 @@ type DeletePackageVersionsInput struct { // The expected status of the package version to delete. Valid values are: // - // * + // * // Published // - // * Unfinished + // * Unfinished // - // * Unlisted + // * Unlisted // - // * Archived + // * Archived // - // * Disposed + // * Disposed ExpectedStatus types.PackageVersionStatus // The namespace of the package. The package component that specifies its namespace // depends on its type. For example: // - // * The namespace of a Maven package is its + // * The namespace of a Maven package is its // groupId. // - // * The namespace of an npm package is its scope. + // * The namespace of an npm package is its scope. // - // * A Python - // package does not contain a corresponding component, so Python packages do not - // have a namespace. + // * A Python package + // does not contain a corresponding component, so Python packages do not have a + // namespace. Namespace *string } @@ -105,19 +105,18 @@ type DeletePackageVersionsOutput struct { // A PackageVersionError object that contains a map of errors codes for the deleted // package that failed. The possible error codes are: // - // * ALREADY_EXISTS + // * ALREADY_EXISTS // - // * + // * // MISMATCHED_REVISION // - // * MISMATCHED_STATUS + // * MISMATCHED_STATUS // - // * NOT_ALLOWED + // * NOT_ALLOWED // - // * - // NOT_FOUND + // * NOT_FOUND // - // * SKIPPED + // * SKIPPED FailedVersions map[string]*types.PackageVersionError // A list of the package versions that were successfully deleted. 
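Reviewer note: the FailedVersions map documented above pairs naturally with the PackageVersionErrorCode constants that are renamed elsewhere in this patch (for example PackageVersionErrorCodeNotFound). This is a minimal sketch, assuming a *codeartifact.Client has already been constructed; the domain, repository, package, and version values are hypothetical.

package examples

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/codeartifact"
	"github.com/aws/aws-sdk-go-v2/service/codeartifact/types"
)

// deleteOneVersion deletes a single npm package version and reports any
// per-version failure using the renamed PackageVersionErrorCode values.
func deleteOneVersion(ctx context.Context, svc *codeartifact.Client) error {
	out, err := svc.DeletePackageVersions(ctx, &codeartifact.DeletePackageVersionsInput{
		Domain:     aws.String("my-domain"),  // hypothetical
		Repository: aws.String("my-repo"),    // hypothetical
		Format:     types.PackageFormatNpm,
		Package:    aws.String("my-package"), // hypothetical
		Versions:   []*string{aws.String("1.0.0")},
	})
	if err != nil {
		return err
	}
	for version, verr := range out.FailedVersions {
		switch verr.ErrorCode {
		case types.PackageVersionErrorCodeNotFound:
			fmt.Printf("%s was not found\n", version)
		default:
			fmt.Printf("%s failed with code %s\n", version, verr.ErrorCode)
		}
	}
	return nil
}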
diff --git a/service/codeartifact/api_op_DescribePackageVersion.go b/service/codeartifact/api_op_DescribePackageVersion.go index a2c226c07b5..9fc05aec5f6 100644 --- a/service/codeartifact/api_op_DescribePackageVersion.go +++ b/service/codeartifact/api_op_DescribePackageVersion.go @@ -40,11 +40,11 @@ type DescribePackageVersionInput struct { // A format that specifies the type of the requested package version. The valid // values are: // - // * npm + // * npm // - // * pypi + // * pypi // - // * maven + // * maven // // This member is required. Format types.PackageFormat @@ -71,14 +71,14 @@ type DescribePackageVersionInput struct { // The namespace of the package. The package component that specifies its namespace // depends on its type. For example: // - // * The namespace of a Maven package is its + // * The namespace of a Maven package is its // groupId. // - // * The namespace of an npm package is its scope. + // * The namespace of an npm package is its scope. // - // * A Python - // package does not contain a corresponding component, so Python packages do not - // have a namespace. + // * A Python package + // does not contain a corresponding component, so Python packages do not have a + // namespace. Namespace *string } diff --git a/service/codeartifact/api_op_DisposePackageVersions.go b/service/codeartifact/api_op_DisposePackageVersions.go index a80588a9aad..52a78cc4827 100644 --- a/service/codeartifact/api_op_DisposePackageVersions.go +++ b/service/codeartifact/api_op_DisposePackageVersions.go @@ -49,11 +49,11 @@ type DisposePackageVersionsInput struct { // A format that specifies the type of package versions you want to dispose. The // valid values are: // - // * npm + // * npm // - // * pypi + // * pypi // - // * maven + // * maven // // This member is required. Format types.PackageFormat @@ -80,29 +80,29 @@ type DisposePackageVersionsInput struct { // The expected status of the package version to dispose. Valid values are: // - // * + // * // Published // - // * Unfinished + // * Unfinished // - // * Unlisted + // * Unlisted // - // * Archived + // * Archived // - // * Disposed + // * Disposed ExpectedStatus types.PackageVersionStatus // The namespace of the package. The package component that specifies its namespace // depends on its type. For example: // - // * The namespace of a Maven package is its + // * The namespace of a Maven package is its // groupId. // - // * The namespace of an npm package is its scope. + // * The namespace of an npm package is its scope. // - // * A Python - // package does not contain a corresponding component, so Python packages do not - // have a namespace. + // * A Python package + // does not contain a corresponding component, so Python packages do not have a + // namespace. Namespace *string // The revisions of the package versions you want to dispose. @@ -114,19 +114,19 @@ type DisposePackageVersionsOutput struct { // A PackageVersionError object that contains a map of errors codes for the // disposed package versions that failed. The possible error codes are: // - // * + // * // ALREADY_EXISTS // - // * MISMATCHED_REVISION + // * MISMATCHED_REVISION // - // * MISMATCHED_STATUS + // * MISMATCHED_STATUS // - // * - // NOT_ALLOWED + // * NOT_ALLOWED // - // * NOT_FOUND + // * + // NOT_FOUND // - // * SKIPPED + // * SKIPPED FailedVersions map[string]*types.PackageVersionError // A list of the package versions that were successfully disposed. 
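Reviewer note: the recurring namespace comment above (a Maven package's namespace is its groupId, an npm package's namespace is its scope, Python packages have none) becomes concrete in a call like the following. This is a minimal sketch describing the scoped npm package @types/node, with hypothetical domain and repository names and an assumed pre-built client.

package examples

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/codeartifact"
	"github.com/aws/aws-sdk-go-v2/service/codeartifact/types"
)

// describeScopedNpmPackage shows how an npm scope maps to the Namespace field:
// for @types/node, the namespace is "types" and the package name is "node".
func describeScopedNpmPackage(ctx context.Context, svc *codeartifact.Client) error {
	out, err := svc.DescribePackageVersion(ctx, &codeartifact.DescribePackageVersionInput{
		Domain:         aws.String("my-domain"), // hypothetical
		Repository:     aws.String("my-repo"),   // hypothetical
		Format:         types.PackageFormatNpm,
		Namespace:      aws.String("types"),
		Package:        aws.String("node"),
		PackageVersion: aws.String("12.6.9"),
	})
	if err != nil {
		return err
	}
	// Status is one of Published, Unfinished, Unlisted, Archived, or Disposed.
	fmt.Println("status:", out.PackageVersion.Status)
	return nil
}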
diff --git a/service/codeartifact/api_op_GetPackageVersionAsset.go b/service/codeartifact/api_op_GetPackageVersionAsset.go index 39723387d73..711ad665556 100644 --- a/service/codeartifact/api_op_GetPackageVersionAsset.go +++ b/service/codeartifact/api_op_GetPackageVersionAsset.go @@ -46,11 +46,11 @@ type GetPackageVersionAssetInput struct { // A format that specifies the type of the package version with the requested asset // file. The valid values are: // - // * npm + // * npm // - // * pypi + // * pypi // - // * maven + // * maven // // This member is required. Format types.PackageFormat @@ -77,14 +77,14 @@ type GetPackageVersionAssetInput struct { // The namespace of the package. The package component that specifies its namespace // depends on its type. For example: // - // * The namespace of a Maven package is its + // * The namespace of a Maven package is its // groupId. // - // * The namespace of an npm package is its scope. + // * The namespace of an npm package is its scope. // - // * A Python - // package does not contain a corresponding component, so Python packages do not - // have a namespace. + // * A Python package + // does not contain a corresponding component, so Python packages do not have a + // namespace. Namespace *string // The name of the package version revision that contains the requested asset. diff --git a/service/codeartifact/api_op_GetPackageVersionReadme.go b/service/codeartifact/api_op_GetPackageVersionReadme.go index 1f777b27c9a..0a9c916f444 100644 --- a/service/codeartifact/api_op_GetPackageVersionReadme.go +++ b/service/codeartifact/api_op_GetPackageVersionReadme.go @@ -42,11 +42,11 @@ type GetPackageVersionReadmeInput struct { // A format that specifies the type of the package version with the requested // readme file. The valid values are: // - // * npm + // * npm // - // * pypi + // * pypi // - // * maven + // * maven // // This member is required. Format types.PackageFormat @@ -73,14 +73,14 @@ type GetPackageVersionReadmeInput struct { // The namespace of the package. The package component that specifies its namespace // depends on its type. For example: // - // * The namespace of a Maven package is its + // * The namespace of a Maven package is its // groupId. // - // * The namespace of an npm package is its scope. + // * The namespace of an npm package is its scope. // - // * A Python - // package does not contain a corresponding component, so Python packages do not - // have a namespace. + // * A Python package + // does not contain a corresponding component, so Python packages do not have a + // namespace. Namespace *string } @@ -89,24 +89,24 @@ type GetPackageVersionReadmeOutput struct { // The format of the package with the requested readme file. Valid format types // are: // - // * npm + // * npm // - // * pypi + // * pypi // - // * maven + // * maven Format types.PackageFormat // The namespace of the package. The package component that specifies its namespace // depends on its type. For example: // - // * The namespace of a Maven package is its + // * The namespace of a Maven package is its // groupId. // - // * The namespace of an npm package is its scope. + // * The namespace of an npm package is its scope. // - // * A Python - // package does not contain a corresponding component, so Python packages do not - // have a namespace. + // * A Python package + // does not contain a corresponding component, so Python packages do not have a + // namespace. Namespace *string // The name of the package that contains the returned readme file. 
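Reviewer note: GetPackageVersionReadme, documented above, returns the readme text for a package version. This is a minimal sketch with hypothetical domain, repository, and version values and an assumed pre-built client; the Readme output field name follows this snapshot of the generated code.

package examples

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/codeartifact"
	"github.com/aws/aws-sdk-go-v2/service/codeartifact/types"
)

// printReadme fetches and prints the readme text for an unscoped npm package
// version (no Namespace is needed for an unscoped npm package).
func printReadme(ctx context.Context, svc *codeartifact.Client) error {
	out, err := svc.GetPackageVersionReadme(ctx, &codeartifact.GetPackageVersionReadmeInput{
		Domain:         aws.String("my-domain"), // hypothetical
		Repository:     aws.String("my-repo"),   // hypothetical
		Format:         types.PackageFormatNpm,
		Package:        aws.String("webpack"),
		PackageVersion: aws.String("4.41.5"), // hypothetical version
	})
	if err != nil {
		return err
	}
	if out.Readme != nil {
		fmt.Println(*out.Readme)
	}
	return nil
}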
diff --git a/service/codeartifact/api_op_GetRepositoryEndpoint.go b/service/codeartifact/api_op_GetRepositoryEndpoint.go index fd7e42fd32d..6df7fa8018f 100644 --- a/service/codeartifact/api_op_GetRepositoryEndpoint.go +++ b/service/codeartifact/api_op_GetRepositoryEndpoint.go @@ -14,11 +14,11 @@ import ( // Returns the endpoint of a repository for a specific package format. A repository // has one endpoint for each package format: // -// * npm +// * npm // -// * pypi +// * pypi // -// * maven +// * maven func (c *Client) GetRepositoryEndpoint(ctx context.Context, params *GetRepositoryEndpointInput, optFns ...func(*Options)) (*GetRepositoryEndpointOutput, error) { if params == nil { params = &GetRepositoryEndpointInput{} @@ -44,11 +44,11 @@ type GetRepositoryEndpointInput struct { // Returns which endpoint of a repository to return. A repository has one endpoint // for each package format: // - // * npm + // * npm // - // * pypi + // * pypi // - // * maven + // * maven // // This member is required. Format types.PackageFormat diff --git a/service/codeartifact/api_op_ListPackageVersionAssets.go b/service/codeartifact/api_op_ListPackageVersionAssets.go index dcab88ee1e7..ced79fefcd2 100644 --- a/service/codeartifact/api_op_ListPackageVersionAssets.go +++ b/service/codeartifact/api_op_ListPackageVersionAssets.go @@ -40,13 +40,13 @@ type ListPackageVersionAssetsInput struct { // The format of the package that contains the returned package version assets. The // valid package types are: // - // * npm: A Node Package Manager (npm) package. + // * npm: A Node Package Manager (npm) package. // + // * pypi: + // A Python Package Index (PyPI) package. // - // * pypi: A Python Package Index (PyPI) package. - // - // * maven: A Maven package - // that contains compiled code in a distributable format, such as a JAR file. + // * maven: A Maven package that contains + // compiled code in a distributable format, such as a JAR file. // // This member is required. Format types.PackageFormat @@ -77,14 +77,14 @@ type ListPackageVersionAssetsInput struct { // The namespace of the package. The package component that specifies its namespace // depends on its type. For example: // - // * The namespace of a Maven package is its + // * The namespace of a Maven package is its // groupId. // - // * The namespace of an npm package is its scope. + // * The namespace of an npm package is its scope. // - // * A Python - // package does not contain a corresponding component, so Python packages do not - // have a namespace. + // * A Python package + // does not contain a corresponding component, so Python packages do not have a + // namespace. Namespace *string // The token for the next set of results. Use the value returned in the previous @@ -105,14 +105,14 @@ type ListPackageVersionAssetsOutput struct { // The namespace of the package. The package component that specifies its namespace // depends on its type. For example: // - // * The namespace of a Maven package is its + // * The namespace of a Maven package is its // groupId. // - // * The namespace of an npm package is its scope. + // * The namespace of an npm package is its scope. // - // * A Python - // package does not contain a corresponding component, so Python packages do not - // have a namespace. + // * A Python package + // does not contain a corresponding component, so Python packages do not have a + // namespace. Namespace *string // If there are additional results, this is the token for the next set of results. 
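Reviewer note: GetRepositoryEndpoint, documented above, returns one URL per package format; the npm endpoint is what a client would configure as its npm registry. This is a minimal sketch with hypothetical domain and repository names and an assumed pre-built client.

package examples

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/codeartifact"
	"github.com/aws/aws-sdk-go-v2/service/codeartifact/types"
)

// npmEndpoint returns the npm endpoint of a repository, suitable for use as an
// npm registry URL.
func npmEndpoint(ctx context.Context, svc *codeartifact.Client) (string, error) {
	out, err := svc.GetRepositoryEndpoint(ctx, &codeartifact.GetRepositoryEndpointInput{
		Domain:     aws.String("my-domain"), // hypothetical
		Repository: aws.String("my-repo"),   // hypothetical
		Format:     types.PackageFormatNpm,
	})
	if err != nil {
		return "", err
	}
	if out.RepositoryEndpoint == nil {
		return "", fmt.Errorf("no endpoint returned")
	}
	return *out.RepositoryEndpoint, nil
}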
diff --git a/service/codeartifact/api_op_ListPackageVersionDependencies.go b/service/codeartifact/api_op_ListPackageVersionDependencies.go index c47bbdcb103..4b6545ca940 100644 --- a/service/codeartifact/api_op_ListPackageVersionDependencies.go +++ b/service/codeartifact/api_op_ListPackageVersionDependencies.go @@ -44,13 +44,13 @@ type ListPackageVersionDependenciesInput struct { // The format of the package with the requested dependencies. The valid package // types are: // - // * npm: A Node Package Manager (npm) package. + // * npm: A Node Package Manager (npm) package. // - // * pypi: A - // Python Package Index (PyPI) package. + // * pypi: A Python + // Package Index (PyPI) package. // - // * maven: A Maven package that contains - // compiled code in a distributable format, such as a JAR file. + // * maven: A Maven package that contains compiled + // code in a distributable format, such as a JAR file. // // This member is required. Format types.PackageFormat @@ -77,14 +77,14 @@ type ListPackageVersionDependenciesInput struct { // The namespace of the package. The package component that specifies its namespace // depends on its type. For example: // - // * The namespace of a Maven package is its + // * The namespace of a Maven package is its // groupId. // - // * The namespace of an npm package is its scope. + // * The namespace of an npm package is its scope. // - // * A Python - // package does not contain a corresponding component, so Python packages do not - // have a namespace. + // * A Python package + // does not contain a corresponding component, so Python packages do not have a + // namespace. Namespace *string // The token for the next set of results. Use the value returned in the previous @@ -102,24 +102,24 @@ type ListPackageVersionDependenciesOutput struct { // A format that specifies the type of the package that contains the returned // dependencies. The valid values are: // - // * npm + // * npm // - // * pypi + // * pypi // - // * maven + // * maven Format types.PackageFormat // The namespace of the package. The package component that specifies its namespace // depends on its type. For example: // - // * The namespace of a Maven package is its + // * The namespace of a Maven package is its // groupId. // - // * The namespace of an npm package is its scope. + // * The namespace of an npm package is its scope. // - // * A Python - // package does not contain a corresponding component, so Python packages do not - // have a namespace. + // * A Python package + // does not contain a corresponding component, so Python packages do not have a + // namespace. Namespace *string // The token for the next set of results. Use the value returned in the previous diff --git a/service/codeartifact/api_op_ListPackageVersions.go b/service/codeartifact/api_op_ListPackageVersions.go index b85f134ed52..4d452ceca6f 100644 --- a/service/codeartifact/api_op_ListPackageVersions.go +++ b/service/codeartifact/api_op_ListPackageVersions.go @@ -39,13 +39,13 @@ type ListPackageVersionsInput struct { // The format of the returned packages. The valid package types are: // - // * npm: A - // Node Package Manager (npm) package. + // * npm: A Node + // Package Manager (npm) package. // - // * pypi: A Python Package Index (PyPI) + // * pypi: A Python Package Index (PyPI) // package. // - // * maven: A Maven package that contains compiled code in a + // * maven: A Maven package that contains compiled code in a // distributable format, such as a JAR file. // // This member is required. 
@@ -71,14 +71,14 @@ type ListPackageVersionsInput struct { // The namespace of the package. The package component that specifies its namespace // depends on its type. For example: // - // * The namespace of a Maven package is its + // * The namespace of a Maven package is its // groupId. // - // * The namespace of an npm package is its scope. + // * The namespace of an npm package is its scope. // - // * A Python - // package does not contain a corresponding component, so Python packages do not - // have a namespace. + // * A Python package + // does not contain a corresponding component, so Python packages do not have a + // namespace. Namespace *string // The token for the next set of results. Use the value returned in the previous @@ -91,16 +91,16 @@ type ListPackageVersionsInput struct { // A string that specifies the status of the package versions to include in the // returned list. It can be one of the following: // - // * Published + // * Published // - // * - // Unfinished + // * Unfinished // - // * Unlisted + // * + // Unlisted // - // * Archived + // * Archived // - // * Disposed + // * Disposed Status types.PackageVersionStatus } @@ -108,35 +108,35 @@ type ListPackageVersionsOutput struct { // The default package version to display. This depends on the package format: // - // - // * For Maven and PyPI packages, it's the most recently published package + // * + // For Maven and PyPI packages, it's the most recently published package // version. // - // * For npm packages, it's the version referenced by the latest tag. - // If the latest tag is not set, it's the most recently published package version. + // * For npm packages, it's the version referenced by the latest tag. If + // the latest tag is not set, it's the most recently published package version. DefaultDisplayVersion *string // A format of the package. Valid package format values are: // - // * npm + // * npm // - // * - // pypi + // * pypi // - // * maven + // * + // maven Format types.PackageFormat // The namespace of the package. The package component that specifies its namespace // depends on its type. For example: // - // * The namespace of a Maven package is its + // * The namespace of a Maven package is its // groupId. // - // * The namespace of an npm package is its scope. + // * The namespace of an npm package is its scope. // - // * A Python - // package does not contain a corresponding component, so Python packages do not - // have a namespace. + // * A Python package + // does not contain a corresponding component, so Python packages do not have a + // namespace. Namespace *string // If there are additional results, this is the token for the next set of results. diff --git a/service/codeartifact/api_op_ListPackages.go b/service/codeartifact/api_op_ListPackages.go index 3ab40a55d28..00068e98b8e 100644 --- a/service/codeartifact/api_op_ListPackages.go +++ b/service/codeartifact/api_op_ListPackages.go @@ -48,14 +48,14 @@ type ListPackagesInput struct { // The format of the packages. The valid package types are: // - // * npm: A Node - // Package Manager (npm) package. + // * npm: A Node Package + // Manager (npm) package. // - // * pypi: A Python Package Index (PyPI) - // package. + // * pypi: A Python Package Index (PyPI) package. // - // * maven: A Maven package that contains compiled code in a - // distributable format, such as a JAR file. + // * maven: + // A Maven package that contains compiled code in a distributable format, such as a + // JAR file. 
Format types.PackageFormat // The maximum number of results to return per page. @@ -64,14 +64,14 @@ type ListPackagesInput struct { // The namespace of the package. The package component that specifies its namespace // depends on its type. For example: // - // * The namespace of a Maven package is its + // * The namespace of a Maven package is its // groupId. // - // * The namespace of an npm package is its scope. + // * The namespace of an npm package is its scope. // - // * A Python - // package does not contain a corresponding component, so Python packages do not - // have a namespace. + // * A Python package + // does not contain a corresponding component, so Python packages do not have a + // namespace. Namespace *string // The token for the next set of results. Use the value returned in the previous diff --git a/service/codeartifact/api_op_UpdatePackageVersionsStatus.go b/service/codeartifact/api_op_UpdatePackageVersionsStatus.go index 575a9a86bf7..0dd4c6518cb 100644 --- a/service/codeartifact/api_op_UpdatePackageVersionsStatus.go +++ b/service/codeartifact/api_op_UpdatePackageVersionsStatus.go @@ -38,11 +38,11 @@ type UpdatePackageVersionsStatusInput struct { // A format that specifies the type of the package with the statuses to update. The // valid values are: // - // * npm + // * npm // - // * pypi + // * pypi // - // * maven + // * maven // // This member is required. Format types.PackageFormat @@ -81,14 +81,14 @@ type UpdatePackageVersionsStatusInput struct { // The namespace of the package. The package component that specifies its namespace // depends on its type. For example: // - // * The namespace of a Maven package is its + // * The namespace of a Maven package is its // groupId. // - // * The namespace of an npm package is its scope. + // * The namespace of an npm package is its scope. // - // * A Python - // package does not contain a corresponding component, so Python packages do not - // have a namespace. + // * A Python package + // does not contain a corresponding component, so Python packages do not have a + // namespace. Namespace *string // A map of package versions and package version revisions. The map key is the diff --git a/service/codeartifact/doc.go b/service/codeartifact/doc.go index 922fbf217e9..7f0c8fefd35 100644 --- a/service/codeartifact/doc.go +++ b/service/codeartifact/doc.go @@ -13,8 +13,8 @@ // information in this guide to help you work with the following CodeArtifact // components: // -// * Repository: A CodeArtifact repository contains a set of -// package versions +// * Repository: A CodeArtifact repository contains a set of package +// versions // (https://docs.aws.amazon.com/codeartifact/latest/ug/welcome.html#welcome-concepts-package-version), // each of which maps to a set of assets, or files. Repositories are polyglot, so a // single repository can contain packages of any supported type. Each repository @@ -22,13 +22,13 @@ // CLI, the Maven CLI ( mvn ), and pip . You can create up to 100 repositories per // AWS account. // -// * Domain: Repositories are aggregated into a higher-level -// entity known as a domain. All package assets and metadata are stored in the -// domain, but are consumed through repositories. A given package asset, such as a -// Maven JAR file, is stored once per domain, no matter how many repositories it's -// present in. All of the assets and metadata in a domain are encrypted with the -// same customer master key (CMK) stored in AWS Key Management Service (AWS KMS). 
-// Each repository is a member of a single domain and can't be moved to a different +// * Domain: Repositories are aggregated into a higher-level entity +// known as a domain. All package assets and metadata are stored in the domain, but +// are consumed through repositories. A given package asset, such as a Maven JAR +// file, is stored once per domain, no matter how many repositories it's present +// in. All of the assets and metadata in a domain are encrypted with the same +// customer master key (CMK) stored in AWS Key Management Service (AWS KMS). Each +// repository is a member of a single domain and can't be moved to a different // domain. The domain allows organizational policy to be applied across multiple // repositories, such as which accounts can access repositories in the domain, and // which public repositories can be used as sources of packages. Although an @@ -36,159 +36,157 @@ // that contains all published artifacts so that teams can find and share packages // across their organization. // -// * Package: A package is a bundle of software and -// the metadata required to resolve dependencies and install the software. -// CodeArtifact supports npm +// * Package: A package is a bundle of software and the +// metadata required to resolve dependencies and install the software. CodeArtifact +// supports npm // (https://docs.aws.amazon.com/codeartifact/latest/ug/using-npm.html), PyPI // (https://docs.aws.amazon.com/codeartifact/latest/ug/using-python.html), and // Maven (https://docs.aws.amazon.com/codeartifact/latest/ug/using-maven) package // formats. // -// In CodeArtifact, a package consists of: +// In CodeArtifact, a package consists of: // -// * A name (for -// example, webpack is the name of a popular npm package) +// * A name (for example, +// webpack is the name of a popular npm package) // -// * An optional -// namespace (for example, @types in @types/node) +// * An optional namespace (for +// example, @types in @types/node) // -// * A set of versions (for -// example, 1.0.0, 1.0.1, 1.0.2, etc.) +// * A set of versions (for example, 1.0.0, 1.0.1, +// 1.0.2, etc.) // -// * Package-level metadata (for -// example, npm tags) +// * Package-level metadata (for example, npm tags) // -// * Package version: A version of a package, such as -// @types/node 12.6.9. The version number format and semantics vary for different -// package formats. For example, npm package versions must conform to the Semantic -// Versioning specification (https://semver.org/). In CodeArtifact, a package -// version consists of the version identifier, metadata at the package version -// level, and a set of assets. +// * Package +// version: A version of a package, such as @types/node 12.6.9. The version number +// format and semantics vary for different package formats. For example, npm +// package versions must conform to the Semantic Versioning specification +// (https://semver.org/). In CodeArtifact, a package version consists of the +// version identifier, metadata at the package version level, and a set of +// assets. // -// * Upstream repository: One repository is -// upstream of another when the package versions in it can be accessed from the -// repository endpoint of the downstream repository, effectively merging the -// contents of the two repositories from the point of view of a client. -// CodeArtifact allows creating an upstream relationship between two -// repositories. 
+// * Upstream repository: One repository is upstream of another when the +// package versions in it can be accessed from the repository endpoint of the +// downstream repository, effectively merging the contents of the two repositories +// from the point of view of a client. CodeArtifact allows creating an upstream +// relationship between two repositories. // -// * Asset: An individual file stored in CodeArtifact associated -// with a package version, such as an npm .tgz file or Maven POM and JAR -// files. +// * Asset: An individual file stored in +// CodeArtifact associated with a package version, such as an npm .tgz file or +// Maven POM and JAR files. // // CodeArtifact supports these operations: // -// * +// * // AssociateExternalConnection: Adds an existing external connection to a // repository. // -// * CopyPackageVersions: Copies package versions from one -// repository to another repository in the same domain. -// -// * CreateDomain: -// Creates a domain +// * CopyPackageVersions: Copies package versions from one repository +// to another repository in the same domain. // -// * CreateRepository: Creates a CodeArtifact repository in a -// domain. +// * CreateDomain: Creates a domain // -// * DeleteDomain: Deletes a domain. You cannot delete a domain that -// contains repositories. +// * +// CreateRepository: Creates a CodeArtifact repository in a domain. // -// * DeleteDomainPermissionsPolicy: Deletes the -// resource policy that is set on a domain. +// * +// DeleteDomain: Deletes a domain. You cannot delete a domain that contains +// repositories. // -// * DeletePackageVersions: Deletes -// versions of a package. After a package has been deleted, it can be republished, -// but its assets and metadata cannot be restored because they have been -// permanently removed from storage. +// * DeleteDomainPermissionsPolicy: Deletes the resource policy that +// is set on a domain. // -// * DeleteRepository: Deletes a -// repository. +// * DeletePackageVersions: Deletes versions of a package. +// After a package has been deleted, it can be republished, but its assets and +// metadata cannot be restored because they have been permanently removed from +// storage. // -// * DeleteRepositoryPermissionsPolicy: Deletes the resource -// policy that is set on a repository. +// * DeleteRepository: Deletes a repository. // -// * DescribeDomain: Returns a -// DomainDescription object that contains information about the requested domain. +// * +// DeleteRepositoryPermissionsPolicy: Deletes the resource policy that is set on a +// repository. // +// * DescribeDomain: Returns a DomainDescription object that contains +// information about the requested domain. // -// * DescribePackageVersion: Returns a PackageVersionDescription +// * DescribePackageVersion: Returns a +// PackageVersionDescription // (https://docs.aws.amazon.com/codeartifact/latest/APIReference/API_PackageVersionDescription.html) // object that contains details about a package version. // -// * DescribeRepository: +// * DescribeRepository: // Returns a RepositoryDescription object that contains detailed information about // the requested repository. // -// * DisposePackageVersions: Disposes versions of a +// * DisposePackageVersions: Disposes versions of a // package. A package version with the status Disposed cannot be restored because // they have been permanently removed from storage. // -// * +// * // DisassociateExternalConnection: Removes an existing external connection from a // repository. 
// -// * GetAuthorizationToken: Generates a temporary authorization -// token for accessing repositories in the domain. The token expires the -// authorization period has passed. The default authorization period is 12 hours -// and can be customized to any length with a maximum of 12 hours. +// * GetAuthorizationToken: Generates a temporary authorization token +// for accessing repositories in the domain. The token expires the authorization +// period has passed. The default authorization period is 12 hours and can be +// customized to any length with a maximum of 12 hours. // -// * +// * // GetDomainPermissionsPolicy: Returns the policy of a resource that is attached to // the specified domain. // -// * GetPackageVersionAsset: Returns the contents of an +// * GetPackageVersionAsset: Returns the contents of an // asset that is in a package version. // -// * GetPackageVersionReadme: Gets the -// readme file or descriptive text for a package version. -// -// * -// GetRepositoryEndpoint: Returns the endpoint of a repository for a specific -// package format. A repository has one endpoint for each package format: +// * GetPackageVersionReadme: Gets the readme +// file or descriptive text for a package version. // +// * GetRepositoryEndpoint: +// Returns the endpoint of a repository for a specific package format. A repository +// has one endpoint for each package format: // // * npm // -// * pypi +// * pypi // -// * maven +// * maven // -// * GetRepositoryPermissionsPolicy: -// Returns the resource policy that is set on a repository. +// * +// GetRepositoryPermissionsPolicy: Returns the resource policy that is set on a +// repository. // -// * ListDomains: -// Returns a list of DomainSummary objects. Each returned DomainSummary object -// contains information about a domain. +// * ListDomains: Returns a list of DomainSummary objects. Each +// returned DomainSummary object contains information about a domain. // -// * ListPackages: Lists the packages in -// a repository. +// * +// ListPackages: Lists the packages in a repository. // -// * ListPackageVersionAssets: Lists the assets for a given -// package version. +// * ListPackageVersionAssets: +// Lists the assets for a given package version. // -// * ListPackageVersionDependencies: Returns a list of the -// direct dependencies for a package version. +// * ListPackageVersionDependencies: +// Returns a list of the direct dependencies for a package version. // -// * ListPackageVersions: Returns a -// list of package versions for a specified package in a repository. +// * +// ListPackageVersions: Returns a list of package versions for a specified package +// in a repository. // -// * -// ListRepositories: Returns a list of repositories owned by the AWS account that -// called this method. +// * ListRepositories: Returns a list of repositories owned by +// the AWS account that called this method. // -// * ListRepositoriesInDomain: Returns a list of the -// repositories in a domain. +// * ListRepositoriesInDomain: Returns a +// list of the repositories in a domain. // -// * PutDomainPermissionsPolicy: Attaches a resource -// policy to a domain. +// * PutDomainPermissionsPolicy: Attaches a +// resource policy to a domain. // -// * PutRepositoryPermissionsPolicy: Sets the resource -// policy on a repository that specifies permissions to access it. +// * PutRepositoryPermissionsPolicy: Sets the +// resource policy on a repository that specifies permissions to access it. 
// -// * +// * // UpdatePackageVersionsStatus: Updates the status of one or more versions of a // package. // -// * UpdateRepository: Updates the properties of a repository. +// * UpdateRepository: Updates the properties of a repository. package codeartifact diff --git a/service/codeartifact/types/enums.go b/service/codeartifact/types/enums.go index 0cf281f3397..cb347a03be4 100644 --- a/service/codeartifact/types/enums.go +++ b/service/codeartifact/types/enums.go @@ -82,12 +82,12 @@ type PackageVersionErrorCode string // Enum values for PackageVersionErrorCode const ( - PackageVersionErrorCodeAlready_exists PackageVersionErrorCode = "ALREADY_EXISTS" - PackageVersionErrorCodeMismatched_revision PackageVersionErrorCode = "MISMATCHED_REVISION" - PackageVersionErrorCodeMismatched_status PackageVersionErrorCode = "MISMATCHED_STATUS" - PackageVersionErrorCodeNot_allowed PackageVersionErrorCode = "NOT_ALLOWED" - PackageVersionErrorCodeNot_found PackageVersionErrorCode = "NOT_FOUND" - PackageVersionErrorCodeSkipped PackageVersionErrorCode = "SKIPPED" + PackageVersionErrorCodeAlreadyExists PackageVersionErrorCode = "ALREADY_EXISTS" + PackageVersionErrorCodeMismatchedRevision PackageVersionErrorCode = "MISMATCHED_REVISION" + PackageVersionErrorCodeMismatchedStatus PackageVersionErrorCode = "MISMATCHED_STATUS" + PackageVersionErrorCodeNotAllowed PackageVersionErrorCode = "NOT_ALLOWED" + PackageVersionErrorCodeNotFound PackageVersionErrorCode = "NOT_FOUND" + PackageVersionErrorCodeSkipped PackageVersionErrorCode = "SKIPPED" ) // Values returns all known values for PackageVersionErrorCode. Note that this can @@ -108,7 +108,7 @@ type PackageVersionSortType string // Enum values for PackageVersionSortType const ( - PackageVersionSortTypePublished_time PackageVersionSortType = "PUBLISHED_TIME" + PackageVersionSortTypePublishedTime PackageVersionSortType = "PUBLISHED_TIME" ) // Values returns all known values for PackageVersionSortType. Note that this can @@ -150,11 +150,11 @@ type ResourceType string // Enum values for ResourceType const ( - ResourceTypeDomain ResourceType = "domain" - ResourceTypeRepository ResourceType = "repository" - ResourceTypePackage ResourceType = "package" - ResourceTypePackage_version ResourceType = "package-version" - ResourceTypeAsset ResourceType = "asset" + ResourceTypeDomain ResourceType = "domain" + ResourceTypeRepository ResourceType = "repository" + ResourceTypePackage ResourceType = "package" + ResourceTypePackageVersion ResourceType = "package-version" + ResourceTypeAsset ResourceType = "asset" ) // Values returns all known values for ResourceType. 
Note that this can be expanded @@ -174,11 +174,11 @@ type ValidationExceptionReason string // Enum values for ValidationExceptionReason const ( - ValidationExceptionReasonCannot_parse ValidationExceptionReason = "CANNOT_PARSE" - ValidationExceptionReasonEncryption_key_error ValidationExceptionReason = "ENCRYPTION_KEY_ERROR" - ValidationExceptionReasonField_validation_failed ValidationExceptionReason = "FIELD_VALIDATION_FAILED" - ValidationExceptionReasonUnknown_operation ValidationExceptionReason = "UNKNOWN_OPERATION" - ValidationExceptionReasonOther ValidationExceptionReason = "OTHER" + ValidationExceptionReasonCannotParse ValidationExceptionReason = "CANNOT_PARSE" + ValidationExceptionReasonEncryptionKeyError ValidationExceptionReason = "ENCRYPTION_KEY_ERROR" + ValidationExceptionReasonFieldValidationFailed ValidationExceptionReason = "FIELD_VALIDATION_FAILED" + ValidationExceptionReasonUnknownOperation ValidationExceptionReason = "UNKNOWN_OPERATION" + ValidationExceptionReasonOther ValidationExceptionReason = "OTHER" ) // Values returns all known values for ValidationExceptionReason. Note that this diff --git a/service/codeartifact/types/types.go b/service/codeartifact/types/types.go index 2f151ecbea4..7e08d98c2be 100644 --- a/service/codeartifact/types/types.go +++ b/service/codeartifact/types/types.go @@ -48,10 +48,9 @@ type DomainDescription struct { // The current status of a domain. The valid values are // - // * Active + // * Active // - // * - // Deleted + // * Deleted Status DomainStatus } @@ -79,10 +78,10 @@ type DomainSummary struct { // A string that contains the status of the domain. The valid values are: // - // * + // * // Active // - // * Deleted + // * Deleted Status DomainStatus } @@ -107,14 +106,14 @@ type PackageDependency struct { // The namespace of the package. The package component that specifies its namespace // depends on its type. For example: // - // * The namespace of a Maven package is its + // * The namespace of a Maven package is its // groupId. // - // * The namespace of an npm package is its scope. + // * The namespace of an npm package is its scope. // - // * A Python - // package does not contain a corresponding component, so Python packages do not - // have a namespace. + // * A Python package + // does not contain a corresponding component, so Python packages do not have a + // namespace. Namespace *string // The name of the package that this package depends on. @@ -134,24 +133,24 @@ type PackageSummary struct { // The format of the package. Valid values are: // - // * npm + // * npm // - // * pypi + // * pypi // - // * maven + // * maven Format PackageFormat // The namespace of the package. The package component that specifies its namespace // depends on its type. For example: // - // * The namespace of a Maven package is its + // * The namespace of a Maven package is its // groupId. // - // * The namespace of an npm package is its scope. + // * The namespace of an npm package is its scope. // - // * A Python - // package does not contain a corresponding component, so Python packages do not - // have a namespace. + // * A Python package + // does not contain a corresponding component, so Python packages do not have a + // namespace. Namespace *string // The name of the package. @@ -168,13 +167,13 @@ type PackageVersionDescription struct { // The format of the package version. The valid package formats are: // - // * npm: A - // Node Package Manager (npm) package. + // * npm: A Node + // Package Manager (npm) package. 
// - // * pypi: A Python Package Index (PyPI) + // * pypi: A Python Package Index (PyPI) // package. // - // * maven: A Maven package that contains compiled code in a + // * maven: A Maven package that contains compiled code in a // distributable format, such as a JAR file. Format PackageFormat @@ -187,14 +186,14 @@ type PackageVersionDescription struct { // The namespace of the package. The package component that specifies its namespace // depends on its type. For example: // - // * The namespace of a Maven package is its + // * The namespace of a Maven package is its // groupId. // - // * The namespace of an npm package is its scope. + // * The namespace of an npm package is its scope. // - // * A Python - // package does not contain a corresponding component, so Python packages do not - // have a namespace. + // * A Python package + // does not contain a corresponding component, so Python packages do not have a + // namespace. Namespace *string // The name of the requested package. @@ -213,14 +212,13 @@ type PackageVersionDescription struct { // A string that contains the status of the package version. It can be one of the // following: // - // * Published + // * Published // - // * Unfinished + // * Unfinished // - // * Unlisted - // - // * Archived + // * Unlisted // + // * Archived // // * Disposed Status PackageVersionStatus @@ -239,19 +237,19 @@ type PackageVersionError struct { // The error code associated with the error. Valid error codes are: // - // * + // * // ALREADY_EXISTS // - // * MISMATCHED_REVISION + // * MISMATCHED_REVISION // - // * MISMATCHED_STATUS + // * MISMATCHED_STATUS // - // * - // NOT_ALLOWED + // * NOT_ALLOWED // - // * NOT_FOUND + // * + // NOT_FOUND // - // * SKIPPED + // * SKIPPED ErrorCode PackageVersionErrorCode // The error message associated with the error. @@ -267,14 +265,13 @@ type PackageVersionSummary struct { // A string that contains the status of the package version. It can be one of the // following: // - // * Published - // - // * Unfinished + // * Published // - // * Unlisted + // * Unfinished // - // * Archived + // * Unlisted // + // * Archived // // * Disposed // @@ -337,13 +334,13 @@ type RepositoryExternalConnectionInfo struct { // The package format associated with a repository's external connection. The valid // package formats are: // - // * npm: A Node Package Manager (npm) package. + // * npm: A Node Package Manager (npm) package. // - // * - // pypi: A Python Package Index (PyPI) package. + // * pypi: A + // Python Package Index (PyPI) package. // - // * maven: A Maven package that - // contains compiled code in a distributable format, such as a JAR file. + // * maven: A Maven package that contains + // compiled code in a distributable format, such as a JAR file. PackageFormat PackageFormat // The status of the external connection of a repository. There is one valid value, @@ -399,16 +396,16 @@ type SuccessfulPackageVersionInfo struct { // The status of a package version. 
Valid statuses are: // - // * Published + // * Published // - // * + // * // Unfinished // - // * Unlisted + // * Unlisted // - // * Archived + // * Archived // - // * Disposed + // * Disposed Status PackageVersionStatus } diff --git a/service/codebuild/api_op_CreateProject.go b/service/codebuild/api_op_CreateProject.go index 8e4c348612a..feea145303f 100644 --- a/service/codebuild/api_op_CreateProject.go +++ b/service/codebuild/api_op_CreateProject.go @@ -104,28 +104,28 @@ type CreateProjectInput struct { // A version of the build input to be built for this project. If not specified, the // latest version is used. If specified, it must be one of: // - // * For AWS - // CodeCommit: the commit ID, branch, or Git tag to use. + // * For AWS CodeCommit: + // the commit ID, branch, or Git tag to use. // - // * For GitHub: the - // commit ID, pull request ID, branch name, or tag name that corresponds to the - // version of the source code you want to build. If a pull request ID is specified, - // it must use the format pr/pull-request-ID (for example pr/25). If a branch name - // is specified, the branch's HEAD commit ID is used. If not specified, the default + // * For GitHub: the commit ID, pull + // request ID, branch name, or tag name that corresponds to the version of the + // source code you want to build. If a pull request ID is specified, it must use + // the format pr/pull-request-ID (for example pr/25). If a branch name is + // specified, the branch's HEAD commit ID is used. If not specified, the default // branch's HEAD commit ID is used. // - // * For Bitbucket: the commit ID, branch - // name, or tag name that corresponds to the version of the source code you want to + // * For Bitbucket: the commit ID, branch name, + // or tag name that corresponds to the version of the source code you want to // build. If a branch name is specified, the branch's HEAD commit ID is used. If // not specified, the default branch's HEAD commit ID is used. // - // * For Amazon - // Simple Storage Service (Amazon S3): the version ID of the object that represents - // the build input ZIP file to use. + // * For Amazon Simple + // Storage Service (Amazon S3): the version ID of the object that represents the + // build input ZIP file to use. // - // If sourceVersion is specified at the build - // level, then that version takes precedence over this sourceVersion (at the - // project level). For more information, see Source Version Sample with CodeBuild + // If sourceVersion is specified at the build level, + // then that version takes precedence over this sourceVersion (at the project + // level). For more information, see Source Version Sample with CodeBuild // (https://docs.aws.amazon.com/codebuild/latest/userguide/sample-source-version.html) // in the AWS CodeBuild User Guide. SourceVersion *string diff --git a/service/codebuild/api_op_ListBuildBatches.go b/service/codebuild/api_op_ListBuildBatches.go index 71a0848c295..866bd9b8e34 100644 --- a/service/codebuild/api_op_ListBuildBatches.go +++ b/service/codebuild/api_op_ListBuildBatches.go @@ -42,12 +42,11 @@ type ListBuildBatchesInput struct { // Specifies the sort order of the returned items. Valid values include: // - // * + // * // ASCENDING: List the batch build identifiers in ascending order by identifier. // - // - // * DESCENDING: List the batch build identifiers in descending order by - // identifier. + // * + // DESCENDING: List the batch build identifiers in descending order by identifier. 
SortOrder types.SortOrderType } diff --git a/service/codebuild/api_op_ListBuildBatchesForProject.go b/service/codebuild/api_op_ListBuildBatchesForProject.go index d4ee7a71be2..576664e4b6c 100644 --- a/service/codebuild/api_op_ListBuildBatchesForProject.go +++ b/service/codebuild/api_op_ListBuildBatchesForProject.go @@ -45,12 +45,11 @@ type ListBuildBatchesForProjectInput struct { // Specifies the sort order of the returned items. Valid values include: // - // * + // * // ASCENDING: List the batch build identifiers in ascending order by identifier. // - // - // * DESCENDING: List the batch build identifiers in descending order by - // identifier. + // * + // DESCENDING: List the batch build identifiers in descending order by identifier. SortOrder types.SortOrderType } diff --git a/service/codebuild/api_op_ListBuilds.go b/service/codebuild/api_op_ListBuilds.go index 68bb1a93eed..c1ef56d81c1 100644 --- a/service/codebuild/api_op_ListBuilds.go +++ b/service/codebuild/api_op_ListBuilds.go @@ -39,11 +39,11 @@ type ListBuildsInput struct { // The order to list build IDs. Valid values include: // - // * ASCENDING: List the - // build IDs in ascending order by build ID. + // * ASCENDING: List the build + // IDs in ascending order by build ID. // - // * DESCENDING: List the build IDs - // in descending order by build ID. + // * DESCENDING: List the build IDs in + // descending order by build ID. SortOrder types.SortOrderType } diff --git a/service/codebuild/api_op_ListBuildsForProject.go b/service/codebuild/api_op_ListBuildsForProject.go index 5fb5c9005e8..715a01ec741 100644 --- a/service/codebuild/api_op_ListBuildsForProject.go +++ b/service/codebuild/api_op_ListBuildsForProject.go @@ -45,11 +45,11 @@ type ListBuildsForProjectInput struct { // The order to list build IDs. Valid values include: // - // * ASCENDING: List the - // build IDs in ascending order by build ID. + // * ASCENDING: List the build + // IDs in ascending order by build ID. // - // * DESCENDING: List the build IDs - // in descending order by build ID. + // * DESCENDING: List the build IDs in + // descending order by build ID. SortOrder types.SortOrderType } diff --git a/service/codebuild/api_op_ListProjects.go b/service/codebuild/api_op_ListProjects.go index e914d47c3af..48f1e2955f5 100644 --- a/service/codebuild/api_op_ListProjects.go +++ b/service/codebuild/api_op_ListProjects.go @@ -40,30 +40,29 @@ type ListProjectsInput struct { // The criterion to be used to list build project names. Valid values include: // + // * + // CREATED_TIME: List based on when each build project was created. // - // * CREATED_TIME: List based on when each build project was created. - // - // * + // * // LAST_MODIFIED_TIME: List based on when information about each build project was // last changed. // - // * NAME: List based on each build project's name. + // * NAME: List based on each build project's name. // - // Use - // sortOrder to specify in what order to list the build project names based on the - // preceding criteria. + // Use sortOrder + // to specify in what order to list the build project names based on the preceding + // criteria. SortBy types.ProjectSortByType // The order in which to list build projects. Valid values include: // - // * - // ASCENDING: List in ascending order. + // * ASCENDING: + // List in ascending order. // - // * DESCENDING: List in descending - // order. + // * DESCENDING: List in descending order. // - // Use sortBy to specify the criterion to be used to list build project - // names. 
+ // Use sortBy to + // specify the criterion to be used to list build project names. SortOrder types.SortOrderType } diff --git a/service/codebuild/api_op_ListReportGroups.go b/service/codebuild/api_op_ListReportGroups.go index 11eb82877e0..bf691399995 100644 --- a/service/codebuild/api_op_ListReportGroups.go +++ b/service/codebuild/api_op_ListReportGroups.go @@ -44,14 +44,14 @@ type ListReportGroupsInput struct { // The criterion to be used to list build report groups. Valid values include: // + // * + // CREATED_TIME: List based on when each report group was created. // - // * CREATED_TIME: List based on when each report group was created. - // - // * + // * // LAST_MODIFIED_TIME: List based on when each report group was last changed. // - // - // * NAME: List based on each report group's name. + // * + // NAME: List based on each report group's name. SortBy types.ReportGroupSortByType // Used to specify the order to sort the list of returned report groups. Valid diff --git a/service/codebuild/api_op_ListReports.go b/service/codebuild/api_op_ListReports.go index d762c2674e2..525f8d88960 100644 --- a/service/codebuild/api_op_ListReports.go +++ b/service/codebuild/api_op_ListReports.go @@ -46,12 +46,12 @@ type ListReportsInput struct { // Specifies the sort order for the list of returned reports. Valid values are: // - // - // * ASCENDING: return reports in chronological order based on their creation + // * + // ASCENDING: return reports in chronological order based on their creation // date. // - // * DESCENDING: return reports in the reverse chronological order based - // on their creation date. + // * DESCENDING: return reports in the reverse chronological order based on + // their creation date. SortOrder types.SortOrderType } diff --git a/service/codebuild/api_op_ListSharedProjects.go b/service/codebuild/api_op_ListSharedProjects.go index 7213bd01518..c4f903b304e 100644 --- a/service/codebuild/api_op_ListSharedProjects.go +++ b/service/codebuild/api_op_ListSharedProjects.go @@ -45,19 +45,19 @@ type ListSharedProjectsInput struct { // The criterion to be used to list build projects shared with the current AWS // account or user. Valid values include: // - // * ARN: List based on the ARN. + // * ARN: List based on the ARN. // - // * + // * // MODIFIED_TIME: List based on when information about the shared project was last // changed. SortBy types.SharedResourceSortByType // The order in which to list shared build projects. Valid values include: // - // * + // * // ASCENDING: List in ascending order. // - // * DESCENDING: List in descending order. + // * DESCENDING: List in descending order. SortOrder types.SortOrderType } diff --git a/service/codebuild/api_op_ListSharedReportGroups.go b/service/codebuild/api_op_ListSharedReportGroups.go index 5cc10a16330..e4e69d3445a 100644 --- a/service/codebuild/api_op_ListSharedReportGroups.go +++ b/service/codebuild/api_op_ListSharedReportGroups.go @@ -45,19 +45,19 @@ type ListSharedReportGroupsInput struct { // The criterion to be used to list report groups shared with the current AWS // account or user. Valid values include: // - // * ARN: List based on the ARN. + // * ARN: List based on the ARN. // - // * + // * // MODIFIED_TIME: List based on when information about the shared report group was // last changed. SortBy types.SharedResourceSortByType // The order in which to list shared report groups. Valid values include: // - // * + // * // ASCENDING: List in ascending order. // - // * DESCENDING: List in descending order. 
+ // * DESCENDING: List in descending order. SortOrder types.SortOrderType } diff --git a/service/codebuild/api_op_UpdateProject.go b/service/codebuild/api_op_UpdateProject.go index 3e200aec5d6..fcca182fb02 100644 --- a/service/codebuild/api_op_UpdateProject.go +++ b/service/codebuild/api_op_UpdateProject.go @@ -98,28 +98,28 @@ type UpdateProjectInput struct { // A version of the build input to be built for this project. If not specified, the // latest version is used. If specified, it must be one of: // - // * For AWS - // CodeCommit: the commit ID, branch, or Git tag to use. + // * For AWS CodeCommit: + // the commit ID, branch, or Git tag to use. // - // * For GitHub: the - // commit ID, pull request ID, branch name, or tag name that corresponds to the - // version of the source code you want to build. If a pull request ID is specified, - // it must use the format pr/pull-request-ID (for example pr/25). If a branch name - // is specified, the branch's HEAD commit ID is used. If not specified, the default + // * For GitHub: the commit ID, pull + // request ID, branch name, or tag name that corresponds to the version of the + // source code you want to build. If a pull request ID is specified, it must use + // the format pr/pull-request-ID (for example pr/25). If a branch name is + // specified, the branch's HEAD commit ID is used. If not specified, the default // branch's HEAD commit ID is used. // - // * For Bitbucket: the commit ID, branch - // name, or tag name that corresponds to the version of the source code you want to + // * For Bitbucket: the commit ID, branch name, + // or tag name that corresponds to the version of the source code you want to // build. If a branch name is specified, the branch's HEAD commit ID is used. If // not specified, the default branch's HEAD commit ID is used. // - // * For Amazon - // Simple Storage Service (Amazon S3): the version ID of the object that represents - // the build input ZIP file to use. + // * For Amazon Simple + // Storage Service (Amazon S3): the version ID of the object that represents the + // build input ZIP file to use. // - // If sourceVersion is specified at the build - // level, then that version takes precedence over this sourceVersion (at the - // project level). For more information, see Source Version Sample with CodeBuild + // If sourceVersion is specified at the build level, + // then that version takes precedence over this sourceVersion (at the project + // level). For more information, see Source Version Sample with CodeBuild // (https://docs.aws.amazon.com/codebuild/latest/userguide/sample-source-version.html) // in the AWS CodeBuild User Guide. SourceVersion *string diff --git a/service/codebuild/api_op_UpdateReportGroup.go b/service/codebuild/api_op_UpdateReportGroup.go index f8e4d325969..5b5b0f79117 100644 --- a/service/codebuild/api_op_UpdateReportGroup.go +++ b/service/codebuild/api_op_UpdateReportGroup.go @@ -36,11 +36,11 @@ type UpdateReportGroupInput struct { // Used to specify an updated export type. Valid values are: // - // * S3: The report + // * S3: The report // results are exported to an S3 bucket. // - // * NO_EXPORT: The report results are - // not exported. + // * NO_EXPORT: The report results are not + // exported. ExportConfig *types.ReportExportConfig // An updated list of tag key and value pairs associated with this report group. 
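
The hunks in this patch rewrap the generated doc comments and rename the enum identifiers from underscore style (for example ReportExportConfigTypeNo_export) to CamelCase (ReportExportConfigTypeNoExport) without touching the serialized string values. A minimal sketch of how calling code refers to the renamed constants, assuming the standard aws-sdk-go-v2 module path github.com/aws/aws-sdk-go-v2/service/codebuild/types; wire values such as "NO_EXPORT" and "IN_PROGRESS" are unchanged, so only the Go-side names differ:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/codebuild/types" // assumed module path
)

func main() {
	// The renamed identifier still carries the same serialized value.
	fmt.Println(types.ReportExportConfigTypeNoExport == types.ReportExportConfigType("NO_EXPORT")) // true

	// Switching on a build status uses the CamelCase names; the strings
	// exchanged with the service are unaffected by the rename.
	status := types.StatusTypeInProgress
	switch status {
	case types.StatusTypeSucceeded, types.StatusTypeFailed, types.StatusTypeFault,
		types.StatusTypeTimedOut, types.StatusTypeStopped:
		fmt.Println("terminal:", string(status))
	default:
		fmt.Println("still running:", string(status)) // prints "still running: IN_PROGRESS"
	}
}
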
diff --git a/service/codebuild/doc.go b/service/codebuild/doc.go index 57c88b526f7..aedea98f2dc 100644 --- a/service/codebuild/doc.go +++ b/service/codebuild/doc.go @@ -15,13 +15,13 @@ // (https://docs.aws.amazon.com/codebuild/latest/userguide/welcome.html). AWS // CodeBuild supports these operations: // -// * BatchDeleteBuilds: Deletes one or -// more builds. +// * BatchDeleteBuilds: Deletes one or more +// builds. // -// * BatchGetBuilds: Gets information about one or more builds. +// * BatchGetBuilds: Gets information about one or more builds. // -// -// * BatchGetProjects: Gets information about one or more build projects. A build +// * +// BatchGetProjects: Gets information about one or more build projects. A build // project defines how AWS CodeBuild runs a build. This includes information such // as where to get the source code to build, the build environment to use, the // build commands to run, and where to store the build output. A build environment @@ -29,105 +29,104 @@ // that AWS CodeBuild uses to run a build. You can add tags to build projects to // help manage your resources and costs. // -// * BatchGetReportGroups: Returns an -// array of report groups. -// -// * BatchGetReports: Returns an array of reports. +// * BatchGetReportGroups: Returns an array +// of report groups. // +// * BatchGetReports: Returns an array of reports. // -// * CreateProject: Creates a build project. +// * +// CreateProject: Creates a build project. // -// * CreateReportGroup: Creates a -// report group. A report group contains a collection of reports. +// * CreateReportGroup: Creates a report +// group. A report group contains a collection of reports. // -// * -// CreateWebhook: For an existing AWS CodeBuild build project that has its source -// code stored in a GitHub or Bitbucket repository, enables AWS CodeBuild to start -// rebuilding the source code every time a code change is pushed to the -// repository. +// * CreateWebhook: For an +// existing AWS CodeBuild build project that has its source code stored in a GitHub +// or Bitbucket repository, enables AWS CodeBuild to start rebuilding the source +// code every time a code change is pushed to the repository. // -// * DeleteProject: Deletes a build project. +// * DeleteProject: +// Deletes a build project. // -// * DeleteReport: -// Deletes a report. +// * DeleteReport: Deletes a report. // -// * DeleteReportGroup: Deletes a report group. +// * +// DeleteReportGroup: Deletes a report group. // -// * -// DeleteResourcePolicy: Deletes a resource policy that is identified by its -// resource ARN. +// * DeleteResourcePolicy: Deletes a +// resource policy that is identified by its resource ARN. // -// * DeleteSourceCredentials: Deletes a set of GitHub, GitHub -// Enterprise, or Bitbucket source credentials. +// * +// DeleteSourceCredentials: Deletes a set of GitHub, GitHub Enterprise, or +// Bitbucket source credentials. // -// * DeleteWebhook: For an -// existing AWS CodeBuild build project that has its source code stored in a GitHub -// or Bitbucket repository, stops AWS CodeBuild from rebuilding the source code -// every time a code change is pushed to the repository. +// * DeleteWebhook: For an existing AWS CodeBuild +// build project that has its source code stored in a GitHub or Bitbucket +// repository, stops AWS CodeBuild from rebuilding the source code every time a +// code change is pushed to the repository. // -// * DescribeTestCases: -// Returns a list of details about test cases for a report. 
+// * DescribeTestCases: Returns a list of +// details about test cases for a report. // -// * -// GetResourcePolicy: Gets a resource policy that is identified by its resource -// ARN. +// * GetResourcePolicy: Gets a resource +// policy that is identified by its resource ARN. // -// * ImportSourceCredentials: Imports the source repository credentials -// for an AWS CodeBuild project that has its source code stored in a GitHub, GitHub -// Enterprise, or Bitbucket repository. +// * ImportSourceCredentials: +// Imports the source repository credentials for an AWS CodeBuild project that has +// its source code stored in a GitHub, GitHub Enterprise, or Bitbucket +// repository. // -// * InvalidateProjectCache: Resets the -// cache for a project. +// * InvalidateProjectCache: Resets the cache for a project. // -// * ListBuilds: Gets a list of build IDs, with each -// build ID representing a single build. +// * +// ListBuilds: Gets a list of build IDs, with each build ID representing a single +// build. // -// * ListBuildsForProject: Gets a list -// of build IDs for the specified build project, with each build ID representing a -// single build. +// * ListBuildsForProject: Gets a list of build IDs for the specified build +// project, with each build ID representing a single build. // -// * ListCuratedEnvironmentImages: Gets information about Docker -// images that are managed by AWS CodeBuild. +// * +// ListCuratedEnvironmentImages: Gets information about Docker images that are +// managed by AWS CodeBuild. // -// * ListProjects: Gets a list of -// build project names, with each build project name representing a single build -// project. +// * ListProjects: Gets a list of build project names, +// with each build project name representing a single build project. // -// * ListReportGroups: Gets a list ARNs for the report groups in the -// current AWS account. +// * +// ListReportGroups: Gets a list ARNs for the report groups in the current AWS +// account. // -// * ListReports: Gets a list ARNs for the reports in the -// current AWS account. +// * ListReports: Gets a list ARNs for the reports in the current AWS +// account. // -// * ListReportsForReportGroup: Returns a list of ARNs -// for the reports that belong to a ReportGroup. +// * ListReportsForReportGroup: Returns a list of ARNs for the reports +// that belong to a ReportGroup. // -// * ListSharedProjects: Gets a -// list of ARNs associated with projects shared with the current AWS account or -// user. +// * ListSharedProjects: Gets a list of ARNs +// associated with projects shared with the current AWS account or user. // -// * ListSharedReportGroups: Gets a list of ARNs associated with report -// groups shared with the current AWS account or user +// * +// ListSharedReportGroups: Gets a list of ARNs associated with report groups shared +// with the current AWS account or user // -// * ListSourceCredentials: -// Returns a list of SourceCredentialsInfo objects. Each SourceCredentialsInfo -// object includes the authentication type, token ARN, and type of source provider -// for one set of credentials. +// * ListSourceCredentials: Returns a list of +// SourceCredentialsInfo objects. Each SourceCredentialsInfo object includes the +// authentication type, token ARN, and type of source provider for one set of +// credentials. // -// * PutResourcePolicy: Stores a resource policy -// for the ARN of a Project or ReportGroup object. +// * PutResourcePolicy: Stores a resource policy for the ARN of a +// Project or ReportGroup object. 
// -// * StartBuild: Starts -// running a build. +// * StartBuild: Starts running a build. // -// * StopBuild: Attempts to stop running a build. +// * +// StopBuild: Attempts to stop running a build. // -// * -// UpdateProject: Changes the settings of an existing build project. +// * UpdateProject: Changes the +// settings of an existing build project. // -// * -// UpdateReportGroup: Changes a report group. +// * UpdateReportGroup: Changes a report +// group. // -// * UpdateWebhook: Changes the -// settings of an existing webhook. +// * UpdateWebhook: Changes the settings of an existing webhook. package codebuild diff --git a/service/codebuild/types/enums.go b/service/codebuild/types/enums.go index 38ee05c529d..ae114eaea23 100644 --- a/service/codebuild/types/enums.go +++ b/service/codebuild/types/enums.go @@ -6,8 +6,8 @@ type ArtifactNamespace string // Enum values for ArtifactNamespace const ( - ArtifactNamespaceNone ArtifactNamespace = "NONE" - ArtifactNamespaceBuild_id ArtifactNamespace = "BUILD_ID" + ArtifactNamespaceNone ArtifactNamespace = "NONE" + ArtifactNamespaceBuildId ArtifactNamespace = "BUILD_ID" ) // Values returns all known values for ArtifactNamespace. Note that this can be @@ -44,7 +44,7 @@ type ArtifactsType string const ( ArtifactsTypeCodepipeline ArtifactsType = "CODEPIPELINE" ArtifactsTypeS3 ArtifactsType = "S3" - ArtifactsTypeNo_artifacts ArtifactsType = "NO_ARTIFACTS" + ArtifactsTypeNoArtifacts ArtifactsType = "NO_ARTIFACTS" ) // Values returns all known values for ArtifactsType. Note that this can be @@ -62,9 +62,9 @@ type AuthType string // Enum values for AuthType const ( - AuthTypeOauth AuthType = "OAUTH" - AuthTypeBasic_auth AuthType = "BASIC_AUTH" - AuthTypePersonal_access_token AuthType = "PERSONAL_ACCESS_TOKEN" + AuthTypeOauth AuthType = "OAUTH" + AuthTypeBasicAuth AuthType = "BASIC_AUTH" + AuthTypePersonalAccessToken AuthType = "PERSONAL_ACCESS_TOKEN" ) // Values returns all known values for AuthType. Note that this can be expanded in @@ -82,13 +82,13 @@ type BuildBatchPhaseType string // Enum values for BuildBatchPhaseType const ( - BuildBatchPhaseTypeSubmitted BuildBatchPhaseType = "SUBMITTED" - BuildBatchPhaseTypeDownload_batchspec BuildBatchPhaseType = "DOWNLOAD_BATCHSPEC" - BuildBatchPhaseTypeIn_progress BuildBatchPhaseType = "IN_PROGRESS" - BuildBatchPhaseTypeCombine_artifacts BuildBatchPhaseType = "COMBINE_ARTIFACTS" - BuildBatchPhaseTypeSucceeded BuildBatchPhaseType = "SUCCEEDED" - BuildBatchPhaseTypeFailed BuildBatchPhaseType = "FAILED" - BuildBatchPhaseTypeStopped BuildBatchPhaseType = "STOPPED" + BuildBatchPhaseTypeSubmitted BuildBatchPhaseType = "SUBMITTED" + BuildBatchPhaseTypeDownloadBatchspec BuildBatchPhaseType = "DOWNLOAD_BATCHSPEC" + BuildBatchPhaseTypeInProgress BuildBatchPhaseType = "IN_PROGRESS" + BuildBatchPhaseTypeCombineArtifacts BuildBatchPhaseType = "COMBINE_ARTIFACTS" + BuildBatchPhaseTypeSucceeded BuildBatchPhaseType = "SUCCEEDED" + BuildBatchPhaseTypeFailed BuildBatchPhaseType = "FAILED" + BuildBatchPhaseTypeStopped BuildBatchPhaseType = "STOPPED" ) // Values returns all known values for BuildBatchPhaseType. 
Note that this can be @@ -110,17 +110,17 @@ type BuildPhaseType string // Enum values for BuildPhaseType const ( - BuildPhaseTypeSubmitted BuildPhaseType = "SUBMITTED" - BuildPhaseTypeQueued BuildPhaseType = "QUEUED" - BuildPhaseTypeProvisioning BuildPhaseType = "PROVISIONING" - BuildPhaseTypeDownload_source BuildPhaseType = "DOWNLOAD_SOURCE" - BuildPhaseTypeInstall BuildPhaseType = "INSTALL" - BuildPhaseTypePre_build BuildPhaseType = "PRE_BUILD" - BuildPhaseTypeBuild BuildPhaseType = "BUILD" - BuildPhaseTypePost_build BuildPhaseType = "POST_BUILD" - BuildPhaseTypeUpload_artifacts BuildPhaseType = "UPLOAD_ARTIFACTS" - BuildPhaseTypeFinalizing BuildPhaseType = "FINALIZING" - BuildPhaseTypeCompleted BuildPhaseType = "COMPLETED" + BuildPhaseTypeSubmitted BuildPhaseType = "SUBMITTED" + BuildPhaseTypeQueued BuildPhaseType = "QUEUED" + BuildPhaseTypeProvisioning BuildPhaseType = "PROVISIONING" + BuildPhaseTypeDownloadSource BuildPhaseType = "DOWNLOAD_SOURCE" + BuildPhaseTypeInstall BuildPhaseType = "INSTALL" + BuildPhaseTypePreBuild BuildPhaseType = "PRE_BUILD" + BuildPhaseTypeBuild BuildPhaseType = "BUILD" + BuildPhaseTypePostBuild BuildPhaseType = "POST_BUILD" + BuildPhaseTypeUploadArtifacts BuildPhaseType = "UPLOAD_ARTIFACTS" + BuildPhaseTypeFinalizing BuildPhaseType = "FINALIZING" + BuildPhaseTypeCompleted BuildPhaseType = "COMPLETED" ) // Values returns all known values for BuildPhaseType. Note that this can be @@ -146,9 +146,9 @@ type CacheMode string // Enum values for CacheMode const ( - CacheModeLocal_docker_layer_cache CacheMode = "LOCAL_DOCKER_LAYER_CACHE" - CacheModeLocal_source_cache CacheMode = "LOCAL_SOURCE_CACHE" - CacheModeLocal_custom_cache CacheMode = "LOCAL_CUSTOM_CACHE" + CacheModeLocalDockerLayerCache CacheMode = "LOCAL_DOCKER_LAYER_CACHE" + CacheModeLocalSourceCache CacheMode = "LOCAL_SOURCE_CACHE" + CacheModeLocalCustomCache CacheMode = "LOCAL_CUSTOM_CACHE" ) // Values returns all known values for CacheMode. Note that this can be expanded in @@ -166,9 +166,9 @@ type CacheType string // Enum values for CacheType const ( - CacheTypeNo_cache CacheType = "NO_CACHE" - CacheTypeS3 CacheType = "S3" - CacheTypeLocal CacheType = "LOCAL" + CacheTypeNoCache CacheType = "NO_CACHE" + CacheTypeS3 CacheType = "S3" + CacheTypeLocal CacheType = "LOCAL" ) // Values returns all known values for CacheType. Note that this can be expanded in @@ -186,10 +186,10 @@ type ComputeType string // Enum values for ComputeType const ( - ComputeTypeBuild_general1_small ComputeType = "BUILD_GENERAL1_SMALL" - ComputeTypeBuild_general1_medium ComputeType = "BUILD_GENERAL1_MEDIUM" - ComputeTypeBuild_general1_large ComputeType = "BUILD_GENERAL1_LARGE" - ComputeTypeBuild_general1_2xlarge ComputeType = "BUILD_GENERAL1_2XLARGE" + ComputeTypeBuildGeneral1Small ComputeType = "BUILD_GENERAL1_SMALL" + ComputeTypeBuildGeneral1Medium ComputeType = "BUILD_GENERAL1_MEDIUM" + ComputeTypeBuildGeneral1Large ComputeType = "BUILD_GENERAL1_LARGE" + ComputeTypeBuildGeneral12xlarge ComputeType = "BUILD_GENERAL1_2XLARGE" ) // Values returns all known values for ComputeType. Note that this can be expanded @@ -208,7 +208,7 @@ type CredentialProviderType string // Enum values for CredentialProviderType const ( - CredentialProviderTypeSecrets_manager CredentialProviderType = "SECRETS_MANAGER" + CredentialProviderTypeSecretsManager CredentialProviderType = "SECRETS_MANAGER" ) // Values returns all known values for CredentialProviderType. 
Note that this can @@ -224,11 +224,11 @@ type EnvironmentType string // Enum values for EnvironmentType const ( - EnvironmentTypeWindows_container EnvironmentType = "WINDOWS_CONTAINER" - EnvironmentTypeLinux_container EnvironmentType = "LINUX_CONTAINER" - EnvironmentTypeLinux_gpu_container EnvironmentType = "LINUX_GPU_CONTAINER" - EnvironmentTypeArm_container EnvironmentType = "ARM_CONTAINER" - EnvironmentTypeWindows_server_2019_container EnvironmentType = "WINDOWS_SERVER_2019_CONTAINER" + EnvironmentTypeWindowsContainer EnvironmentType = "WINDOWS_CONTAINER" + EnvironmentTypeLinuxContainer EnvironmentType = "LINUX_CONTAINER" + EnvironmentTypeLinuxGpuContainer EnvironmentType = "LINUX_GPU_CONTAINER" + EnvironmentTypeArmContainer EnvironmentType = "ARM_CONTAINER" + EnvironmentTypeWindowsServer2019Container EnvironmentType = "WINDOWS_SERVER_2019_CONTAINER" ) // Values returns all known values for EnvironmentType. Note that this can be @@ -248,9 +248,9 @@ type EnvironmentVariableType string // Enum values for EnvironmentVariableType const ( - EnvironmentVariableTypePlaintext EnvironmentVariableType = "PLAINTEXT" - EnvironmentVariableTypeParameter_store EnvironmentVariableType = "PARAMETER_STORE" - EnvironmentVariableTypeSecrets_manager EnvironmentVariableType = "SECRETS_MANAGER" + EnvironmentVariableTypePlaintext EnvironmentVariableType = "PLAINTEXT" + EnvironmentVariableTypeParameterStore EnvironmentVariableType = "PARAMETER_STORE" + EnvironmentVariableTypeSecretsManager EnvironmentVariableType = "SECRETS_MANAGER" ) // Values returns all known values for EnvironmentVariableType. Note that this can @@ -284,8 +284,8 @@ type ImagePullCredentialsType string // Enum values for ImagePullCredentialsType const ( - ImagePullCredentialsTypeCodebuild ImagePullCredentialsType = "CODEBUILD" - ImagePullCredentialsTypeService_role ImagePullCredentialsType = "SERVICE_ROLE" + ImagePullCredentialsTypeCodebuild ImagePullCredentialsType = "CODEBUILD" + ImagePullCredentialsTypeServiceRole ImagePullCredentialsType = "SERVICE_ROLE" ) // Values returns all known values for ImagePullCredentialsType. Note that this can @@ -304,7 +304,7 @@ type LanguageType string const ( LanguageTypeJava LanguageType = "JAVA" LanguageTypePython LanguageType = "PYTHON" - LanguageTypeNode_js LanguageType = "NODE_JS" + LanguageTypeNodeJs LanguageType = "NODE_JS" LanguageTypeRuby LanguageType = "RUBY" LanguageTypeGolang LanguageType = "GOLANG" LanguageTypeDocker LanguageType = "DOCKER" @@ -354,10 +354,10 @@ type PlatformType string // Enum values for PlatformType const ( - PlatformTypeDebian PlatformType = "DEBIAN" - PlatformTypeAmazon_linux PlatformType = "AMAZON_LINUX" - PlatformTypeUbuntu PlatformType = "UBUNTU" - PlatformTypeWindows_server PlatformType = "WINDOWS_SERVER" + PlatformTypeDebian PlatformType = "DEBIAN" + PlatformTypeAmazonLinux PlatformType = "AMAZON_LINUX" + PlatformTypeUbuntu PlatformType = "UBUNTU" + PlatformTypeWindowsServer PlatformType = "WINDOWS_SERVER" ) // Values returns all known values for PlatformType. 
Note that this can be expanded @@ -376,9 +376,9 @@ type ProjectSortByType string // Enum values for ProjectSortByType const ( - ProjectSortByTypeName ProjectSortByType = "NAME" - ProjectSortByTypeCreated_time ProjectSortByType = "CREATED_TIME" - ProjectSortByTypeLast_modified_time ProjectSortByType = "LAST_MODIFIED_TIME" + ProjectSortByTypeName ProjectSortByType = "NAME" + ProjectSortByTypeCreatedTime ProjectSortByType = "CREATED_TIME" + ProjectSortByTypeLastModifiedTime ProjectSortByType = "LAST_MODIFIED_TIME" ) // Values returns all known values for ProjectSortByType. Note that this can be @@ -396,8 +396,8 @@ type ReportCodeCoverageSortByType string // Enum values for ReportCodeCoverageSortByType const ( - ReportCodeCoverageSortByTypeLine_coverage_percentage ReportCodeCoverageSortByType = "LINE_COVERAGE_PERCENTAGE" - ReportCodeCoverageSortByTypeFile_path ReportCodeCoverageSortByType = "FILE_PATH" + ReportCodeCoverageSortByTypeLineCoveragePercentage ReportCodeCoverageSortByType = "LINE_COVERAGE_PERCENTAGE" + ReportCodeCoverageSortByTypeFilePath ReportCodeCoverageSortByType = "FILE_PATH" ) // Values returns all known values for ReportCodeCoverageSortByType. Note that this @@ -414,8 +414,8 @@ type ReportExportConfigType string // Enum values for ReportExportConfigType const ( - ReportExportConfigTypeS3 ReportExportConfigType = "S3" - ReportExportConfigTypeNo_export ReportExportConfigType = "NO_EXPORT" + ReportExportConfigTypeS3 ReportExportConfigType = "S3" + ReportExportConfigTypeNoExport ReportExportConfigType = "NO_EXPORT" ) // Values returns all known values for ReportExportConfigType. Note that this can @@ -432,9 +432,9 @@ type ReportGroupSortByType string // Enum values for ReportGroupSortByType const ( - ReportGroupSortByTypeName ReportGroupSortByType = "NAME" - ReportGroupSortByTypeCreated_time ReportGroupSortByType = "CREATED_TIME" - ReportGroupSortByTypeLast_modified_time ReportGroupSortByType = "LAST_MODIFIED_TIME" + ReportGroupSortByTypeName ReportGroupSortByType = "NAME" + ReportGroupSortByTypeCreatedTime ReportGroupSortByType = "CREATED_TIME" + ReportGroupSortByTypeLastModifiedTime ReportGroupSortByType = "LAST_MODIFIED_TIME" ) // Values returns all known values for ReportGroupSortByType. Note that this can be @@ -494,8 +494,8 @@ type ReportType string // Enum values for ReportType const ( - ReportTypeTest ReportType = "TEST" - ReportTypeCode_coverage ReportType = "CODE_COVERAGE" + ReportTypeTest ReportType = "TEST" + ReportTypeCodeCoverage ReportType = "CODE_COVERAGE" ) // Values returns all known values for ReportType. Note that this can be expanded @@ -512,8 +512,8 @@ type RetryBuildBatchType string // Enum values for RetryBuildBatchType const ( - RetryBuildBatchTypeRetry_all_builds RetryBuildBatchType = "RETRY_ALL_BUILDS" - RetryBuildBatchTypeRetry_failed_builds RetryBuildBatchType = "RETRY_FAILED_BUILDS" + RetryBuildBatchTypeRetryAllBuilds RetryBuildBatchType = "RETRY_ALL_BUILDS" + RetryBuildBatchTypeRetryFailedBuilds RetryBuildBatchType = "RETRY_FAILED_BUILDS" ) // Values returns all known values for RetryBuildBatchType. Note that this can be @@ -530,9 +530,9 @@ type ServerType string // Enum values for ServerType const ( - ServerTypeGithub ServerType = "GITHUB" - ServerTypeBitbucket ServerType = "BITBUCKET" - ServerTypeGithub_enterprise ServerType = "GITHUB_ENTERPRISE" + ServerTypeGithub ServerType = "GITHUB" + ServerTypeBitbucket ServerType = "BITBUCKET" + ServerTypeGithubEnterprise ServerType = "GITHUB_ENTERPRISE" ) // Values returns all known values for ServerType. 
Note that this can be expanded @@ -550,8 +550,8 @@ type SharedResourceSortByType string // Enum values for SharedResourceSortByType const ( - SharedResourceSortByTypeArn SharedResourceSortByType = "ARN" - SharedResourceSortByTypeModified_time SharedResourceSortByType = "MODIFIED_TIME" + SharedResourceSortByTypeArn SharedResourceSortByType = "ARN" + SharedResourceSortByTypeModifiedTime SharedResourceSortByType = "MODIFIED_TIME" ) // Values returns all known values for SharedResourceSortByType. Note that this can @@ -602,13 +602,13 @@ type SourceType string // Enum values for SourceType const ( - SourceTypeCodecommit SourceType = "CODECOMMIT" - SourceTypeCodepipeline SourceType = "CODEPIPELINE" - SourceTypeGithub SourceType = "GITHUB" - SourceTypeS3 SourceType = "S3" - SourceTypeBitbucket SourceType = "BITBUCKET" - SourceTypeGithub_enterprise SourceType = "GITHUB_ENTERPRISE" - SourceTypeNo_source SourceType = "NO_SOURCE" + SourceTypeCodecommit SourceType = "CODECOMMIT" + SourceTypeCodepipeline SourceType = "CODEPIPELINE" + SourceTypeGithub SourceType = "GITHUB" + SourceTypeS3 SourceType = "S3" + SourceTypeBitbucket SourceType = "BITBUCKET" + SourceTypeGithubEnterprise SourceType = "GITHUB_ENTERPRISE" + SourceTypeNoSource SourceType = "NO_SOURCE" ) // Values returns all known values for SourceType. Note that this can be expanded @@ -630,12 +630,12 @@ type StatusType string // Enum values for StatusType const ( - StatusTypeSucceeded StatusType = "SUCCEEDED" - StatusTypeFailed StatusType = "FAILED" - StatusTypeFault StatusType = "FAULT" - StatusTypeTimed_out StatusType = "TIMED_OUT" - StatusTypeIn_progress StatusType = "IN_PROGRESS" - StatusTypeStopped StatusType = "STOPPED" + StatusTypeSucceeded StatusType = "SUCCEEDED" + StatusTypeFailed StatusType = "FAILED" + StatusTypeFault StatusType = "FAULT" + StatusTypeTimedOut StatusType = "TIMED_OUT" + StatusTypeInProgress StatusType = "IN_PROGRESS" + StatusTypeStopped StatusType = "STOPPED" ) // Values returns all known values for StatusType. Note that this can be expanded @@ -656,8 +656,8 @@ type WebhookBuildType string // Enum values for WebhookBuildType const ( - WebhookBuildTypeBuild WebhookBuildType = "BUILD" - WebhookBuildTypeBuild_batch WebhookBuildType = "BUILD_BATCH" + WebhookBuildTypeBuild WebhookBuildType = "BUILD" + WebhookBuildTypeBuildBatch WebhookBuildType = "BUILD_BATCH" ) // Values returns all known values for WebhookBuildType. Note that this can be @@ -674,12 +674,12 @@ type WebhookFilterType string // Enum values for WebhookFilterType const ( - WebhookFilterTypeEvent WebhookFilterType = "EVENT" - WebhookFilterTypeBase_ref WebhookFilterType = "BASE_REF" - WebhookFilterTypeHead_ref WebhookFilterType = "HEAD_REF" - WebhookFilterTypeActor_account_id WebhookFilterType = "ACTOR_ACCOUNT_ID" - WebhookFilterTypeFile_path WebhookFilterType = "FILE_PATH" - WebhookFilterTypeCommit_message WebhookFilterType = "COMMIT_MESSAGE" + WebhookFilterTypeEvent WebhookFilterType = "EVENT" + WebhookFilterTypeBaseRef WebhookFilterType = "BASE_REF" + WebhookFilterTypeHeadRef WebhookFilterType = "HEAD_REF" + WebhookFilterTypeActorAccountId WebhookFilterType = "ACTOR_ACCOUNT_ID" + WebhookFilterTypeFilePath WebhookFilterType = "FILE_PATH" + WebhookFilterTypeCommitMessage WebhookFilterType = "COMMIT_MESSAGE" ) // Values returns all known values for WebhookFilterType. 
Note that this can be diff --git a/service/codebuild/types/types.go b/service/codebuild/types/types.go index b64fb33c7f2..39190c12b6d 100644 --- a/service/codebuild/types/types.go +++ b/service/codebuild/types/types.go @@ -41,20 +41,20 @@ type Build struct { // The current status of the build. Valid values include: // - // * FAILED: The build + // * FAILED: The build // failed. // - // * FAULT: The build faulted. + // * FAULT: The build faulted. // - // * IN_PROGRESS: The build is still - // in progress. + // * IN_PROGRESS: The build is still in + // progress. // - // * STOPPED: The build stopped. + // * STOPPED: The build stopped. // - // * SUCCEEDED: The build - // succeeded. + // * SUCCEEDED: The build succeeded. // - // * TIMED_OUT: The build timed out. + // * + // TIMED_OUT: The build timed out. BuildStatus StatusType // Information about the cache for the build. @@ -93,15 +93,15 @@ type Build struct { // The entity that started the build. Valid values include: // - // * If AWS - // CodePipeline started the build, the pipeline's name (for example, + // * If AWS CodePipeline + // started the build, the pipeline's name (for example, // codepipeline/my-demo-pipeline). // - // * If an AWS Identity and Access Management + // * If an AWS Identity and Access Management // (IAM) user started the build, the user's name (for example, MyUserName). // - // * - // If the Jenkins plugin for AWS CodeBuild started the build, the string + // * If + // the Jenkins plugin for AWS CodeBuild started the build, the string // CodeBuild-Jenkins-Plugin. Initiator *string @@ -126,14 +126,14 @@ type Build struct { // An identifier for the version of this build's source code. // - // * For AWS + // * For AWS // CodeCommit, GitHub, GitHub Enterprise, and BitBucket, the commit ID. // - // * For - // AWS CodePipeline, the source revision provided by AWS CodePipeline. + // * For AWS + // CodePipeline, the source revision provided by AWS CodePipeline. // - // * For - // Amazon Simple Storage Service (Amazon S3), this does not apply. + // * For Amazon + // Simple Storage Service (Amazon S3), this does not apply. ResolvedSourceVersion *string // An array of ProjectArtifacts objects. @@ -142,23 +142,23 @@ type Build struct { // An array of ProjectSourceVersion objects. Each ProjectSourceVersion must be one // of: // - // * For AWS CodeCommit: the commit ID, branch, or Git tag to use. + // * For AWS CodeCommit: the commit ID, branch, or Git tag to use. // - // * - // For GitHub: the commit ID, pull request ID, branch name, or tag name that + // * For + // GitHub: the commit ID, pull request ID, branch name, or tag name that // corresponds to the version of the source code you want to build. If a pull // request ID is specified, it must use the format pr/pull-request-ID (for example, // pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If // not specified, the default branch's HEAD commit ID is used. // - // * For - // Bitbucket: the commit ID, branch name, or tag name that corresponds to the - // version of the source code you want to build. If a branch name is specified, the - // branch's HEAD commit ID is used. If not specified, the default branch's HEAD - // commit ID is used. + // * For Bitbucket: + // the commit ID, branch name, or tag name that corresponds to the version of the + // source code you want to build. If a branch name is specified, the branch's HEAD + // commit ID is used. If not specified, the default branch's HEAD commit ID is + // used. 
// - // * For Amazon Simple Storage Service (Amazon S3): the - // version ID of the object that represents the build input ZIP file to use. + // * For Amazon Simple Storage Service (Amazon S3): the version ID of the + // object that represents the build input ZIP file to use. SecondarySourceVersions []*ProjectSourceVersion // An array of ProjectSource objects. @@ -282,15 +282,15 @@ type BuildBatch struct { // The entity that started the batch build. Valid values include: // - // * If AWS + // * If AWS // CodePipeline started the build, the pipeline's name (for example, // codepipeline/my-demo-pipeline). // - // * If an AWS Identity and Access Management + // * If an AWS Identity and Access Management // (IAM) user started the build, the user's name. // - // * If the Jenkins plugin for - // AWS CodeBuild started the build, the string CodeBuild-Jenkins-Plugin. + // * If the Jenkins plugin for AWS + // CodeBuild started the build, the string CodeBuild-Jenkins-Plugin. Initiator *string // Information about logs for a build project. These can be logs in Amazon @@ -309,14 +309,14 @@ type BuildBatch struct { // The identifier of the resolved version of this batch build's source code. // - // * - // For AWS CodeCommit, GitHub, GitHub Enterprise, and BitBucket, the commit ID. + // * For + // AWS CodeCommit, GitHub, GitHub Enterprise, and BitBucket, the commit ID. // + // * For + // AWS CodePipeline, the source revision provided by AWS CodePipeline. // - // * For AWS CodePipeline, the source revision provided by AWS CodePipeline. - // - // * - // For Amazon Simple Storage Service (Amazon S3), this does not apply. + // * For + // Amazon Simple Storage Service (Amazon S3), this does not apply. ResolvedSourceVersion *string // An array of BuildArtifacts objects the define the build artifacts for this batch @@ -326,23 +326,23 @@ type BuildBatch struct { // An array of ProjectSourceVersion objects. Each ProjectSourceVersion must be one // of: // - // * For AWS CodeCommit: the commit ID, branch, or Git tag to use. + // * For AWS CodeCommit: the commit ID, branch, or Git tag to use. // - // * - // For GitHub: the commit ID, pull request ID, branch name, or tag name that + // * For + // GitHub: the commit ID, pull request ID, branch name, or tag name that // corresponds to the version of the source code you want to build. If a pull // request ID is specified, it must use the format pr/pull-request-ID (for example, // pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If // not specified, the default branch's HEAD commit ID is used. // - // * For - // Bitbucket: the commit ID, branch name, or tag name that corresponds to the - // version of the source code you want to build. If a branch name is specified, the - // branch's HEAD commit ID is used. If not specified, the default branch's HEAD - // commit ID is used. + // * For Bitbucket: + // the commit ID, branch name, or tag name that corresponds to the version of the + // source code you want to build. If a branch name is specified, the branch's HEAD + // commit ID is used. If not specified, the default branch's HEAD commit ID is + // used. // - // * For Amazon Simple Storage Service (Amazon S3): the - // version ID of the object that represents the build input ZIP file to use. + // * For Amazon Simple Storage Service (Amazon S3): the version ID of the + // object that represents the build input ZIP file to use. 
SecondarySourceVersions []*ProjectSourceVersion // An array of ProjectSource objects that define the sources for the batch build. @@ -461,37 +461,37 @@ type BuildPhase struct { // The name of the build phase. Valid values include: // - // * BUILD: Core build + // * BUILD: Core build // activities typically occur in this build phase. // - // * COMPLETED: The build has - // been completed. + // * COMPLETED: The build has been + // completed. // - // * DOWNLOAD_SOURCE: Source code is being downloaded in this - // build phase. - // - // * FINALIZING: The build process is completing in this build + // * DOWNLOAD_SOURCE: Source code is being downloaded in this build // phase. // - // * INSTALL: Installation activities typically occur in this build - // phase. + // * FINALIZING: The build process is completing in this build phase. // - // * POST_BUILD: Post-build activities typically occur in this build - // phase. + // * + // INSTALL: Installation activities typically occur in this build phase. // - // * PRE_BUILD: Pre-build activities typically occur in this build - // phase. + // * + // POST_BUILD: Post-build activities typically occur in this build phase. // - // * PROVISIONING: The build environment is being set up. + // * + // PRE_BUILD: Pre-build activities typically occur in this build phase. // - // * - // QUEUED: The build has been submitted and is queued behind other submitted - // builds. + // * + // PROVISIONING: The build environment is being set up. + // + // * QUEUED: The build has + // been submitted and is queued behind other submitted builds. // - // * SUBMITTED: The build has been submitted. + // * SUBMITTED: The + // build has been submitted. // - // * UPLOAD_ARTIFACTS: - // Build output artifacts are being uploaded to the output location. + // * UPLOAD_ARTIFACTS: Build output artifacts are being + // uploaded to the output location. PhaseType BuildPhaseType // When the build phase started, expressed in Unix time format. @@ -557,11 +557,11 @@ type CloudWatchLogsConfig struct { // The current status of the logs in Amazon CloudWatch Logs for a build project. // Valid values are: // - // * ENABLED: Amazon CloudWatch Logs are enabled for this - // build project. + // * ENABLED: Amazon CloudWatch Logs are enabled for this build + // project. // - // * DISABLED: Amazon CloudWatch Logs are not enabled for this - // build project. + // * DISABLED: Amazon CloudWatch Logs are not enabled for this build + // project. // // This member is required. Status LogsConfigStatusType @@ -709,19 +709,19 @@ type EnvironmentVariable struct { // The type of environment variable. Valid values include: // - // * PARAMETER_STORE: - // An environment variable stored in Amazon EC2 Systems Manager Parameter Store. To + // * PARAMETER_STORE: An + // environment variable stored in Amazon EC2 Systems Manager Parameter Store. To // learn how to specify a parameter store environment variable, see // env/parameter-store // (https://docs.aws.amazon.com/codebuild/latest/userguide/build-spec-ref.html#build-spec.env.parameter-store) // in the AWS CodeBuild User Guide. // - // * PLAINTEXT: An environment variable in - // plain text format. This is the default value. + // * PLAINTEXT: An environment variable in plain + // text format. This is the default value. // - // * SECRETS_MANAGER: An - // environment variable stored in AWS Secrets Manager. 
To learn how to specify a - // secrets manager environment variable, see env/secrets-manager + // * SECRETS_MANAGER: An environment + // variable stored in AWS Secrets Manager. To learn how to specify a secrets + // manager environment variable, see env/secrets-manager // (https://docs.aws.amazon.com/codebuild/latest/userguide/build-spec-ref.html#build-spec.env.secrets-manager) // in the AWS CodeBuild User Guide. Type EnvironmentVariableType @@ -896,28 +896,28 @@ type Project struct { // A version of the build input to be built for this project. If not specified, the // latest version is used. If specified, it must be one of: // - // * For AWS - // CodeCommit: the commit ID, branch, or Git tag to use. + // * For AWS CodeCommit: + // the commit ID, branch, or Git tag to use. // - // * For GitHub: the - // commit ID, pull request ID, branch name, or tag name that corresponds to the - // version of the source code you want to build. If a pull request ID is specified, - // it must use the format pr/pull-request-ID (for example pr/25). If a branch name - // is specified, the branch's HEAD commit ID is used. If not specified, the default + // * For GitHub: the commit ID, pull + // request ID, branch name, or tag name that corresponds to the version of the + // source code you want to build. If a pull request ID is specified, it must use + // the format pr/pull-request-ID (for example pr/25). If a branch name is + // specified, the branch's HEAD commit ID is used. If not specified, the default // branch's HEAD commit ID is used. // - // * For Bitbucket: the commit ID, branch - // name, or tag name that corresponds to the version of the source code you want to + // * For Bitbucket: the commit ID, branch name, + // or tag name that corresponds to the version of the source code you want to // build. If a branch name is specified, the branch's HEAD commit ID is used. If // not specified, the default branch's HEAD commit ID is used. // - // * For Amazon - // Simple Storage Service (Amazon S3): the version ID of the object that represents - // the build input ZIP file to use. + // * For Amazon Simple + // Storage Service (Amazon S3): the version ID of the object that represents the + // build input ZIP file to use. // - // If sourceVersion is specified at the build - // level, then that version takes precedence over this sourceVersion (at the - // project level). For more information, see Source Version Sample with CodeBuild + // If sourceVersion is specified at the build level, + // then that version takes precedence over this sourceVersion (at the project + // level). For more information, see Source Version Sample with CodeBuild // (https://docs.aws.amazon.com/codebuild/latest/userguide/sample-source-version.html) // in the AWS CodeBuild User Guide. SourceVersion *string @@ -945,15 +945,15 @@ type ProjectArtifacts struct { // The type of build output artifact. Valid values include: // - // * CODEPIPELINE: - // The build project has build output generated through AWS CodePipeline. The + // * CODEPIPELINE: The + // build project has build output generated through AWS CodePipeline. The // CODEPIPELINE type is not supported for secondaryArtifacts. // - // * NO_ARTIFACTS: - // The build project does not produce any build output. + // * NO_ARTIFACTS: The + // build project does not produce any build output. // - // * S3: The build - // project stores build output in Amazon Simple Storage Service (Amazon S3). 
+ // * S3: The build project stores + // build output in Amazon Simple Storage Service (Amazon S3). // // This member is required. Type ArtifactsType @@ -968,66 +968,66 @@ type ProjectArtifacts struct { // Information about the build output artifact location: // - // * If type is set to + // * If type is set to // CODEPIPELINE, AWS CodePipeline ignores this value if specified. This is because // AWS CodePipeline manages its build output locations instead of AWS CodeBuild. // - // - // * If type is set to NO_ARTIFACTS, this value is ignored if specified, because no + // * + // If type is set to NO_ARTIFACTS, this value is ignored if specified, because no // build output is produced. // - // * If type is set to S3, this is the name of the + // * If type is set to S3, this is the name of the // output bucket. Location *string // Along with path and namespaceType, the pattern that AWS CodeBuild uses to name // and store the output artifact: // - // * If type is set to CODEPIPELINE, AWS + // * If type is set to CODEPIPELINE, AWS // CodePipeline ignores this value if specified. This is because AWS CodePipeline // manages its build output names instead of AWS CodeBuild. // - // * If type is set - // to NO_ARTIFACTS, this value is ignored if specified, because no build output is + // * If type is set to + // NO_ARTIFACTS, this value is ignored if specified, because no build output is // produced. // - // * If type is set to S3, this is the name of the output artifact + // * If type is set to S3, this is the name of the output artifact // object. If you set the name to be a forward slash ("/"), the artifact is stored // in the root of the output bucket. // // For example: // - // * If path is set to + // * If path is set to // MyArtifacts, namespaceType is set to BUILD_ID, and name is set to // MyArtifact.zip, then the output artifact is stored in // MyArtifacts//MyArtifact.zip. // - // * If path is empty, namespaceType is set to - // NONE, and name is set to "/", the output artifact is stored in the root of the - // output bucket. + // * If path is empty, namespaceType is set to NONE, + // and name is set to "/", the output artifact is stored in the root of the output + // bucket. // - // * If path is set to MyArtifacts, namespaceType is set to - // BUILD_ID, and name is set to "/", the output artifact is stored in MyArtifacts/. + // * If path is set to MyArtifacts, namespaceType is set to BUILD_ID, and + // name is set to "/", the output artifact is stored in MyArtifacts/. Name *string // Along with path and name, the pattern that AWS CodeBuild uses to determine the // name and location to store the output artifact: // - // * If type is set to + // * If type is set to // CODEPIPELINE, AWS CodePipeline ignores this value if specified. This is because // AWS CodePipeline manages its build output names instead of AWS CodeBuild. // - // * - // If type is set to NO_ARTIFACTS, this value is ignored if specified, because no + // * If + // type is set to NO_ARTIFACTS, this value is ignored if specified, because no // build output is produced. // - // * If type is set to S3, valid values include: + // * If type is set to S3, valid values include: // + // * + // BUILD_ID: Include the build ID in the location of the build output artifact. // - // * BUILD_ID: Include the build ID in the location of the build output artifact. - // - // - // * NONE: Do not include the build ID. This is the default if namespaceType is not + // * + // NONE: Do not include the build ID. 
This is the default if namespaceType is not // specified. // // For example, if path is set to MyArtifacts, namespaceType is set to @@ -1043,38 +1043,38 @@ type ProjectArtifacts struct { // The type of build output artifact to create: // - // * If type is set to - // CODEPIPELINE, AWS CodePipeline ignores this value if specified. This is because - // AWS CodePipeline manages its build output artifacts instead of AWS CodeBuild. - // + // * If type is set to CODEPIPELINE, + // AWS CodePipeline ignores this value if specified. This is because AWS + // CodePipeline manages its build output artifacts instead of AWS CodeBuild. // - // * If type is set to NO_ARTIFACTS, this value is ignored if specified, because no + // * If + // type is set to NO_ARTIFACTS, this value is ignored if specified, because no // build output is produced. // - // * If type is set to S3, valid values include: + // * If type is set to S3, valid values include: // - // - // * NONE: AWS CodeBuild creates in the output bucket a folder that contains the + // * + // NONE: AWS CodeBuild creates in the output bucket a folder that contains the // build output. This is the default if packaging is not specified. // - // * ZIP: - // AWS CodeBuild creates in the output bucket a ZIP file that contains the build + // * ZIP: AWS + // CodeBuild creates in the output bucket a ZIP file that contains the build // output. Packaging ArtifactPackaging // Along with namespaceType and name, the pattern that AWS CodeBuild uses to name // and store the output artifact: // - // * If type is set to CODEPIPELINE, AWS + // * If type is set to CODEPIPELINE, AWS // CodePipeline ignores this value if specified. This is because AWS CodePipeline // manages its build output names instead of AWS CodeBuild. // - // * If type is set - // to NO_ARTIFACTS, this value is ignored if specified, because no build output is + // * If type is set to + // NO_ARTIFACTS, this value is ignored if specified, because no build output is // produced. // - // * If type is set to S3, this is the path to the output artifact. - // If path is not specified, path is not used. + // * If type is set to S3, this is the path to the output artifact. If + // path is not specified, path is not used. // // For example, if path is set to // MyArtifacts, namespaceType is set to NONE, and name is set to MyArtifact.zip, @@ -1119,66 +1119,65 @@ type ProjectCache struct { // The type of cache used by the build project. Valid values include: // - // * - // NO_CACHE: The build project does not use any cache. + // * NO_CACHE: + // The build project does not use any cache. // - // * S3: The build project - // reads and writes from and to S3. + // * S3: The build project reads and + // writes from and to S3. // - // * LOCAL: The build project stores a cache - // locally on a build host that is only available to that build host. + // * LOCAL: The build project stores a cache locally on a + // build host that is only available to that build host. // // This member is required. Type CacheType // Information about the cache location: // - // * NO_CACHE or LOCAL: This value is + // * NO_CACHE or LOCAL: This value is // ignored. // - // * S3: This is the S3 bucket name/prefix. + // * S3: This is the S3 bucket name/prefix. Location *string // If you use a LOCAL cache, the local cache mode. You can use one or more local // cache modes at the same time. // - // * LOCAL_SOURCE_CACHE mode caches Git metadata - // for primary and secondary sources. 
After the cache is created, subsequent builds + // * LOCAL_SOURCE_CACHE mode caches Git metadata for + // primary and secondary sources. After the cache is created, subsequent builds // pull only the change between commits. This mode is a good choice for projects // with a clean working directory and a source that is a large Git repository. If // you choose this option and your project does not use a Git repository (GitHub, // GitHub Enterprise, or Bitbucket), the option is ignored. // - // * + // * // LOCAL_DOCKER_LAYER_CACHE mode caches existing Docker layers. This mode is a good // choice for projects that build or pull large Docker images. It can prevent the // performance issues caused by pulling large Docker images down from the // network. // - // * You can use a Docker layer cache in the Linux environment - // only. + // * You can use a Docker layer cache in the Linux environment only. // - // * The privileged flag must be set so that your project has the - // required Docker permissions. + // * + // The privileged flag must be set so that your project has the required Docker + // permissions. // - // * You should consider the security - // implications before you use a Docker layer cache. + // * You should consider the security implications before you use a + // Docker layer cache. // - // * LOCAL_CUSTOM_CACHE mode - // caches directories you specify in the buildspec file. This mode is a good choice - // if your build scenario is not suited to one of the other three local cache - // modes. If you use a custom cache: + // * LOCAL_CUSTOM_CACHE mode caches directories you specify in + // the buildspec file. This mode is a good choice if your build scenario is not + // suited to one of the other three local cache modes. If you use a custom + // cache: // - // * Only directories can be specified - // for caching. You cannot specify individual files. + // * Only directories can be specified for caching. You cannot specify + // individual files. // - // * Symlinks are used - // to reference cached directories. + // * Symlinks are used to reference cached directories. // - // * Cached directories are linked to - // your build before it downloads its project sources. Cached items are overridden - // if a source item has the same name. Directories are specified using cache paths - // in the buildspec file. + // * + // Cached directories are linked to your build before it downloads its project + // sources. Cached items are overridden if a source item has the same name. + // Directories are specified using cache paths in the buildspec file. Modes []CacheMode } @@ -1188,31 +1187,31 @@ type ProjectEnvironment struct { // Information about the compute resources the build project uses. Available values // include: // - // * BUILD_GENERAL1_SMALL: Use up to 3 GB memory and 2 vCPUs for + // * BUILD_GENERAL1_SMALL: Use up to 3 GB memory and 2 vCPUs for // builds. // - // * BUILD_GENERAL1_MEDIUM: Use up to 7 GB memory and 4 vCPUs for + // * BUILD_GENERAL1_MEDIUM: Use up to 7 GB memory and 4 vCPUs for // builds. // - // * BUILD_GENERAL1_LARGE: Use up to 16 GB memory and 8 vCPUs for - // builds, depending on your environment type. + // * BUILD_GENERAL1_LARGE: Use up to 16 GB memory and 8 vCPUs for builds, + // depending on your environment type. // - // * BUILD_GENERAL1_2XLARGE: Use - // up to 145 GB memory, 72 vCPUs, and 824 GB of SSD storage for builds. This - // compute type supports Docker images up to 100 GB uncompressed. 
+ // * BUILD_GENERAL1_2XLARGE: Use up to 145 GB + // memory, 72 vCPUs, and 824 GB of SSD storage for builds. This compute type + // supports Docker images up to 100 GB uncompressed. // // If you use // BUILD_GENERAL1_LARGE: // - // * For environment type LINUX_CONTAINER, you can use - // up to 15 GB memory and 8 vCPUs for builds. + // * For environment type LINUX_CONTAINER, you can use up to + // 15 GB memory and 8 vCPUs for builds. // - // * For environment type + // * For environment type // LINUX_GPU_CONTAINER, you can use up to 255 GB memory, 32 vCPUs, and 4 NVIDIA // Tesla V100 GPUs for builds. // - // * For environment type ARM_CONTAINER, you can - // use up to 16 GB memory and 8 vCPUs on ARM-based processors for builds. + // * For environment type ARM_CONTAINER, you can use + // up to 16 GB memory and 8 vCPUs on ARM-based processors for builds. // // For more // information, see Build Environment Compute Types @@ -1225,13 +1224,13 @@ type ProjectEnvironment struct { // The image tag or image digest that identifies the Docker image to use for this // build project. Use the following formats: // - // * For an image tag: /:. For - // example, in the Docker repository that CodeBuild uses to manage its Docker - // images, this would be aws/codebuild/standard:4.0. To specify the latest version - // of this image, this would be aws/codebuild/standard:latest. + // * For an image tag: /:. For example, + // in the Docker repository that CodeBuild uses to manage its Docker images, this + // would be aws/codebuild/standard:4.0. To specify the latest version of this + // image, this would be aws/codebuild/standard:latest. // - // * For an image - // digest: /@. For example, to specify an image with the digest + // * For an image digest: /@. + // For example, to specify an image with the digest // "sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf," use // /@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf. // @@ -1240,20 +1239,20 @@ type ProjectEnvironment struct { // The type of build environment to use for related builds. // - // * The environment - // type ARM_CONTAINER is available only in regions US East (N. Virginia), US East + // * The environment type + // ARM_CONTAINER is available only in regions US East (N. Virginia), US East // (Ohio), US West (Oregon), EU (Ireland), Asia Pacific (Mumbai), Asia Pacific // (Tokyo), Asia Pacific (Sydney), and EU (Frankfurt). // - // * The environment type + // * The environment type // LINUX_CONTAINER with compute type build.general1.2xlarge is available only in // regions US East (N. Virginia), US East (Ohio), US West (Oregon), Canada // (Central), EU (Ireland), EU (London), EU (Frankfurt), Asia Pacific (Tokyo), Asia // Pacific (Seoul), Asia Pacific (Singapore), Asia Pacific (Sydney), China // (Beijing), and China (Ningxia). // - // * The environment type LINUX_GPU_CONTAINER - // is available only in regions US East (N. Virginia), US East (Ohio), US West + // * The environment type LINUX_GPU_CONTAINER is + // available only in regions US East (N. Virginia), US East (Ohio), US West // (Oregon), Canada (Central), EU (Ireland), EU (London), EU (Frankfurt), Asia // Pacific (Tokyo), Asia Pacific (Seoul), Asia Pacific (Singapore), Asia Pacific // (Sydney) , China (Beijing), and China (Ningxia). @@ -1271,16 +1270,16 @@ type ProjectEnvironment struct { // The type of credentials AWS CodeBuild uses to pull images in your build. 
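A minimal sketch of a ProjectEnvironment that pairs the smallest general-purpose compute type with a Linux container and the curated image tag mentioned above; the enum constant names are assumptions based on the naming pattern in this commit.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/codebuild/types"
)

func main() {
	// BUILD_GENERAL1_SMALL gives up to 3 GB memory and 2 vCPUs per build.
	env := types.ProjectEnvironment{
		ComputeType: types.ComputeTypeBuildGeneral1Small, // assumed constant name
		Type:        types.EnvironmentTypeLinuxContainer, // assumed constant name
		Image:       aws.String("aws/codebuild/standard:4.0"),
	}
	fmt.Printf("%+v\n", env)
}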
There // are two valid values: // - // * CODEBUILD specifies that AWS CodeBuild uses its own + // * CODEBUILD specifies that AWS CodeBuild uses its own // credentials. This requires that you modify your ECR repository policy to trust // AWS CodeBuild's service principal. // - // * SERVICE_ROLE specifies that AWS - // CodeBuild uses your build project's service role. + // * SERVICE_ROLE specifies that AWS CodeBuild + // uses your build project's service role. // - // When you use a cross-account - // or private registry image, you must use SERVICE_ROLE credentials. When you use - // an AWS CodeBuild curated image, you must use CODEBUILD credentials. + // When you use a cross-account or private + // registry image, you must use SERVICE_ROLE credentials. When you use an AWS + // CodeBuild curated image, you must use CODEBUILD credentials. ImagePullCredentialsType ImagePullCredentialsType // Enables running the Docker daemon inside a Docker container. Set to true only if @@ -1347,26 +1346,26 @@ type ProjectSource struct { // The type of repository that contains the source code to be built. Valid values // include: // - // * BITBUCKET: The source code is in a Bitbucket repository. + // * BITBUCKET: The source code is in a Bitbucket repository. // - // * + // * // CODECOMMIT: The source code is in an AWS CodeCommit repository. // - // * - // CODEPIPELINE: The source code settings are specified in the source action of a - // pipeline in AWS CodePipeline. + // * CODEPIPELINE: + // The source code settings are specified in the source action of a pipeline in AWS + // CodePipeline. // - // * GITHUB: The source code is in a GitHub or - // GitHub Enterprise Cloud repository. + // * GITHUB: The source code is in a GitHub or GitHub Enterprise + // Cloud repository. // - // * GITHUB_ENTERPRISE: The source code is - // in a GitHub Enterprise Server repository. + // * GITHUB_ENTERPRISE: The source code is in a GitHub + // Enterprise Server repository. // - // * NO_SOURCE: The project does not - // have input source code. + // * NO_SOURCE: The project does not have input + // source code. // - // * S3: The source code is in an Amazon Simple - // Storage Service (Amazon S3) input bucket. + // * S3: The source code is in an Amazon Simple Storage Service + // (Amazon S3) input bucket. // // This member is required. Type SourceType @@ -1406,47 +1405,47 @@ type ProjectSource struct { // Information about the location of the source code to be built. Valid values // include: // - // * For source code settings that are specified in the source action - // of a pipeline in AWS CodePipeline, location should not be specified. If it is + // * For source code settings that are specified in the source action of + // a pipeline in AWS CodePipeline, location should not be specified. If it is // specified, AWS CodePipeline ignores it. This is because AWS CodePipeline uses // the settings in a pipeline's source action instead of this value. // - // * For - // source code in an AWS CodeCommit repository, the HTTPS clone URL to the - // repository that contains the source code and the buildspec file (for example, + // * For source + // code in an AWS CodeCommit repository, the HTTPS clone URL to the repository that + // contains the source code and the buildspec file (for example, // https://git-codecommit..amazonaws.com/v1/repos/). // - // * For source code in an + // * For source code in an // Amazon Simple Storage Service (Amazon S3) input bucket, one of the following. 
// - // - // * The path to the ZIP file that contains the source code (for example, - // //.zip). - // - // * The path to the folder that contains the source code (for - // example, ///). - // - // * For source code in a GitHub repository, the HTTPS clone - // URL to the repository that contains the source and the buildspec file. You must - // connect your AWS account to your GitHub account. Use the AWS CodeBuild console - // to start creating a build project. When you use the console to connect (or - // reconnect) with GitHub, on the GitHub Authorize application page, for - // Organization access, choose Request access next to each repository you want to - // allow AWS CodeBuild to have access to, and then choose Authorize application. - // (After you have connected to your GitHub account, you do not need to finish - // creating the build project. You can leave the AWS CodeBuild console.) To - // instruct AWS CodeBuild to use this connection, in the source object, set the - // auth object's type value to OAUTH. - // - // * For source code in a Bitbucket - // repository, the HTTPS clone URL to the repository that contains the source and - // the buildspec file. You must connect your AWS account to your Bitbucket account. - // Use the AWS CodeBuild console to start creating a build project. When you use - // the console to connect (or reconnect) with Bitbucket, on the Bitbucket Confirm - // access to your account page, choose Grant access. (After you have connected to - // your Bitbucket account, you do not need to finish creating the build project. - // You can leave the AWS CodeBuild console.) To instruct AWS CodeBuild to use this + // * + // The path to the ZIP file that contains the source code (for example, //.zip). + // + // * + // The path to the folder that contains the source code (for example, ///). + // + // * For + // source code in a GitHub repository, the HTTPS clone URL to the repository that + // contains the source and the buildspec file. You must connect your AWS account to + // your GitHub account. Use the AWS CodeBuild console to start creating a build + // project. When you use the console to connect (or reconnect) with GitHub, on the + // GitHub Authorize application page, for Organization access, choose Request + // access next to each repository you want to allow AWS CodeBuild to have access + // to, and then choose Authorize application. (After you have connected to your + // GitHub account, you do not need to finish creating the build project. You can + // leave the AWS CodeBuild console.) To instruct AWS CodeBuild to use this // connection, in the source object, set the auth object's type value to OAUTH. + // + // * + // For source code in a Bitbucket repository, the HTTPS clone URL to the repository + // that contains the source and the buildspec file. You must connect your AWS + // account to your Bitbucket account. Use the AWS CodeBuild console to start + // creating a build project. When you use the console to connect (or reconnect) + // with Bitbucket, on the Bitbucket Confirm access to your account page, choose + // Grant access. (After you have connected to your Bitbucket account, you do not + // need to finish creating the build project. You can leave the AWS CodeBuild + // console.) To instruct AWS CodeBuild to use this connection, in the source + // object, set the auth object's type value to OAUTH. 
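A minimal sketch of a GitHub ProjectSource that follows the location rules above and sets the auth object's type value to OAUTH; the repository URL is hypothetical and the enum constant names are assumptions.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/codebuild/types"
)

func main() {
	source := types.ProjectSource{
		Type:     types.SourceTypeGithub, // assumed constant name
		Location: aws.String("https://github.com/example-org/example-repo.git"), // hypothetical URL
		Auth: &types.SourceAuth{
			Type: types.SourceAuthTypeOauth, // assumed constant name
		},
	}
	fmt.Printf("%+v\n", source)
}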
Location *string // Set to true to report the status of a build's start and finish to your source @@ -1471,26 +1470,26 @@ type ProjectSourceVersion struct { // The source version for the corresponding source identifier. If specified, must // be one of: // - // * For AWS CodeCommit: the commit ID, branch, or Git tag to - // use. + // * For AWS CodeCommit: the commit ID, branch, or Git tag to use. // - // * For GitHub: the commit ID, pull request ID, branch name, or tag name - // that corresponds to the version of the source code you want to build. If a pull + // * + // For GitHub: the commit ID, pull request ID, branch name, or tag name that + // corresponds to the version of the source code you want to build. If a pull // request ID is specified, it must use the format pr/pull-request-ID (for example, // pr/25). If a branch name is specified, the branch's HEAD commit ID is used. If // not specified, the default branch's HEAD commit ID is used. // - // * For - // Bitbucket: the commit ID, branch name, or tag name that corresponds to the - // version of the source code you want to build. If a branch name is specified, the - // branch's HEAD commit ID is used. If not specified, the default branch's HEAD - // commit ID is used. + // * For Bitbucket: + // the commit ID, branch name, or tag name that corresponds to the version of the + // source code you want to build. If a branch name is specified, the branch's HEAD + // commit ID is used. If not specified, the default branch's HEAD commit ID is + // used. // - // * For Amazon Simple Storage Service (Amazon S3): the - // version ID of the object that represents the build input ZIP file to use. + // * For Amazon Simple Storage Service (Amazon S3): the version ID of the + // object that represents the build input ZIP file to use. // - // For - // more information, see Source Version Sample with CodeBuild + // For more information, + // see Source Version Sample with CodeBuild // (https://docs.aws.amazon.com/codebuild/latest/userguide/sample-source-version.html) // in the AWS CodeBuild User Guide. // @@ -1501,10 +1500,10 @@ type ProjectSourceVersion struct { // Information about credentials that provide access to a private Docker registry. // When this is set: // -// * imagePullCredentialsType must be set to SERVICE_ROLE. -// +// * imagePullCredentialsType must be set to SERVICE_ROLE. // -// * images cannot be curated or an Amazon ECR image. +// * +// images cannot be curated or an Amazon ECR image. // // For more information, see // Private Registry with AWS Secrets Manager Sample for AWS CodeBuild @@ -1577,11 +1576,10 @@ type ReportExportConfig struct { // The export configuration type. Valid values are: // - // * S3: The report results - // are exported to an S3 bucket. + // * S3: The report results are + // exported to an S3 bucket. // - // * NO_EXPORT: The report results are not - // exported. + // * NO_EXPORT: The report results are not exported. ExportConfigType ReportExportConfigType // A S3ReportExportConfig object that contains information about the S3 bucket @@ -1648,11 +1646,11 @@ type S3LogsConfig struct { // The current status of the S3 build logs. Valid values are: // - // * ENABLED: S3 - // build logs are enabled for this build project. + // * ENABLED: S3 build + // logs are enabled for this build project. // - // * DISABLED: S3 build logs - // are not enabled for this build project. + // * DISABLED: S3 build logs are not + // enabled for this build project. // // This member is required. 
Status LogsConfigStatusType @@ -1681,12 +1679,12 @@ type S3ReportExportConfig struct { // The type of build output artifact to create. Valid values include: // - // * NONE: - // AWS CodeBuild creates the raw data in the output bucket. This is the default if + // * NONE: AWS + // CodeBuild creates the raw data in the output bucket. This is the default if // packaging is not specified. // - // * ZIP: AWS CodeBuild creates a ZIP file with - // the raw data in the output bucket. + // * ZIP: AWS CodeBuild creates a ZIP file with the + // raw data in the output bucket. Packaging ReportPackagingType // The path to the exported report's raw data results. @@ -1781,14 +1779,13 @@ type TestCaseFilter struct { // The status used to filter test cases. A TestCaseFilter can have one status. // Valid values are: // - // * SUCCEEDED - // - // * FAILED + // * SUCCEEDED // - // * ERROR + // * FAILED // - // * SKIPPED + // * ERROR // + // * SKIPPED // // * UNKNOWN Status *string diff --git a/service/codecommit/api_op_CreateApprovalRuleTemplate.go b/service/codecommit/api_op_CreateApprovalRuleTemplate.go index ac86bdc5c96..a33ee2fb091 100644 --- a/service/codecommit/api_op_CreateApprovalRuleTemplate.go +++ b/service/codecommit/api_op_CreateApprovalRuleTemplate.go @@ -40,28 +40,27 @@ type CreateApprovalRuleTemplateInput struct { // content of the approval rule template, you can specify approvers in an approval // pool in one of two ways: // - // * CodeCommitApprovers: This option only requires - // an AWS account and a resource. It can be used for both IAM users and federated + // * CodeCommitApprovers: This option only requires an + // AWS account and a resource. It can be used for both IAM users and federated // access users whose name matches the provided resource name. This is a very // powerful option that offers a great deal of flexibility. For example, if you // specify the AWS account 123456789012 and Mary_Major, all of the following are // counted as approvals coming from that user: // - // * An IAM user in the - // account (arn:aws:iam::123456789012:user/Mary_Major) + // * An IAM user in the account + // (arn:aws:iam::123456789012:user/Mary_Major) // - // * A federated user - // identified in IAM as Mary_Major - // (arn:aws:sts::123456789012:federated-user/Mary_Major) + // * A federated user identified in + // IAM as Mary_Major (arn:aws:sts::123456789012:federated-user/Mary_Major) // - // This option does not - // recognize an active session of someone assuming the role of CodeCommitReview - // with a role session name of Mary_Major + // This + // option does not recognize an active session of someone assuming the role of + // CodeCommitReview with a role session name of Mary_Major // (arn:aws:sts::123456789012:assumed-role/CodeCommitReview/Mary_Major) unless you // include a wildcard (*Mary_Major). // - // * Fully qualified ARN: This option allows - // you to specify the fully qualified Amazon Resource Name (ARN) of the IAM user or + // * Fully qualified ARN: This option allows you + // to specify the fully qualified Amazon Resource Name (ARN) of the IAM user or // role. 
// // For more information about IAM ARNs, wildcards, and formats, see IAM diff --git a/service/codecommit/api_op_CreatePullRequestApprovalRule.go b/service/codecommit/api_op_CreatePullRequestApprovalRule.go index db702641f50..6c6c9f972c8 100644 --- a/service/codecommit/api_op_CreatePullRequestApprovalRule.go +++ b/service/codecommit/api_op_CreatePullRequestApprovalRule.go @@ -35,28 +35,28 @@ type CreatePullRequestApprovalRuleInput struct { // create the content of the approval rule, you can specify approvers in an // approval pool in one of two ways: // - // * CodeCommitApprovers: This option only + // * CodeCommitApprovers: This option only // requires an AWS account and a resource. It can be used for both IAM users and // federated access users whose name matches the provided resource name. This is a // very powerful option that offers a great deal of flexibility. For example, if // you specify the AWS account 123456789012 and Mary_Major, all of the following // would be counted as approvals coming from that user: // - // * An IAM user in - // the account (arn:aws:iam::123456789012:user/Mary_Major) + // * An IAM user in the + // account (arn:aws:iam::123456789012:user/Mary_Major) // - // * A federated - // user identified in IAM as Mary_Major + // * A federated user + // identified in IAM as Mary_Major // (arn:aws:sts::123456789012:federated-user/Mary_Major) // - // This option does not + // This option does not // recognize an active session of someone assuming the role of CodeCommitReview // with a role session name of Mary_Major // (arn:aws:sts::123456789012:assumed-role/CodeCommitReview/Mary_Major) unless you // include a wildcard (*Mary_Major). // - // * Fully qualified ARN: This option allows - // you to specify the fully qualified Amazon Resource Name (ARN) of the IAM user or + // * Fully qualified ARN: This option allows you + // to specify the fully qualified Amazon Resource Name (ARN) of the IAM user or // role. // // For more information about IAM ARNs, wildcards, and formats, see IAM diff --git a/service/codecommit/api_op_UpdatePullRequestApprovalRuleContent.go b/service/codecommit/api_op_UpdatePullRequestApprovalRuleContent.go index 95684aa9e26..7119b8be07c 100644 --- a/service/codecommit/api_op_UpdatePullRequestApprovalRuleContent.go +++ b/service/codecommit/api_op_UpdatePullRequestApprovalRuleContent.go @@ -40,28 +40,27 @@ type UpdatePullRequestApprovalRuleContentInput struct { // approval rule, you can specify approvers in an approval pool in one of two // ways: // - // * CodeCommitApprovers: This option only requires an AWS account and a + // * CodeCommitApprovers: This option only requires an AWS account and a // resource. It can be used for both IAM users and federated access users whose // name matches the provided resource name. This is a very powerful option that // offers a great deal of flexibility. 
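A minimal sketch of approval rule template content that uses the fully qualified ARN option described above, with a wildcard so any session of the CodeCommitReview role counts toward approval. The JSON schema and the template name are assumptions drawn from the CodeCommit approval rule documentation rather than from this patch.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/codecommit"
)

func main() {
	// Assumed approval rule content format: two approvals required from any
	// assumed-role session of CodeCommitReview in account 123456789012.
	content := `{
  "Version": "2018-11-08",
  "DestinationReferences": ["refs/heads/master"],
  "Statements": [{
    "Type": "Approvers",
    "NumberOfApprovalsNeeded": 2,
    "ApprovalPoolMembers": ["arn:aws:sts::123456789012:assumed-role/CodeCommitReview/*"]
  }]
}`
	input := codecommit.CreateApprovalRuleTemplateInput{
		ApprovalRuleTemplateName:    aws.String("example-two-approvers"), // hypothetical name
		ApprovalRuleTemplateContent: aws.String(content),
	}
	fmt.Printf("%+v\n", input)
}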
For example, if you specify the AWS account // 123456789012 and Mary_Major, all of the following are counted as approvals // coming from that user: // - // * An IAM user in the account + // * An IAM user in the account // (arn:aws:iam::123456789012:user/Mary_Major) // - // * A federated user - // identified in IAM as Mary_Major - // (arn:aws:sts::123456789012:federated-user/Mary_Major) + // * A federated user identified in + // IAM as Mary_Major (arn:aws:sts::123456789012:federated-user/Mary_Major) // - // This option does not - // recognize an active session of someone assuming the role of CodeCommitReview - // with a role session name of Mary_Major + // This + // option does not recognize an active session of someone assuming the role of + // CodeCommitReview with a role session name of Mary_Major // (arn:aws:sts::123456789012:assumed-role/CodeCommitReview/Mary_Major) unless you // include a wildcard (*Mary_Major). // - // * Fully qualified ARN: This option allows - // you to specify the fully qualified Amazon Resource Name (ARN) of the IAM user or + // * Fully qualified ARN: This option allows you + // to specify the fully qualified Amazon Resource Name (ARN) of the IAM user or // role. // // For more information about IAM ARNs, wildcards, and formats, see IAM diff --git a/service/codecommit/doc.go b/service/codecommit/doc.go index d29fbe75718..9ee5b92f2b5 100644 --- a/service/codecommit/doc.go +++ b/service/codecommit/doc.go @@ -8,302 +8,299 @@ // usage examples. You can use the AWS CodeCommit API to work with the following // objects: Repositories, by calling the following: // -// * BatchGetRepositories, -// which returns information about one or more repositories associated with your -// AWS account. +// * BatchGetRepositories, which +// returns information about one or more repositories associated with your AWS +// account. // -// * CreateRepository, which creates an AWS CodeCommit -// repository. -// -// * DeleteRepository, which deletes an AWS CodeCommit -// repository. +// * CreateRepository, which creates an AWS CodeCommit repository. // -// * GetRepository, which returns information about a specified -// repository. +// * +// DeleteRepository, which deletes an AWS CodeCommit repository. // -// * ListRepositories, which lists all AWS CodeCommit repositories -// associated with your AWS account. +// * GetRepository, +// which returns information about a specified repository. // -// * UpdateRepositoryDescription, which sets -// or updates the description of the repository. +// * ListRepositories, +// which lists all AWS CodeCommit repositories associated with your AWS account. // -// * UpdateRepositoryName, which -// changes the name of the repository. If you change the name of a repository, no -// other users of that repository can access it until you send them the new HTTPS -// or SSH URL to use. +// * +// UpdateRepositoryDescription, which sets or updates the description of the +// repository. // -// Branches, by calling the following: +// * UpdateRepositoryName, which changes the name of the repository. +// If you change the name of a repository, no other users of that repository can +// access it until you send them the new HTTPS or SSH URL to use. // -// * CreateBranch, -// which creates a branch in a specified repository. +// Branches, by +// calling the following: // -// * DeleteBranch, which -// deletes the specified branch in a repository unless it is the default branch. +// * CreateBranch, which creates a branch in a specified +// repository. 
// +// * DeleteBranch, which deletes the specified branch in a repository +// unless it is the default branch. // -// * GetBranch, which returns information about a specified branch. +// * GetBranch, which returns information about a +// specified branch. // -// * -// ListBranches, which lists all branches for a specified repository. +// * ListBranches, which lists all branches for a specified +// repository. // -// * -// UpdateDefaultBranch, which changes the default branch for a repository. +// * UpdateDefaultBranch, which changes the default branch for a +// repository. // -// Files, -// by calling the following: +// Files, by calling the following: // -// * DeleteFile, which deletes the content of a -// specified file from a specified branch. +// * DeleteFile, which deletes the +// content of a specified file from a specified branch. // -// * GetBlob, which returns the -// base-64 encoded content of an individual Git blob object in a repository. +// * GetBlob, which returns +// the base-64 encoded content of an individual Git blob object in a repository. // -// * +// * // GetFile, which returns the base-64 encoded content of a specified file. // -// * +// * // GetFolder, which returns the contents of a specified folder or directory. // -// * +// * // PutFile, which adds or modifies a single file in a specified repository and // branch. // // Commits, by calling the following: // -// * BatchGetCommits, which -// returns information about one or more commits in a repository. +// * BatchGetCommits, which returns +// information about one or more commits in a repository. // -// * -// CreateCommit, which creates a commit for changes to a repository. +// * CreateCommit, which +// creates a commit for changes to a repository. // -// * -// GetCommit, which returns information about a commit, including commit messages -// and author and committer information. +// * GetCommit, which returns +// information about a commit, including commit messages and author and committer +// information. // -// * GetDifferences, which returns -// information about the differences in a valid commit specifier (such as a branch, -// tag, HEAD, commit ID, or other fully qualified reference). +// * GetDifferences, which returns information about the differences +// in a valid commit specifier (such as a branch, tag, HEAD, commit ID, or other +// fully qualified reference). // -// Merges, by calling -// the following: +// Merges, by calling the following: // -// * BatchDescribeMergeConflicts, which returns information -// about conflicts in a merge between commits in a repository. +// * +// BatchDescribeMergeConflicts, which returns information about conflicts in a +// merge between commits in a repository. // -// * -// CreateUnreferencedMergeCommit, which creates an unreferenced commit between two -// branches or commits for the purpose of comparing them and identifying any -// potential conflicts. +// * CreateUnreferencedMergeCommit, which +// creates an unreferenced commit between two branches or commits for the purpose +// of comparing them and identifying any potential conflicts. // -// * DescribeMergeConflicts, which returns information -// about merge conflicts between the base, source, and destination versions of a -// file in a potential merge. +// * +// DescribeMergeConflicts, which returns information about merge conflicts between +// the base, source, and destination versions of a file in a potential merge. 
// -// * GetMergeCommit, which returns information -// about the merge between a source and destination commit. +// * +// GetMergeCommit, which returns information about the merge between a source and +// destination commit. // -// * -// GetMergeConflicts, which returns information about merge conflicts between the -// source and destination branch in a pull request. +// * GetMergeConflicts, which returns information about merge +// conflicts between the source and destination branch in a pull request. // -// * GetMergeOptions, which -// returns information about the available merge options between two branches or -// commit specifiers. +// * +// GetMergeOptions, which returns information about the available merge options +// between two branches or commit specifiers. // -// * MergeBranchesByFastForward, which merges two branches -// using the fast-forward merge option. +// * MergeBranchesByFastForward, which +// merges two branches using the fast-forward merge option. // -// * MergeBranchesBySquash, which merges -// two branches using the squash merge option. +// * +// MergeBranchesBySquash, which merges two branches using the squash merge +// option. // -// * MergeBranchesByThreeWay, -// which merges two branches using the three-way merge option. -// -// Pull requests, by -// calling the following: +// * MergeBranchesByThreeWay, which merges two branches using the +// three-way merge option. // -// * CreatePullRequest, which creates a pull request in -// a specified repository. +// Pull requests, by calling the following: // -// * CreatePullRequestApprovalRule, which creates an -// approval rule for a specified pull request. +// * +// CreatePullRequest, which creates a pull request in a specified repository. // -// * -// DeletePullRequestApprovalRule, which deletes an approval rule for a specified +// * +// CreatePullRequestApprovalRule, which creates an approval rule for a specified // pull request. // -// * DescribePullRequestEvents, which returns information about -// one or more pull request events. -// -// * EvaluatePullRequestApprovalRules, which -// evaluates whether a pull request has met all the conditions specified in its -// associated approval rules. +// * DeletePullRequestApprovalRule, which deletes an approval rule +// for a specified pull request. // -// * GetCommentsForPullRequest, which returns -// information about comments on a specified pull request. +// * DescribePullRequestEvents, which returns +// information about one or more pull request events. // -// * GetPullRequest, -// which returns information about a specified pull request. +// * +// EvaluatePullRequestApprovalRules, which evaluates whether a pull request has met +// all the conditions specified in its associated approval rules. // -// * -// GetPullRequestApprovalStates, which returns information about the approval -// states for a specified pull request. +// * +// GetCommentsForPullRequest, which returns information about comments on a +// specified pull request. // -// * GetPullRequestOverrideState, which -// returns information about whether approval rules have been set aside (overriden) -// for a pull request, and if so, the Amazon Resource Name (ARN) of the user or -// identity that overrode the rules and their requirements for the pull request. +// * GetPullRequest, which returns information about a +// specified pull request. // +// * GetPullRequestApprovalStates, which returns +// information about the approval states for a specified pull request. 
// -// * ListPullRequests, which lists all pull requests for a repository. +// * +// GetPullRequestOverrideState, which returns information about whether approval +// rules have been set aside (overriden) for a pull request, and if so, the Amazon +// Resource Name (ARN) of the user or identity that overrode the rules and their +// requirements for the pull request. // -// * -// MergePullRequestByFastForward, which merges the source destination branch of a -// pull request into the specified destination branch for that pull request using -// the fast-forward merge option. +// * ListPullRequests, which lists all pull +// requests for a repository. // -// * MergePullRequestBySquash, which merges the +// * MergePullRequestByFastForward, which merges the // source destination branch of a pull request into the specified destination -// branch for that pull request using the squash merge option. +// branch for that pull request using the fast-forward merge option. // -// * -// MergePullRequestByThreeWay. which merges the source destination branch of a pull +// * +// MergePullRequestBySquash, which merges the source destination branch of a pull // request into the specified destination branch for that pull request using the -// three-way merge option. +// squash merge option. // -// * OverridePullRequestApprovalRules, which sets -// aside all approval rule requirements for a pull request. +// * MergePullRequestByThreeWay. which merges the source +// destination branch of a pull request into the specified destination branch for +// that pull request using the three-way merge option. // -// * -// PostCommentForPullRequest, which posts a comment to a pull request at the -// specified line, file, or request. +// * +// OverridePullRequestApprovalRules, which sets aside all approval rule +// requirements for a pull request. // -// * UpdatePullRequestApprovalRuleContent, -// which updates the structure of an approval rule for a pull request. +// * PostCommentForPullRequest, which posts a +// comment to a pull request at the specified line, file, or request. // -// * -// UpdatePullRequestApprovalState, which updates the state of an approval on a pull -// request. +// * +// UpdatePullRequestApprovalRuleContent, which updates the structure of an approval +// rule for a pull request. // -// * UpdatePullRequestDescription, which updates the description of a -// pull request. +// * UpdatePullRequestApprovalState, which updates the +// state of an approval on a pull request. +// +// * UpdatePullRequestDescription, which +// updates the description of a pull request. // -// * UpdatePullRequestStatus, which updates the status of a pull -// request. +// * UpdatePullRequestStatus, which +// updates the status of a pull request. // -// * UpdatePullRequestTitle, which updates the title of a pull -// request. +// * UpdatePullRequestTitle, which updates +// the title of a pull request. // -// Approval rule templates, by calling the following: +// Approval rule templates, by calling the +// following: // -// * -// AssociateApprovalRuleTemplateWithRepository, which associates a template with a -// specified repository. After the template is associated with a repository, AWS -// CodeCommit creates approval rules that match the template conditions on every -// pull request created in the specified repository. +// * AssociateApprovalRuleTemplateWithRepository, which associates a +// template with a specified repository. 
After the template is associated with a +// repository, AWS CodeCommit creates approval rules that match the template +// conditions on every pull request created in the specified repository. // -// * +// * // BatchAssociateApprovalRuleTemplateWithRepositories, which associates a template // with one or more specified repositories. After the template is associated with a // repository, AWS CodeCommit creates approval rules that match the template // conditions on every pull request created in the specified repositories. // -// * +// * // BatchDisassociateApprovalRuleTemplateFromRepositories, which removes the // association between a template and specified repositories so that approval rules // based on the template are not automatically created when pull requests are // created in those repositories. // -// * CreateApprovalRuleTemplate, which creates -// a template for approval rules that can then be associated with one or more +// * CreateApprovalRuleTemplate, which creates a +// template for approval rules that can then be associated with one or more // repositories in your AWS account. // -// * DeleteApprovalRuleTemplate, which -// deletes the specified template. It does not remove approval rules on pull -// requests already created with the template. +// * DeleteApprovalRuleTemplate, which deletes +// the specified template. It does not remove approval rules on pull requests +// already created with the template. // -// * +// * // DisassociateApprovalRuleTemplateFromRepository, which removes the association // between a template and a repository so that approval rules based on the template // are not automatically created when pull requests are created in the specified // repository. // -// * GetApprovalRuleTemplate, which returns information about an +// * GetApprovalRuleTemplate, which returns information about an // approval rule template. // -// * ListApprovalRuleTemplates, which lists all -// approval rule templates in the AWS Region in your AWS account. +// * ListApprovalRuleTemplates, which lists all approval +// rule templates in the AWS Region in your AWS account. // -// * +// * // ListAssociatedApprovalRuleTemplatesForRepository, which lists all approval rule // templates that are associated with a specified repository. // -// * +// * // ListRepositoriesForApprovalRuleTemplate, which lists all repositories associated // with the specified approval rule template. // -// * +// * // UpdateApprovalRuleTemplateDescription, which updates the description of an // approval rule template. // -// * UpdateApprovalRuleTemplateName, which updates the +// * UpdateApprovalRuleTemplateName, which updates the // name of an approval rule template. // -// * UpdateApprovalRuleTemplateContent, -// which updates the content of an approval rule template. +// * UpdateApprovalRuleTemplateContent, which +// updates the content of an approval rule template. // -// Comments in a -// repository, by calling the following: +// Comments in a repository, by +// calling the following: // -// * DeleteCommentContent, which deletes -// the content of a comment on a commit in a repository. +// * DeleteCommentContent, which deletes the content of a +// comment on a commit in a repository. // -// * GetComment, which -// returns information about a comment on a commit. +// * GetComment, which returns information +// about a comment on a commit. // -// * GetCommentReactions, -// which returns information about emoji reactions to comments. 
+// * GetCommentReactions, which returns information +// about emoji reactions to comments. // -// * -// GetCommentsForComparedCommit, which returns information about comments on the -// comparison between two commit specifiers in a repository. +// * GetCommentsForComparedCommit, which +// returns information about comments on the comparison between two commit +// specifiers in a repository. // -// * -// PostCommentForComparedCommit, which creates a comment on the comparison between -// two commit specifiers in a repository. +// * PostCommentForComparedCommit, which creates a +// comment on the comparison between two commit specifiers in a repository. // -// * PostCommentReply, which creates a -// reply to a comment. +// * +// PostCommentReply, which creates a reply to a comment. // -// * PutCommentReaction, which creates or updates an emoji -// reaction to a comment. +// * PutCommentReaction, +// which creates or updates an emoji reaction to a comment. // -// * UpdateComment, which updates the content of a -// comment on a commit in a repository. +// * UpdateComment, which +// updates the content of a comment on a commit in a repository. // -// Tags used to tag resources in AWS -// CodeCommit (not Git tags), by calling the following: +// Tags used to tag +// resources in AWS CodeCommit (not Git tags), by calling the following: // -// * ListTagsForResource, -// which gets information about AWS tags for a specified Amazon Resource Name (ARN) -// in AWS CodeCommit. +// * +// ListTagsForResource, which gets information about AWS tags for a specified +// Amazon Resource Name (ARN) in AWS CodeCommit. // -// * TagResource, which adds or updates tags for a resource -// in AWS CodeCommit. +// * TagResource, which adds or +// updates tags for a resource in AWS CodeCommit. // -// * UntagResource, which removes tags for a resource in -// AWS CodeCommit. +// * UntagResource, which removes +// tags for a resource in AWS CodeCommit. // // Triggers, by calling the following: // -// * +// * // GetRepositoryTriggers, which returns information about triggers configured for a // repository. // -// * PutRepositoryTriggers, which replaces all triggers for a +// * PutRepositoryTriggers, which replaces all triggers for a // repository and can be used to create or delete triggers. // -// * +// * // TestRepositoryTriggers, which tests the functionality of a repository trigger by // sending data to the trigger target. // diff --git a/service/codecommit/types/enums.go b/service/codecommit/types/enums.go index db25c480d39..15b94dfa403 100644 --- a/service/codecommit/types/enums.go +++ b/service/codecommit/types/enums.go @@ -44,8 +44,8 @@ type ConflictDetailLevelTypeEnum string // Enum values for ConflictDetailLevelTypeEnum const ( - ConflictDetailLevelTypeEnumFile_level ConflictDetailLevelTypeEnum = "FILE_LEVEL" - ConflictDetailLevelTypeEnumLine_level ConflictDetailLevelTypeEnum = "LINE_LEVEL" + ConflictDetailLevelTypeEnumFileLevel ConflictDetailLevelTypeEnum = "FILE_LEVEL" + ConflictDetailLevelTypeEnumLineLevel ConflictDetailLevelTypeEnum = "LINE_LEVEL" ) // Values returns all known values for ConflictDetailLevelTypeEnum. 
Note that this @@ -62,10 +62,10 @@ type ConflictResolutionStrategyTypeEnum string // Enum values for ConflictResolutionStrategyTypeEnum const ( - ConflictResolutionStrategyTypeEnumNone ConflictResolutionStrategyTypeEnum = "NONE" - ConflictResolutionStrategyTypeEnumAccept_source ConflictResolutionStrategyTypeEnum = "ACCEPT_SOURCE" - ConflictResolutionStrategyTypeEnumAccept_destination ConflictResolutionStrategyTypeEnum = "ACCEPT_DESTINATION" - ConflictResolutionStrategyTypeEnumAutomerge ConflictResolutionStrategyTypeEnum = "AUTOMERGE" + ConflictResolutionStrategyTypeEnumNone ConflictResolutionStrategyTypeEnum = "NONE" + ConflictResolutionStrategyTypeEnumAcceptSource ConflictResolutionStrategyTypeEnum = "ACCEPT_SOURCE" + ConflictResolutionStrategyTypeEnumAcceptDestination ConflictResolutionStrategyTypeEnum = "ACCEPT_DESTINATION" + ConflictResolutionStrategyTypeEnumAutomerge ConflictResolutionStrategyTypeEnum = "AUTOMERGE" ) // Values returns all known values for ConflictResolutionStrategyTypeEnum. Note @@ -105,9 +105,9 @@ type MergeOptionTypeEnum string // Enum values for MergeOptionTypeEnum const ( - MergeOptionTypeEnumFast_forward_merge MergeOptionTypeEnum = "FAST_FORWARD_MERGE" - MergeOptionTypeEnumSquash_merge MergeOptionTypeEnum = "SQUASH_MERGE" - MergeOptionTypeEnumThree_way_merge MergeOptionTypeEnum = "THREE_WAY_MERGE" + MergeOptionTypeEnumFastForwardMerge MergeOptionTypeEnum = "FAST_FORWARD_MERGE" + MergeOptionTypeEnumSquashMerge MergeOptionTypeEnum = "SQUASH_MERGE" + MergeOptionTypeEnumThreeWayMerge MergeOptionTypeEnum = "THREE_WAY_MERGE" ) // Values returns all known values for MergeOptionTypeEnum. Note that this can be @@ -125,10 +125,10 @@ type ObjectTypeEnum string // Enum values for ObjectTypeEnum const ( - ObjectTypeEnumFile ObjectTypeEnum = "FILE" - ObjectTypeEnumDirectory ObjectTypeEnum = "DIRECTORY" - ObjectTypeEnumGit_link ObjectTypeEnum = "GIT_LINK" - ObjectTypeEnumSymbolic_link ObjectTypeEnum = "SYMBOLIC_LINK" + ObjectTypeEnumFile ObjectTypeEnum = "FILE" + ObjectTypeEnumDirectory ObjectTypeEnum = "DIRECTORY" + ObjectTypeEnumGitLink ObjectTypeEnum = "GIT_LINK" + ObjectTypeEnumSymbolicLink ObjectTypeEnum = "SYMBOLIC_LINK" ) // Values returns all known values for ObjectTypeEnum. 
Note that this can be @@ -183,15 +183,15 @@ type PullRequestEventType string // Enum values for PullRequestEventType const ( - PullRequestEventTypePull_request_created PullRequestEventType = "PULL_REQUEST_CREATED" - PullRequestEventTypePull_request_status_changed PullRequestEventType = "PULL_REQUEST_STATUS_CHANGED" - PullRequestEventTypePull_request_source_reference_updated PullRequestEventType = "PULL_REQUEST_SOURCE_REFERENCE_UPDATED" - PullRequestEventTypePull_request_merge_state_changed PullRequestEventType = "PULL_REQUEST_MERGE_STATE_CHANGED" - PullRequestEventTypePull_request_approval_rule_created PullRequestEventType = "PULL_REQUEST_APPROVAL_RULE_CREATED" - PullRequestEventTypePull_request_approval_rule_updated PullRequestEventType = "PULL_REQUEST_APPROVAL_RULE_UPDATED" - PullRequestEventTypePull_request_approval_rule_deleted PullRequestEventType = "PULL_REQUEST_APPROVAL_RULE_DELETED" - PullRequestEventTypePull_request_approval_rule_overridden PullRequestEventType = "PULL_REQUEST_APPROVAL_RULE_OVERRIDDEN" - PullRequestEventTypePull_request_approval_state_changed PullRequestEventType = "PULL_REQUEST_APPROVAL_STATE_CHANGED" + PullRequestEventTypePullRequestCreated PullRequestEventType = "PULL_REQUEST_CREATED" + PullRequestEventTypePullRequestStatusChanged PullRequestEventType = "PULL_REQUEST_STATUS_CHANGED" + PullRequestEventTypePullRequestSourceReferenceUpdated PullRequestEventType = "PULL_REQUEST_SOURCE_REFERENCE_UPDATED" + PullRequestEventTypePullRequestMergeStateChanged PullRequestEventType = "PULL_REQUEST_MERGE_STATE_CHANGED" + PullRequestEventTypePullRequestApprovalRuleCreated PullRequestEventType = "PULL_REQUEST_APPROVAL_RULE_CREATED" + PullRequestEventTypePullRequestApprovalRuleUpdated PullRequestEventType = "PULL_REQUEST_APPROVAL_RULE_UPDATED" + PullRequestEventTypePullRequestApprovalRuleDeleted PullRequestEventType = "PULL_REQUEST_APPROVAL_RULE_DELETED" + PullRequestEventTypePullRequestApprovalRuleOverridden PullRequestEventType = "PULL_REQUEST_APPROVAL_RULE_OVERRIDDEN" + PullRequestEventTypePullRequestApprovalStateChanged PullRequestEventType = "PULL_REQUEST_APPROVAL_STATE_CHANGED" ) // Values returns all known values for PullRequestEventType. Note that this can be @@ -251,10 +251,10 @@ type ReplacementTypeEnum string // Enum values for ReplacementTypeEnum const ( - ReplacementTypeEnumKeep_base ReplacementTypeEnum = "KEEP_BASE" - ReplacementTypeEnumKeep_source ReplacementTypeEnum = "KEEP_SOURCE" - ReplacementTypeEnumKeep_destination ReplacementTypeEnum = "KEEP_DESTINATION" - ReplacementTypeEnumUse_new_content ReplacementTypeEnum = "USE_NEW_CONTENT" + ReplacementTypeEnumKeepBase ReplacementTypeEnum = "KEEP_BASE" + ReplacementTypeEnumKeepSource ReplacementTypeEnum = "KEEP_SOURCE" + ReplacementTypeEnumKeepDestination ReplacementTypeEnum = "KEEP_DESTINATION" + ReplacementTypeEnumUseNewContent ReplacementTypeEnum = "USE_NEW_CONTENT" ) // Values returns all known values for ReplacementTypeEnum. 
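Code that referenced the old underscore-style constant names must move to the CamelCase identifiers introduced here; the underlying string values are unchanged, so nothing changes on the wire. A minimal sketch using constants taken verbatim from this patch:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/codecommit/types"
)

func main() {
	// Renamed identifiers, same wire values as before.
	fmt.Println(types.MergeOptionTypeEnumThreeWayMerge)               // "THREE_WAY_MERGE"
	fmt.Println(types.ConflictDetailLevelTypeEnumFileLevel)           // "FILE_LEVEL"
	fmt.Println(types.ConflictResolutionStrategyTypeEnumAcceptSource) // "ACCEPT_SOURCE"
}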
Note that this can be @@ -273,10 +273,10 @@ type RepositoryTriggerEventEnum string // Enum values for RepositoryTriggerEventEnum const ( - RepositoryTriggerEventEnumAll RepositoryTriggerEventEnum = "all" - RepositoryTriggerEventEnumUpdate_reference RepositoryTriggerEventEnum = "updateReference" - RepositoryTriggerEventEnumCreate_reference RepositoryTriggerEventEnum = "createReference" - RepositoryTriggerEventEnumDelete_reference RepositoryTriggerEventEnum = "deleteReference" + RepositoryTriggerEventEnumAll RepositoryTriggerEventEnum = "all" + RepositoryTriggerEventEnumUpdateReference RepositoryTriggerEventEnum = "updateReference" + RepositoryTriggerEventEnumCreateReference RepositoryTriggerEventEnum = "createReference" + RepositoryTriggerEventEnumDeleteReference RepositoryTriggerEventEnum = "deleteReference" ) // Values returns all known values for RepositoryTriggerEventEnum. Note that this @@ -295,8 +295,8 @@ type SortByEnum string // Enum values for SortByEnum const ( - SortByEnumRepository_name SortByEnum = "repositoryName" - SortByEnumModified_date SortByEnum = "lastModifiedDate" + SortByEnumRepositoryName SortByEnum = "repositoryName" + SortByEnumModifiedDate SortByEnum = "lastModifiedDate" ) // Values returns all known values for SortByEnum. Note that this can be expanded diff --git a/service/codecommit/types/types.go b/service/codecommit/types/types.go index 856287159e6..9dc3382f9ff 100644 --- a/service/codecommit/types/types.go +++ b/service/codecommit/types/types.go @@ -183,15 +183,15 @@ type BlobMetadata struct { // The file mode permissions of the blob. File mode permission codes include: // + // * + // 100644 indicates read/write // - // * 100644 indicates read/write + // * 100755 indicates read/write/execute // - // * 100755 indicates read/write/execute + // * 160000 + // indicates a submodule // - // * - // 160000 indicates a submodule - // - // * 120000 indicates a symlink + // * 120000 indicates a symlink Mode *string // The path to the blob and associated file name, if any. diff --git a/service/codedeploy/api_op_BatchGetDeploymentTargets.go b/service/codedeploy/api_op_BatchGetDeploymentTargets.go index 874ab354594..221288682dd 100644 --- a/service/codedeploy/api_op_BatchGetDeploymentTargets.go +++ b/service/codedeploy/api_op_BatchGetDeploymentTargets.go @@ -17,15 +17,15 @@ import ( // is 25. The type of targets returned depends on the deployment's compute platform // or deployment method: // -// * EC2/On-premises: Information about EC2 instance +// * EC2/On-premises: Information about EC2 instance // targets. // -// * AWS Lambda: Information about Lambda functions targets. +// * AWS Lambda: Information about Lambda functions targets. // -// * -// Amazon ECS: Information about Amazon ECS service targets. +// * Amazon +// ECS: Information about Amazon ECS service targets. // -// * CloudFormation: +// * CloudFormation: // Information about targets of blue/green deployments initiated by a // CloudFormation stack update. func (c *Client) BatchGetDeploymentTargets(ctx context.Context, params *BatchGetDeploymentTargetsInput, optFns ...func(*Options)) (*BatchGetDeploymentTargetsOutput, error) { @@ -52,22 +52,21 @@ type BatchGetDeploymentTargetsInput struct { // determines the type of the targets and their formats. The maximum number of // deployment target IDs you can specify is 25. 
// - // * For deployments that use the + // * For deployments that use the // EC2/On-premises compute platform, the target IDs are EC2 or on-premises // instances IDs, and their target type is instanceTarget. // - // * For deployments - // that use the AWS Lambda compute platform, the target IDs are the names of Lambda + // * For deployments that + // use the AWS Lambda compute platform, the target IDs are the names of Lambda // functions, and their target type is instanceTarget. // - // * For deployments that - // use the Amazon ECS compute platform, the target IDs are pairs of Amazon ECS - // clusters and services specified using the format :. Their target type is - // ecsTarget. + // * For deployments that use + // the Amazon ECS compute platform, the target IDs are pairs of Amazon ECS clusters + // and services specified using the format :. Their target type is ecsTarget. // - // * For deployments that are deployed with AWS CloudFormation, the - // target IDs are CloudFormation stack IDs. Their target type is - // cloudFormationTarget. + // * + // For deployments that are deployed with AWS CloudFormation, the target IDs are + // CloudFormation stack IDs. Their target type is cloudFormationTarget. TargetIds []*string } @@ -77,17 +76,17 @@ type BatchGetDeploymentTargetsOutput struct { // about the target, such as its status and lifecycle events. The type of the // target objects depends on the deployment' compute platform. // - // * - // EC2/On-premises: Each target object is an EC2 or on-premises instance. + // * EC2/On-premises: + // Each target object is an EC2 or on-premises instance. // - // * - // AWS Lambda: The target object is a specific version of an AWS Lambda function. + // * AWS Lambda: The target + // object is a specific version of an AWS Lambda function. // + // * Amazon ECS: The + // target object is an Amazon ECS service. // - // * Amazon ECS: The target object is an Amazon ECS service. - // - // * CloudFormation: - // The target object is an AWS CloudFormation blue/green deployment. + // * CloudFormation: The target object is + // an AWS CloudFormation blue/green deployment. DeploymentTargets []*types.DeploymentTarget // Metadata pertaining to the operation's result. diff --git a/service/codedeploy/api_op_CreateDeployment.go b/service/codedeploy/api_op_CreateDeployment.go index 9b822a6c5fe..df312bed8df 100644 --- a/service/codedeploy/api_op_CreateDeployment.go +++ b/service/codedeploy/api_op_CreateDeployment.go @@ -57,15 +57,15 @@ type CreateDeploymentInput struct { // deployment. The fileExistsBehavior parameter takes any of the following // values: // - // * DISALLOW: The deployment fails. This is also the default behavior - // if no option is specified. + // * DISALLOW: The deployment fails. This is also the default behavior if + // no option is specified. // - // * OVERWRITE: The version of the file from the + // * OVERWRITE: The version of the file from the // application revision currently being deployed replaces the version already on // the instance. // - // * RETAIN: The version of the file already on the instance is - // kept and used as part of the new deployment. + // * RETAIN: The version of the file already on the instance is kept + // and used as part of the new deployment. 
FileExistsBehavior types.FileExistsBehavior // If true, then if an ApplicationStop, BeforeBlockTraffic, or AfterBlockTraffic diff --git a/service/codedeploy/api_op_CreateDeploymentConfig.go b/service/codedeploy/api_op_CreateDeploymentConfig.go index b359419236c..81f0b3d6ec7 100644 --- a/service/codedeploy/api_op_CreateDeploymentConfig.go +++ b/service/codedeploy/api_op_CreateDeploymentConfig.go @@ -42,19 +42,19 @@ type CreateDeploymentConfigInput struct { // during the deployment. There are two parameters expected in the input: type and // value. The type parameter takes either of the following values: // - // * - // HOST_COUNT: The value parameter represents the minimum number of healthy - // instances as an absolute value. + // * HOST_COUNT: + // The value parameter represents the minimum number of healthy instances as an + // absolute value. // - // * FLEET_PERCENT: The value parameter - // represents the minimum number of healthy instances as a percentage of the total - // number of instances in the deployment. If you specify FLEET_PERCENT, at the - // start of the deployment, AWS CodeDeploy converts the percentage to the - // equivalent number of instances and rounds up fractional instances. + // * FLEET_PERCENT: The value parameter represents the minimum + // number of healthy instances as a percentage of the total number of instances in + // the deployment. If you specify FLEET_PERCENT, at the start of the deployment, + // AWS CodeDeploy converts the percentage to the equivalent number of instances and + // rounds up fractional instances. // - // The value - // parameter takes an integer. For example, to set a minimum of 95% healthy - // instance, specify a type of FLEET_PERCENT and a value of 95. + // The value parameter takes an integer. For + // example, to set a minimum of 95% healthy instance, specify a type of + // FLEET_PERCENT and a value of 95. MinimumHealthyHosts *types.MinimumHealthyHosts // The configuration that specifies how the deployment traffic is routed. diff --git a/service/codedeploy/api_op_ListApplicationRevisions.go b/service/codedeploy/api_op_ListApplicationRevisions.go index c31b13285f5..c8629c6d11c 100644 --- a/service/codedeploy/api_op_ListApplicationRevisions.go +++ b/service/codedeploy/api_op_ListApplicationRevisions.go @@ -39,13 +39,13 @@ type ListApplicationRevisionsInput struct { // Whether to list revisions based on whether the revision is the target revision // of a deployment group: // - // * include: List revisions that are target revisions - // of a deployment group. + // * include: List revisions that are target revisions of a + // deployment group. // - // * exclude: Do not list revisions that are target - // revisions of a deployment group. + // * exclude: Do not list revisions that are target revisions of + // a deployment group. // - // * ignore: List all revisions. + // * ignore: List all revisions. Deployed types.ListStateFilterAction // An identifier returned from the previous ListApplicationRevisions call. It can @@ -61,30 +61,28 @@ type ListApplicationRevisionsInput struct { // The column name to use to sort the list results: // - // * registerTime: Sort by - // the time the revisions were registered with AWS CodeDeploy. + // * registerTime: Sort by the + // time the revisions were registered with AWS CodeDeploy. // - // * - // firstUsedTime: Sort by the time the revisions were first used in a deployment. + // * firstUsedTime: Sort + // by the time the revisions were first used in a deployment. 
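A minimal sketch of the FLEET_PERCENT example above (a minimum of 95% healthy instances) expressed as a MinimumHealthyHosts value; the constant name and the pointer shape of Value are assumptions based on this snapshot's conventions.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/codedeploy/types"
)

func main() {
	// At deployment start, CodeDeploy converts 95% to an instance count,
	// rounding up fractional instances.
	hosts := types.MinimumHealthyHosts{
		Type:  types.MinimumHealthyHostsTypeFleetPercent, // assumed constant name
		Value: aws.Int32(95),
	}
	fmt.Printf("%+v\n", hosts)
}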
// + // * lastUsedTime: Sort + // by the time the revisions were last used in a deployment. // - // * lastUsedTime: Sort by the time the revisions were last used in a - // deployment. - // - // If not specified or set to null, the results are returned in an - // arbitrary order. + // If not specified or + // set to null, the results are returned in an arbitrary order. SortBy types.ApplicationRevisionSortBy // The order in which to sort the list results: // - // * ascending: ascending - // order. + // * ascending: ascending order. // - // * descending: descending order. + // * + // descending: descending order. // - // If not specified, the results are - // sorted in ascending order. If set to null, the results are sorted in an - // arbitrary order. + // If not specified, the results are sorted in + // ascending order. If set to null, the results are sorted in an arbitrary order. SortOrder types.SortOrder } diff --git a/service/codedeploy/api_op_ListDeploymentInstances.go b/service/codedeploy/api_op_ListDeploymentInstances.go index dafa2f0023e..f0717a064a4 100644 --- a/service/codedeploy/api_op_ListDeploymentInstances.go +++ b/service/codedeploy/api_op_ListDeploymentInstances.go @@ -40,22 +40,22 @@ type ListDeploymentInstancesInput struct { // A subset of instances to list by status: // - // * Pending: Include those instances + // * Pending: Include those instances // with pending deployments. // - // * InProgress: Include those instances where + // * InProgress: Include those instances where // deployments are still in progress. // - // * Succeeded: Include those instances - // with successful deployments. + // * Succeeded: Include those instances with + // successful deployments. // - // * Failed: Include those instances with failed + // * Failed: Include those instances with failed // deployments. // - // * Skipped: Include those instances with skipped deployments. + // * Skipped: Include those instances with skipped deployments. // - // - // * Unknown: Include those instances with deployments in an unknown state. + // * + // Unknown: Include those instances with deployments in an unknown state. InstanceStatusFilter []types.InstanceStatus // The set of instances in a blue/green deployment, either those in the original diff --git a/service/codedeploy/api_op_ListDeploymentTargets.go b/service/codedeploy/api_op_ListDeploymentTargets.go index 7a9937fb208..5e5ffb61d93 100644 --- a/service/codedeploy/api_op_ListDeploymentTargets.go +++ b/service/codedeploy/api_op_ListDeploymentTargets.go @@ -37,11 +37,11 @@ type ListDeploymentTargetsInput struct { // A key used to filter the returned targets. The two valid values are: // - // * + // * // TargetStatus - A TargetStatus filter string can be Failed, InProgress, Pending, // Ready, Skipped, Succeeded, or Unknown. // - // * ServerInstanceLabel - A + // * ServerInstanceLabel - A // ServerInstanceLabel filter string can be Blue or Green. TargetFilters map[string][]*string } diff --git a/service/codedeploy/api_op_ListDeployments.go b/service/codedeploy/api_op_ListDeployments.go index 45230fa2e7e..eaf2887f9f4 100644 --- a/service/codedeploy/api_op_ListDeployments.go +++ b/service/codedeploy/api_op_ListDeployments.go @@ -51,22 +51,22 @@ type ListDeploymentsInput struct { // A subset of deployments to list by status: // - // * Created: Include created + // * Created: Include created // deployments in the resulting list. // - // * Queued: Include queued deployments in - // the resulting list. 
- // - // * In Progress: Include in-progress deployments in the - // resulting list. - // - // * Succeeded: Include successful deployments in the + // * Queued: Include queued deployments in the // resulting list. // - // * Failed: Include failed deployments in the resulting + // * In Progress: Include in-progress deployments in the resulting // list. // - // * Stopped: Include stopped deployments in the resulting list. + // * Succeeded: Include successful deployments in the resulting list. + // + // * + // Failed: Include failed deployments in the resulting list. + // + // * Stopped: Include + // stopped deployments in the resulting list. IncludeOnlyStatuses []types.DeploymentStatus // An identifier returned from the previous list deployments call. It can be used diff --git a/service/codedeploy/api_op_ListOnPremisesInstances.go b/service/codedeploy/api_op_ListOnPremisesInstances.go index 9f19d103c3b..780092f59f9 100644 --- a/service/codedeploy/api_op_ListOnPremisesInstances.go +++ b/service/codedeploy/api_op_ListOnPremisesInstances.go @@ -39,11 +39,11 @@ type ListOnPremisesInstancesInput struct { // The registration status of the on-premises instances: // - // * Deregistered: - // Include deregistered on-premises instances in the resulting list. + // * Deregistered: Include + // deregistered on-premises instances in the resulting list. // - // * - // Registered: Include registered on-premises instances in the resulting list. + // * Registered: Include + // registered on-premises instances in the resulting list. RegistrationStatus types.RegistrationStatus // The on-premises instance tags that are used to restrict the on-premises instance diff --git a/service/codedeploy/api_op_StopDeployment.go b/service/codedeploy/api_op_StopDeployment.go index e9d4ca252bb..4646aa3ef69 100644 --- a/service/codedeploy/api_op_StopDeployment.go +++ b/service/codedeploy/api_op_StopDeployment.go @@ -46,10 +46,10 @@ type StopDeploymentOutput struct { // The status of the stop deployment operation: // - // * Pending: The stop operation - // is pending. + // * Pending: The stop operation is + // pending. // - // * Succeeded: The stop operation was successful. + // * Succeeded: The stop operation was successful. Status types.StopStatus // An accompanying status message. diff --git a/service/codedeploy/doc.go b/service/codedeploy/doc.go index c809e6dd2b8..ef84445d234 100644 --- a/service/codedeploy/doc.go +++ b/service/codedeploy/doc.go @@ -18,13 +18,13 @@ // error-prone manual deployments. AWS CodeDeploy Components Use the information in // this guide to help you work with the following AWS CodeDeploy components: // -// * +// * // Application: A name that uniquely identifies the application you want to deploy. // AWS CodeDeploy uses this name, which functions as a container, to ensure the // correct combination of revision, deployment configuration, and deployment group // are referenced during a deployment. // -// * Deployment group: A set of individual +// * Deployment group: A set of individual // instances, CodeDeploy Lambda deployment configuration settings, or an Amazon ECS // service and network details. A Lambda deployment group specifies how to route // traffic to a new version of a Lambda function. An Amazon ECS deployment group @@ -34,45 +34,44 @@ // Amazon EC2 instances in Amazon EC2 Auto Scaling groups, or both. All deployment // groups can specify optional trigger, alarm, and rollback settings. 
// -// * -// Deployment configuration: A set of deployment rules and deployment success and -// failure conditions used by AWS CodeDeploy during a deployment. +// * Deployment +// configuration: A set of deployment rules and deployment success and failure +// conditions used by AWS CodeDeploy during a deployment. // -// * -// Deployment: The process and the components used when updating a Lambda function, -// a containerized application in an Amazon ECS service, or of installing content -// on one or more instances. +// * Deployment: The +// process and the components used when updating a Lambda function, a containerized +// application in an Amazon ECS service, or of installing content on one or more +// instances. // -// * Application revisions: For an AWS Lambda -// deployment, this is an AppSpec file that specifies the Lambda function to be -// updated and one or more functions to validate deployment lifecycle events. For -// an Amazon ECS deployment, this is an AppSpec file that specifies the Amazon ECS -// task definition, container, and port where production traffic is rerouted. For -// an EC2/On-premises deployment, this is an archive file that contains source -// content—source code, webpages, executable files, and deployment scripts—along -// with an AppSpec file. Revisions are stored in Amazon S3 buckets or GitHub -// repositories. For Amazon S3, a revision is uniquely identified by its Amazon S3 -// object key and its ETag, version, or both. For GitHub, a revision is uniquely -// identified by its commit ID. +// * Application revisions: For an AWS Lambda deployment, this is an +// AppSpec file that specifies the Lambda function to be updated and one or more +// functions to validate deployment lifecycle events. For an Amazon ECS deployment, +// this is an AppSpec file that specifies the Amazon ECS task definition, +// container, and port where production traffic is rerouted. For an EC2/On-premises +// deployment, this is an archive file that contains source content—source code, +// webpages, executable files, and deployment scripts—along with an AppSpec file. +// Revisions are stored in Amazon S3 buckets or GitHub repositories. For Amazon S3, +// a revision is uniquely identified by its Amazon S3 object key and its ETag, +// version, or both. For GitHub, a revision is uniquely identified by its commit +// ID. // -// This guide also contains information to help you -// get details about the instances in your deployments, to make on-premises -// instances available for AWS CodeDeploy deployments, to get details about a -// Lambda function deployment, and to get details about Amazon ECS service -// deployments. AWS CodeDeploy Information Resources +// This guide also contains information to help you get details about the +// instances in your deployments, to make on-premises instances available for AWS +// CodeDeploy deployments, to get details about a Lambda function deployment, and +// to get details about Amazon ECS service deployments. 
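// ----------------------------------------------------------------------------
// Illustrative sketch, not part of the generated change set: registering an
// application revision stored in Amazon S3, as described in the "Application
// revisions" bullet above. The bucket, key, and application name are
// hypothetical, and the constants types.RevisionLocationTypeS3 and
// types.BundleTypeZip assume the CamelCase enum naming this patch introduces.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/codedeploy"
	"github.com/aws/aws-sdk-go-v2/service/codedeploy/types"
)

func main() {
	ctx := context.TODO()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := codedeploy.NewFromConfig(cfg)

	// An S3 revision is identified by its bucket, object key, and bundle type
	// (a zip archive here); ETag or version can pin an exact object revision.
	revision := &types.RevisionLocation{
		RevisionType: types.RevisionLocationTypeS3,
		S3Location: &types.S3Location{
			Bucket:     aws.String("my-revision-bucket"), // hypothetical
			Key:        aws.String("my-sample-app.zip"),  // hypothetical
			BundleType: types.BundleTypeZip,
		},
	}

	_, err = client.RegisterApplicationRevision(ctx, &codedeploy.RegisterApplicationRevisionInput{
		ApplicationName: aws.String("my-sample-app"), // hypothetical
		Revision:        revision,
	})
	if err != nil {
		log.Fatal(err)
	}
}
// ----------------------------------------------------------------------------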
AWS CodeDeploy Information +// Resources // -// * AWS CodeDeploy User -// Guide (https://docs.aws.amazon.com/codedeploy/latest/userguide) +// * AWS CodeDeploy User Guide +// (https://docs.aws.amazon.com/codedeploy/latest/userguide) // -// * AWS -// CodeDeploy API Reference Guide -// (https://docs.aws.amazon.com/codedeploy/latest/APIReference/) +// * AWS CodeDeploy API +// Reference Guide (https://docs.aws.amazon.com/codedeploy/latest/APIReference/) // -// * AWS CLI -// Reference for AWS CodeDeploy +// * +// AWS CLI Reference for AWS CodeDeploy // (https://docs.aws.amazon.com/cli/latest/reference/deploy/index.html) // -// * AWS +// * AWS // CodeDeploy Developer Forum // (https://forums.aws.amazon.com/forum.jspa?forumID=179) package codedeploy diff --git a/service/codedeploy/types/enums.go b/service/codedeploy/types/enums.go index 7652c57a3d5..9077774ee36 100644 --- a/service/codedeploy/types/enums.go +++ b/service/codedeploy/types/enums.go @@ -26,9 +26,9 @@ type AutoRollbackEvent string // Enum values for AutoRollbackEvent const ( - AutoRollbackEventDeployment_failure AutoRollbackEvent = "DEPLOYMENT_FAILURE" - AutoRollbackEventDeployment_stop_on_alarm AutoRollbackEvent = "DEPLOYMENT_STOP_ON_ALARM" - AutoRollbackEventDeployment_stop_on_request AutoRollbackEvent = "DEPLOYMENT_STOP_ON_REQUEST" + AutoRollbackEventDeploymentFailure AutoRollbackEvent = "DEPLOYMENT_FAILURE" + AutoRollbackEventDeploymentStopOnAlarm AutoRollbackEvent = "DEPLOYMENT_STOP_ON_ALARM" + AutoRollbackEventDeploymentStopOnRequest AutoRollbackEvent = "DEPLOYMENT_STOP_ON_REQUEST" ) // Values returns all known values for AutoRollbackEvent. Note that this can be @@ -116,8 +116,8 @@ type DeploymentOption string // Enum values for DeploymentOption const ( - DeploymentOptionWith_traffic_control DeploymentOption = "WITH_TRAFFIC_CONTROL" - DeploymentOptionWithout_traffic_control DeploymentOption = "WITHOUT_TRAFFIC_CONTROL" + DeploymentOptionWithTrafficControl DeploymentOption = "WITH_TRAFFIC_CONTROL" + DeploymentOptionWithoutTrafficControl DeploymentOption = "WITHOUT_TRAFFIC_CONTROL" ) // Values returns all known values for DeploymentOption. Note that this can be @@ -134,8 +134,8 @@ type DeploymentReadyAction string // Enum values for DeploymentReadyAction const ( - DeploymentReadyActionContinue_deployment DeploymentReadyAction = "CONTINUE_DEPLOYMENT" - DeploymentReadyActionStop_deployment DeploymentReadyAction = "STOP_DEPLOYMENT" + DeploymentReadyActionContinueDeployment DeploymentReadyAction = "CONTINUE_DEPLOYMENT" + DeploymentReadyActionStopDeployment DeploymentReadyAction = "STOP_DEPLOYMENT" ) // Values returns all known values for DeploymentReadyAction. 
Note that this can be @@ -152,14 +152,14 @@ type DeploymentStatus string // Enum values for DeploymentStatus const ( - DeploymentStatusCreated DeploymentStatus = "Created" - DeploymentStatusQueued DeploymentStatus = "Queued" - DeploymentStatusIn_progress DeploymentStatus = "InProgress" - DeploymentStatusBaking DeploymentStatus = "Baking" - DeploymentStatusSucceeded DeploymentStatus = "Succeeded" - DeploymentStatusFailed DeploymentStatus = "Failed" - DeploymentStatusStopped DeploymentStatus = "Stopped" - DeploymentStatusReady DeploymentStatus = "Ready" + DeploymentStatusCreated DeploymentStatus = "Created" + DeploymentStatusQueued DeploymentStatus = "Queued" + DeploymentStatusInProgress DeploymentStatus = "InProgress" + DeploymentStatusBaking DeploymentStatus = "Baking" + DeploymentStatusSucceeded DeploymentStatus = "Succeeded" + DeploymentStatusFailed DeploymentStatus = "Failed" + DeploymentStatusStopped DeploymentStatus = "Stopped" + DeploymentStatusReady DeploymentStatus = "Ready" ) // Values returns all known values for DeploymentStatus. Note that this can be @@ -182,10 +182,10 @@ type DeploymentTargetType string // Enum values for DeploymentTargetType const ( - DeploymentTargetTypeInstance_target DeploymentTargetType = "InstanceTarget" - DeploymentTargetTypeLambda_target DeploymentTargetType = "LambdaTarget" - DeploymentTargetTypeEcs_target DeploymentTargetType = "ECSTarget" - DeploymentTargetTypeCloudformation_target DeploymentTargetType = "CloudFormationTarget" + DeploymentTargetTypeInstanceTarget DeploymentTargetType = "InstanceTarget" + DeploymentTargetTypeLambdaTarget DeploymentTargetType = "LambdaTarget" + DeploymentTargetTypeEcsTarget DeploymentTargetType = "ECSTarget" + DeploymentTargetTypeCloudformationTarget DeploymentTargetType = "CloudFormationTarget" ) // Values returns all known values for DeploymentTargetType. Note that this can be @@ -204,8 +204,8 @@ type DeploymentType string // Enum values for DeploymentType const ( - DeploymentTypeIn_place DeploymentType = "IN_PLACE" - DeploymentTypeBlue_green DeploymentType = "BLUE_GREEN" + DeploymentTypeInPlace DeploymentType = "IN_PLACE" + DeploymentTypeBlueGreen DeploymentType = "BLUE_GREEN" ) // Values returns all known values for DeploymentType. Note that this can be @@ -222,8 +222,8 @@ type DeploymentWaitType string // Enum values for DeploymentWaitType const ( - DeploymentWaitTypeReady_wait DeploymentWaitType = "READY_WAIT" - DeploymentWaitTypeTermination_wait DeploymentWaitType = "TERMINATION_WAIT" + DeploymentWaitTypeReadyWait DeploymentWaitType = "READY_WAIT" + DeploymentWaitTypeTerminationWait DeploymentWaitType = "TERMINATION_WAIT" ) // Values returns all known values for DeploymentWaitType. Note that this can be @@ -240,9 +240,9 @@ type EC2TagFilterType string // Enum values for EC2TagFilterType const ( - EC2TagFilterTypeKey_only EC2TagFilterType = "KEY_ONLY" - EC2TagFilterTypeValue_only EC2TagFilterType = "VALUE_ONLY" - EC2TagFilterTypeKey_and_value EC2TagFilterType = "KEY_AND_VALUE" + EC2TagFilterTypeKeyOnly EC2TagFilterType = "KEY_ONLY" + EC2TagFilterTypeValueOnly EC2TagFilterType = "VALUE_ONLY" + EC2TagFilterTypeKeyAndValue EC2TagFilterType = "KEY_AND_VALUE" ) // Values returns all known values for EC2TagFilterType. 
Note that this can be @@ -260,40 +260,40 @@ type ErrorCode string // Enum values for ErrorCode const ( - ErrorCodeAgent_issue ErrorCode = "AGENT_ISSUE" - ErrorCodeAlarm_active ErrorCode = "ALARM_ACTIVE" - ErrorCodeApplication_missing ErrorCode = "APPLICATION_MISSING" - ErrorCodeAutoscaling_validation_error ErrorCode = "AUTOSCALING_VALIDATION_ERROR" - ErrorCodeAuto_scaling_configuration ErrorCode = "AUTO_SCALING_CONFIGURATION" - ErrorCodeAuto_scaling_iam_role_permissions ErrorCode = "AUTO_SCALING_IAM_ROLE_PERMISSIONS" - ErrorCodeCodedeploy_resource_cannot_be_found ErrorCode = "CODEDEPLOY_RESOURCE_CANNOT_BE_FOUND" - ErrorCodeCustomer_application_unhealthy ErrorCode = "CUSTOMER_APPLICATION_UNHEALTHY" - ErrorCodeDeployment_group_missing ErrorCode = "DEPLOYMENT_GROUP_MISSING" - ErrorCodeEcs_update_error ErrorCode = "ECS_UPDATE_ERROR" - ErrorCodeElastic_load_balancing_invalid ErrorCode = "ELASTIC_LOAD_BALANCING_INVALID" - ErrorCodeElb_invalid_instance ErrorCode = "ELB_INVALID_INSTANCE" - ErrorCodeHealth_constraints ErrorCode = "HEALTH_CONSTRAINTS" - ErrorCodeHealth_constraints_invalid ErrorCode = "HEALTH_CONSTRAINTS_INVALID" - ErrorCodeHook_execution_failure ErrorCode = "HOOK_EXECUTION_FAILURE" - ErrorCodeIam_role_missing ErrorCode = "IAM_ROLE_MISSING" - ErrorCodeIam_role_permissions ErrorCode = "IAM_ROLE_PERMISSIONS" - ErrorCodeInternal_error ErrorCode = "INTERNAL_ERROR" - ErrorCodeInvalid_ecs_service ErrorCode = "INVALID_ECS_SERVICE" - ErrorCodeInvalid_lambda_configuration ErrorCode = "INVALID_LAMBDA_CONFIGURATION" - ErrorCodeInvalid_lambda_function ErrorCode = "INVALID_LAMBDA_FUNCTION" - ErrorCodeInvalid_revision ErrorCode = "INVALID_REVISION" - ErrorCodeManual_stop ErrorCode = "MANUAL_STOP" - ErrorCodeMissing_blue_green_deployment_configuration ErrorCode = "MISSING_BLUE_GREEN_DEPLOYMENT_CONFIGURATION" - ErrorCodeMissing_elb_information ErrorCode = "MISSING_ELB_INFORMATION" - ErrorCodeMissing_github_token ErrorCode = "MISSING_GITHUB_TOKEN" - ErrorCodeNo_ec2_subscription ErrorCode = "NO_EC2_SUBSCRIPTION" - ErrorCodeNo_instances ErrorCode = "NO_INSTANCES" - ErrorCodeOver_max_instances ErrorCode = "OVER_MAX_INSTANCES" - ErrorCodeResource_limit_exceeded ErrorCode = "RESOURCE_LIMIT_EXCEEDED" - ErrorCodeRevision_missing ErrorCode = "REVISION_MISSING" - ErrorCodeThrottled ErrorCode = "THROTTLED" - ErrorCodeTimeout ErrorCode = "TIMEOUT" - ErrorCodeCloudformation_stack_failure ErrorCode = "CLOUDFORMATION_STACK_FAILURE" + ErrorCodeAgentIssue ErrorCode = "AGENT_ISSUE" + ErrorCodeAlarmActive ErrorCode = "ALARM_ACTIVE" + ErrorCodeApplicationMissing ErrorCode = "APPLICATION_MISSING" + ErrorCodeAutoscalingValidationError ErrorCode = "AUTOSCALING_VALIDATION_ERROR" + ErrorCodeAutoScalingConfiguration ErrorCode = "AUTO_SCALING_CONFIGURATION" + ErrorCodeAutoScalingIamRolePermissions ErrorCode = "AUTO_SCALING_IAM_ROLE_PERMISSIONS" + ErrorCodeCodedeployResourceCannotBeFound ErrorCode = "CODEDEPLOY_RESOURCE_CANNOT_BE_FOUND" + ErrorCodeCustomerApplicationUnhealthy ErrorCode = "CUSTOMER_APPLICATION_UNHEALTHY" + ErrorCodeDeploymentGroupMissing ErrorCode = "DEPLOYMENT_GROUP_MISSING" + ErrorCodeEcsUpdateError ErrorCode = "ECS_UPDATE_ERROR" + ErrorCodeElasticLoadBalancingInvalid ErrorCode = "ELASTIC_LOAD_BALANCING_INVALID" + ErrorCodeElbInvalidInstance ErrorCode = "ELB_INVALID_INSTANCE" + ErrorCodeHealthConstraints ErrorCode = "HEALTH_CONSTRAINTS" + ErrorCodeHealthConstraintsInvalid ErrorCode = "HEALTH_CONSTRAINTS_INVALID" + ErrorCodeHookExecutionFailure ErrorCode = "HOOK_EXECUTION_FAILURE" + ErrorCodeIamRoleMissing 
ErrorCode = "IAM_ROLE_MISSING" + ErrorCodeIamRolePermissions ErrorCode = "IAM_ROLE_PERMISSIONS" + ErrorCodeInternalError ErrorCode = "INTERNAL_ERROR" + ErrorCodeInvalidEcsService ErrorCode = "INVALID_ECS_SERVICE" + ErrorCodeInvalidLambdaConfiguration ErrorCode = "INVALID_LAMBDA_CONFIGURATION" + ErrorCodeInvalidLambdaFunction ErrorCode = "INVALID_LAMBDA_FUNCTION" + ErrorCodeInvalidRevision ErrorCode = "INVALID_REVISION" + ErrorCodeManualStop ErrorCode = "MANUAL_STOP" + ErrorCodeMissingBlueGreenDeploymentConfiguration ErrorCode = "MISSING_BLUE_GREEN_DEPLOYMENT_CONFIGURATION" + ErrorCodeMissingElbInformation ErrorCode = "MISSING_ELB_INFORMATION" + ErrorCodeMissingGithubToken ErrorCode = "MISSING_GITHUB_TOKEN" + ErrorCodeNoEc2Subscription ErrorCode = "NO_EC2_SUBSCRIPTION" + ErrorCodeNoInstances ErrorCode = "NO_INSTANCES" + ErrorCodeOverMaxInstances ErrorCode = "OVER_MAX_INSTANCES" + ErrorCodeResourceLimitExceeded ErrorCode = "RESOURCE_LIMIT_EXCEEDED" + ErrorCodeRevisionMissing ErrorCode = "REVISION_MISSING" + ErrorCodeThrottled ErrorCode = "THROTTLED" + ErrorCodeTimeout ErrorCode = "TIMEOUT" + ErrorCodeCloudformationStackFailure ErrorCode = "CLOUDFORMATION_STACK_FAILURE" ) // Values returns all known values for ErrorCode. Note that this can be expanded in @@ -362,8 +362,8 @@ type GreenFleetProvisioningAction string // Enum values for GreenFleetProvisioningAction const ( - GreenFleetProvisioningActionDiscover_existing GreenFleetProvisioningAction = "DISCOVER_EXISTING" - GreenFleetProvisioningActionCopy_auto_scaling_group GreenFleetProvisioningAction = "COPY_AUTO_SCALING_GROUP" + GreenFleetProvisioningActionDiscoverExisting GreenFleetProvisioningAction = "DISCOVER_EXISTING" + GreenFleetProvisioningActionCopyAutoScalingGroup GreenFleetProvisioningAction = "COPY_AUTO_SCALING_GROUP" ) // Values returns all known values for GreenFleetProvisioningAction. Note that this @@ -380,8 +380,8 @@ type InstanceAction string // Enum values for InstanceAction const ( - InstanceActionTerminate InstanceAction = "TERMINATE" - InstanceActionKeep_alive InstanceAction = "KEEP_ALIVE" + InstanceActionTerminate InstanceAction = "TERMINATE" + InstanceActionKeepAlive InstanceAction = "KEEP_ALIVE" ) // Values returns all known values for InstanceAction. Note that this can be @@ -398,13 +398,13 @@ type InstanceStatus string // Enum values for InstanceStatus const ( - InstanceStatusPending InstanceStatus = "Pending" - InstanceStatusIn_progress InstanceStatus = "InProgress" - InstanceStatusSucceeded InstanceStatus = "Succeeded" - InstanceStatusFailed InstanceStatus = "Failed" - InstanceStatusSkipped InstanceStatus = "Skipped" - InstanceStatusUnknown InstanceStatus = "Unknown" - InstanceStatusReady InstanceStatus = "Ready" + InstanceStatusPending InstanceStatus = "Pending" + InstanceStatusInProgress InstanceStatus = "InProgress" + InstanceStatusSucceeded InstanceStatus = "Succeeded" + InstanceStatusFailed InstanceStatus = "Failed" + InstanceStatusSkipped InstanceStatus = "Skipped" + InstanceStatusUnknown InstanceStatus = "Unknown" + InstanceStatusReady InstanceStatus = "Ready" ) // Values returns all known values for InstanceStatus. 
Note that this can be @@ -444,12 +444,12 @@ type LifecycleErrorCode string // Enum values for LifecycleErrorCode const ( - LifecycleErrorCodeSuccess LifecycleErrorCode = "Success" - LifecycleErrorCodeScript_missing LifecycleErrorCode = "ScriptMissing" - LifecycleErrorCodeScript_not_executable LifecycleErrorCode = "ScriptNotExecutable" - LifecycleErrorCodeScript_timed_out LifecycleErrorCode = "ScriptTimedOut" - LifecycleErrorCodeScript_failed LifecycleErrorCode = "ScriptFailed" - LifecycleErrorCodeUnknown_error LifecycleErrorCode = "UnknownError" + LifecycleErrorCodeSuccess LifecycleErrorCode = "Success" + LifecycleErrorCodeScriptMissing LifecycleErrorCode = "ScriptMissing" + LifecycleErrorCodeScriptNotExecutable LifecycleErrorCode = "ScriptNotExecutable" + LifecycleErrorCodeScriptTimedOut LifecycleErrorCode = "ScriptTimedOut" + LifecycleErrorCodeScriptFailed LifecycleErrorCode = "ScriptFailed" + LifecycleErrorCodeUnknownError LifecycleErrorCode = "UnknownError" ) // Values returns all known values for LifecycleErrorCode. Note that this can be @@ -470,12 +470,12 @@ type LifecycleEventStatus string // Enum values for LifecycleEventStatus const ( - LifecycleEventStatusPending LifecycleEventStatus = "Pending" - LifecycleEventStatusIn_progress LifecycleEventStatus = "InProgress" - LifecycleEventStatusSucceeded LifecycleEventStatus = "Succeeded" - LifecycleEventStatusFailed LifecycleEventStatus = "Failed" - LifecycleEventStatusSkipped LifecycleEventStatus = "Skipped" - LifecycleEventStatusUnknown LifecycleEventStatus = "Unknown" + LifecycleEventStatusPending LifecycleEventStatus = "Pending" + LifecycleEventStatusInProgress LifecycleEventStatus = "InProgress" + LifecycleEventStatusSucceeded LifecycleEventStatus = "Succeeded" + LifecycleEventStatusFailed LifecycleEventStatus = "Failed" + LifecycleEventStatusSkipped LifecycleEventStatus = "Skipped" + LifecycleEventStatusUnknown LifecycleEventStatus = "Unknown" ) // Values returns all known values for LifecycleEventStatus. Note that this can be @@ -516,8 +516,8 @@ type MinimumHealthyHostsType string // Enum values for MinimumHealthyHostsType const ( - MinimumHealthyHostsTypeHost_count MinimumHealthyHostsType = "HOST_COUNT" - MinimumHealthyHostsTypeFleet_percent MinimumHealthyHostsType = "FLEET_PERCENT" + MinimumHealthyHostsTypeHostCount MinimumHealthyHostsType = "HOST_COUNT" + MinimumHealthyHostsTypeFleetPercent MinimumHealthyHostsType = "FLEET_PERCENT" ) // Values returns all known values for MinimumHealthyHostsType. Note that this can @@ -610,9 +610,9 @@ type TagFilterType string // Enum values for TagFilterType const ( - TagFilterTypeKey_only TagFilterType = "KEY_ONLY" - TagFilterTypeValue_only TagFilterType = "VALUE_ONLY" - TagFilterTypeKey_and_value TagFilterType = "KEY_AND_VALUE" + TagFilterTypeKeyOnly TagFilterType = "KEY_ONLY" + TagFilterTypeValueOnly TagFilterType = "VALUE_ONLY" + TagFilterTypeKeyAndValue TagFilterType = "KEY_AND_VALUE" ) // Values returns all known values for TagFilterType. Note that this can be @@ -630,8 +630,8 @@ type TargetFilterName string // Enum values for TargetFilterName const ( - TargetFilterNameTarget_status TargetFilterName = "TargetStatus" - TargetFilterNameServer_instance_label TargetFilterName = "ServerInstanceLabel" + TargetFilterNameTargetStatus TargetFilterName = "TargetStatus" + TargetFilterNameServerInstanceLabel TargetFilterName = "ServerInstanceLabel" ) // Values returns all known values for TargetFilterName. 
Note that this can be @@ -666,13 +666,13 @@ type TargetStatus string // Enum values for TargetStatus const ( - TargetStatusPending TargetStatus = "Pending" - TargetStatusIn_progress TargetStatus = "InProgress" - TargetStatusSucceeded TargetStatus = "Succeeded" - TargetStatusFailed TargetStatus = "Failed" - TargetStatusSkipped TargetStatus = "Skipped" - TargetStatusUnknown TargetStatus = "Unknown" - TargetStatusReady TargetStatus = "Ready" + TargetStatusPending TargetStatus = "Pending" + TargetStatusInProgress TargetStatus = "InProgress" + TargetStatusSucceeded TargetStatus = "Succeeded" + TargetStatusFailed TargetStatus = "Failed" + TargetStatusSkipped TargetStatus = "Skipped" + TargetStatusUnknown TargetStatus = "Unknown" + TargetStatusReady TargetStatus = "Ready" ) // Values returns all known values for TargetStatus. Note that this can be expanded @@ -714,16 +714,16 @@ type TriggerEventType string // Enum values for TriggerEventType const ( - TriggerEventTypeDeployment_start TriggerEventType = "DeploymentStart" - TriggerEventTypeDeployment_success TriggerEventType = "DeploymentSuccess" - TriggerEventTypeDeployment_failure TriggerEventType = "DeploymentFailure" - TriggerEventTypeDeployment_stop TriggerEventType = "DeploymentStop" - TriggerEventTypeDeployment_rollback TriggerEventType = "DeploymentRollback" - TriggerEventTypeDeployment_ready TriggerEventType = "DeploymentReady" - TriggerEventTypeInstance_start TriggerEventType = "InstanceStart" - TriggerEventTypeInstance_success TriggerEventType = "InstanceSuccess" - TriggerEventTypeInstance_failure TriggerEventType = "InstanceFailure" - TriggerEventTypeInstance_ready TriggerEventType = "InstanceReady" + TriggerEventTypeDeploymentStart TriggerEventType = "DeploymentStart" + TriggerEventTypeDeploymentSuccess TriggerEventType = "DeploymentSuccess" + TriggerEventTypeDeploymentFailure TriggerEventType = "DeploymentFailure" + TriggerEventTypeDeploymentStop TriggerEventType = "DeploymentStop" + TriggerEventTypeDeploymentRollback TriggerEventType = "DeploymentRollback" + TriggerEventTypeDeploymentReady TriggerEventType = "DeploymentReady" + TriggerEventTypeInstanceStart TriggerEventType = "InstanceStart" + TriggerEventTypeInstanceSuccess TriggerEventType = "InstanceSuccess" + TriggerEventTypeInstanceFailure TriggerEventType = "InstanceFailure" + TriggerEventTypeInstanceReady TriggerEventType = "InstanceReady" ) // Values returns all known values for TriggerEventType. Note that this can be diff --git a/service/codedeploy/types/errors.go b/service/codedeploy/types/errors.go index a6483108701..aec3e63cd6e 100644 --- a/service/codedeploy/types/errors.go +++ b/service/codedeploy/types/errors.go @@ -790,19 +790,19 @@ func (e *InstanceNotRegisteredException) ErrorFault() smithy.ErrorFault { return // The format of the alarm configuration is invalid. Possible causes include: // +// * +// The alarm list is null. // -// * The alarm list is null. +// * The alarm object is null. // -// * The alarm object is null. +// * The alarm name is empty +// or null or exceeds the limit of 255 characters. // -// * The alarm name -// is empty or null or exceeds the limit of 255 characters. +// * Two alarms with the same name +// have been specified. // -// * Two alarms with -// the same name have been specified. -// -// * The alarm configuration is enabled, -// but the alarm list is empty. +// * The alarm configuration is enabled, but the alarm list +// is empty. 
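// ----------------------------------------------------------------------------
// Illustrative sketch, not part of the generated change set: an alarm
// configuration that satisfies the constraints listed above (a non-empty,
// duplicate-free alarm list when enabled, names under 255 characters). The alarm
// name is hypothetical, and the Alarms/Name field names are assumed from the
// service model rather than shown in this hunk; the struct would typically be
// supplied on a CreateDeploymentGroup or UpdateDeploymentGroup call.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/codedeploy/types"
)

func main() {
	alarmCfg := &types.AlarmConfiguration{
		// Enabled with a non-empty alarm list, so the InvalidAlarmConfigException
		// conditions above do not apply.
		Enabled: aws.Bool(true),
		Alarms: []*types.Alarm{
			{Name: aws.String("my-deployment-cpu-alarm")}, // hypothetical
		},
		// Stop the deployment if alarm state cannot be read from CloudWatch.
		IgnorePollAlarmFailure: aws.Bool(false),
	}
	fmt.Println("alarms configured:", len(alarmCfg.Alarms))
}
// ----------------------------------------------------------------------------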
type InvalidAlarmConfigException struct { Message *string } @@ -1748,17 +1748,17 @@ func (e *InvalidTargetGroupPairException) ErrorFault() smithy.ErrorFault { retur // The target instance configuration is invalid. Possible causes include: // -// * +// * // Configuration data for target instances was entered for an in-place // deployment. // -// * The limit of 10 tags for a tag type was exceeded. +// * The limit of 10 tags for a tag type was exceeded. // -// * The -// combined length of the tag names exceeded the limit. +// * The combined +// length of the tag names exceeded the limit. // -// * A specified tag is -// not currently applied to any instances. +// * A specified tag is not currently +// applied to any instances. type InvalidTargetInstancesException struct { Message *string } diff --git a/service/codedeploy/types/types.go b/service/codedeploy/types/types.go index 6bbbd1dfe60..7f7760be29c 100644 --- a/service/codedeploy/types/types.go +++ b/service/codedeploy/types/types.go @@ -28,11 +28,11 @@ type AlarmConfiguration struct { // state of alarms cannot be retrieved from Amazon CloudWatch. The default value is // false. // - // * true: The deployment proceeds even if alarm status information - // can't be retrieved from Amazon CloudWatch. + // * true: The deployment proceeds even if alarm status information can't + // be retrieved from Amazon CloudWatch. // - // * false: The deployment stops if - // alarm status information can't be retrieved from Amazon CloudWatch. + // * false: The deployment stops if alarm + // status information can't be retrieved from Amazon CloudWatch. IgnorePollAlarmFailure *bool } @@ -128,11 +128,11 @@ type BlueInstanceTerminationOption struct { // The action to take on instances in the original environment after a successful // blue/green deployment. // - // * TERMINATE: Instances are terminated after a - // specified wait time. + // * TERMINATE: Instances are terminated after a specified + // wait time. // - // * KEEP_ALIVE: Instances are left running after they - // are deregistered from the load balancer and removed from the deployment group. + // * KEEP_ALIVE: Instances are left running after they are deregistered + // from the load balancer and removed from the deployment group. Action InstanceAction // For an Amazon EC2 deployment, the number of minutes to wait after a successful @@ -309,14 +309,13 @@ type DeploymentInfo struct { // The means by which the deployment was created: // - // * user: A user created the + // * user: A user created the // deployment. // - // * autoscaling: Amazon EC2 Auto Scaling created the - // deployment. + // * autoscaling: Amazon EC2 Auto Scaling created the deployment. // - // * codeDeployRollback: A rollback process created the - // deployment. + // * + // codeDeployRollback: A rollback process created the deployment. Creator DeploymentCreator // The deployment configuration name. @@ -352,15 +351,15 @@ type DeploymentInfo struct { // deployment target location but weren't part of the previous successful // deployment. // - // * DISALLOW: The deployment fails. This is also the default - // behavior if no option is specified. + // * DISALLOW: The deployment fails. This is also the default behavior + // if no option is specified. // - // * OVERWRITE: The version of the file - // from the application revision currently being deployed replaces the version - // already on the instance. 
+ // * OVERWRITE: The version of the file from the + // application revision currently being deployed replaces the version already on + // the instance. // - // * RETAIN: The version of the file already on the - // instance is kept and used as part of the new deployment. + // * RETAIN: The version of the file already on the instance is kept + // and used as part of the new deployment. FileExistsBehavior FileExistsBehavior // If true, then if an ApplicationStop, BeforeBlockTraffic, or AfterBlockTraffic @@ -452,12 +451,12 @@ type DeploymentReadyOption struct { // Information about when to reroute traffic from an original environment to a // replacement environment in a blue/green deployment. // - // * CONTINUE_DEPLOYMENT: + // * CONTINUE_DEPLOYMENT: // Register new instances with the load balancer immediately after the new // application revision is installed on the instances in the replacement // environment. // - // * STOP_DEPLOYMENT: Do not register new instances with a load + // * STOP_DEPLOYMENT: Do not register new instances with a load // balancer unless traffic rerouting is started using ContinueDeployment. If // traffic rerouting is not started before the end of the specified wait period, // the deployment status is changed to Stopped. @@ -510,23 +509,23 @@ type Diagnostics struct { // The associated error code: // - // * Success: The specified script ran. + // * Success: The specified script ran. // - // * + // * // ScriptMissing: The specified script was not found in the specified location. // - // - // * ScriptNotExecutable: The specified script is not a recognized executable file + // * + // ScriptNotExecutable: The specified script is not a recognized executable file // type. // - // * ScriptTimedOut: The specified script did not finish running in the + // * ScriptTimedOut: The specified script did not finish running in the // specified time period. // - // * ScriptFailed: The specified script failed to run - // as expected. + // * ScriptFailed: The specified script failed to run as + // expected. // - // * UnknownError: The specified script did not run for an - // unknown reason. + // * UnknownError: The specified script did not run for an unknown + // reason. ErrorCode LifecycleErrorCode // The last portion of the diagnostic log. If available, AWS CodeDeploy returns up @@ -548,12 +547,12 @@ type EC2TagFilter struct { // The tag filter type: // - // * KEY_ONLY: Key only. + // * KEY_ONLY: Key only. // - // * VALUE_ONLY: Value only. + // * VALUE_ONLY: Value only. // - // - // * KEY_AND_VALUE: Key and value. + // * + // KEY_AND_VALUE: Key and value. Type EC2TagFilterType // The tag filter value. @@ -634,15 +633,15 @@ type ECSTaskSet struct { // The status of the task set. There are three valid task set statuses: // - // * - // PRIMARY: Indicates the task set is serving production traffic. + // * PRIMARY: + // Indicates the task set is serving production traffic. // - // * ACTIVE: - // Indicates the task set is not serving production traffic. + // * ACTIVE: Indicates the + // task set is not serving production traffic. // - // * DRAINING: - // Indicates the tasks in the task set are being stopped and their corresponding - // targets are being deregistered from their target group. + // * DRAINING: Indicates the tasks in + // the task set are being stopped and their corresponding targets are being + // deregistered from their target group. Status *string // The target group associated with the task set. 
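// ----------------------------------------------------------------------------
// Illustrative sketch, not part of the generated change set: building an EC2 tag
// filter with the renamed constant EC2TagFilterTypeKeyAndValue from the enums.go
// hunk above, matching the EC2TagFilter documentation in this file. The tag key
// and value are hypothetical; a slice of these filters is what a deployment
// group's EC2 tag filter field expects.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/codedeploy/types"
)

func main() {
	// KEY_AND_VALUE: match instances that carry both this tag key and value.
	filters := []*types.EC2TagFilter{
		{
			Key:   aws.String("Environment"), // hypothetical
			Value: aws.String("staging"),     // hypothetical
			Type:  types.EC2TagFilterTypeKeyAndValue,
		},
	}
	for _, f := range filters {
		fmt.Printf("filter %s=%s (%s)\n", *f.Key, *f.Value, f.Type)
	}
}
// ----------------------------------------------------------------------------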
The target group is used by AWS @@ -678,50 +677,49 @@ type ErrorInformation struct { // the AWS CodeDeploy User Guide // (https://docs.aws.amazon.com/codedeploy/latest/userguide). The error code: // + // * + // APPLICATION_MISSING: The application was missing. This error code is most likely + // raised if the application is deleted after the deployment is created, but before + // it is started. // - // * APPLICATION_MISSING: The application was missing. This error code is most - // likely raised if the application is deleted after the deployment is created, but - // before it is started. - // - // * DEPLOYMENT_GROUP_MISSING: The deployment group was - // missing. This error code is most likely raised if the deployment group is - // deleted after the deployment is created, but before it is started. + // * DEPLOYMENT_GROUP_MISSING: The deployment group was missing. + // This error code is most likely raised if the deployment group is deleted after + // the deployment is created, but before it is started. // - // * - // HEALTH_CONSTRAINTS: The deployment failed on too many instances to be - // successfully deployed within the instance health constraints specified. + // * HEALTH_CONSTRAINTS: The + // deployment failed on too many instances to be successfully deployed within the + // instance health constraints specified. // - // * - // HEALTH_CONSTRAINTS_INVALID: The revision cannot be successfully deployed within - // the instance health constraints specified. + // * HEALTH_CONSTRAINTS_INVALID: The + // revision cannot be successfully deployed within the instance health constraints + // specified. // - // * IAM_ROLE_MISSING: The service - // role cannot be accessed. + // * IAM_ROLE_MISSING: The service role cannot be accessed. // - // * IAM_ROLE_PERMISSIONS: The service role does not - // have the correct permissions. + // * + // IAM_ROLE_PERMISSIONS: The service role does not have the correct permissions. // - // * INTERNAL_ERROR: There was an internal - // error. + // * + // INTERNAL_ERROR: There was an internal error. // - // * NO_EC2_SUBSCRIPTION: The calling account is not subscribed to - // Amazon EC2. + // * NO_EC2_SUBSCRIPTION: The calling + // account is not subscribed to Amazon EC2. // - // * NO_INSTANCES: No instances were specified, or no instances - // can be found. + // * NO_INSTANCES: No instances were + // specified, or no instances can be found. // - // * OVER_MAX_INSTANCES: The maximum number of instances was - // exceeded. + // * OVER_MAX_INSTANCES: The maximum + // number of instances was exceeded. // - // * THROTTLED: The operation was throttled because the calling - // account exceeded the throttling limits of one or more AWS services. + // * THROTTLED: The operation was throttled + // because the calling account exceeded the throttling limits of one or more AWS + // services. // - // * - // TIMEOUT: The deployment has timed out. + // * TIMEOUT: The deployment has timed out. // - // * REVISION_MISSING: The revision ID - // was missing. This error code is most likely raised if the revision is deleted - // after the deployment is created, but before it is started. + // * REVISION_MISSING: The + // revision ID was missing. This error code is most likely raised if the revision + // is deleted after the deployment is created, but before it is started. Code ErrorCode // An accompanying error message. @@ -766,12 +764,12 @@ type GreenFleetProvisioningOption struct { // The method used to add instances to a replacement environment. 
// - // * + // * // DISCOVER_EXISTING: Use instances that already exist or will be created // manually. // - // * COPY_AUTO_SCALING_GROUP: Use settings from a specified Auto - // Scaling group to define and create instances in a new Auto Scaling group. + // * COPY_AUTO_SCALING_GROUP: Use settings from a specified Auto Scaling + // group to define and create instances in a new Auto Scaling group. Action GreenFleetProvisioningAction } @@ -813,10 +811,10 @@ type InstanceSummary struct { // Information about which environment an instance belongs to in a blue/green // deployment. // - // * BLUE: The instance is part of the original environment. + // * BLUE: The instance is part of the original environment. // - // - // * GREEN: The instance is part of the replacement environment. + // * GREEN: + // The instance is part of the replacement environment. InstanceType InstanceType // A timestamp that indicates when the instance information was last updated. @@ -827,22 +825,22 @@ type InstanceSummary struct { // The deployment status for this instance: // - // * Pending: The deployment is - // pending for this instance. - // - // * In Progress: The deployment is in progress for - // this instance. + // * Pending: The deployment is pending + // for this instance. // - // * Succeeded: The deployment has succeeded for this + // * In Progress: The deployment is in progress for this // instance. // - // * Failed: The deployment has failed for this instance. + // * Succeeded: The deployment has succeeded for this instance. + // + // * + // Failed: The deployment has failed for this instance. // - // * - // Skipped: The deployment has been skipped for this instance. + // * Skipped: The deployment + // has been skipped for this instance. // - // * Unknown: The - // deployment status is unknown for this instance. + // * Unknown: The deployment status is unknown + // for this instance. Status InstanceStatus } @@ -959,22 +957,22 @@ type LifecycleEvent struct { // The deployment lifecycle event status: // - // * Pending: The deployment lifecycle + // * Pending: The deployment lifecycle // event is pending. // - // * InProgress: The deployment lifecycle event is in + // * InProgress: The deployment lifecycle event is in // progress. // - // * Succeeded: The deployment lifecycle event ran successfully. + // * Succeeded: The deployment lifecycle event ran successfully. // + // * + // Failed: The deployment lifecycle event has failed. // - // * Failed: The deployment lifecycle event has failed. + // * Skipped: The deployment + // lifecycle event has been skipped. // - // * Skipped: The - // deployment lifecycle event has been skipped. - // - // * Unknown: The deployment - // lifecycle event is unknown. + // * Unknown: The deployment lifecycle event is + // unknown. Status LifecycleEventStatus } @@ -1004,11 +1002,11 @@ type MinimumHealthyHosts struct { // The minimum healthy instance type: // - // * HOST_COUNT: The minimum number of - // healthy instances as an absolute value. + // * HOST_COUNT: The minimum number of healthy + // instances as an absolute value. // - // * FLEET_PERCENT: The minimum number - // of healthy instances as a percentage of the total number of instances in the + // * FLEET_PERCENT: The minimum number of healthy + // instances as a percentage of the total number of instances in the // deployment. 
// // In an example of nine instances, if a HOST_COUNT of six is @@ -1080,19 +1078,18 @@ type RevisionLocation struct { // The type of application revision: // - // * S3: An application revision stored in + // * S3: An application revision stored in // Amazon S3. // - // * GitHub: An application revision stored in GitHub - // (EC2/On-premises deployments only). + // * GitHub: An application revision stored in GitHub (EC2/On-premises + // deployments only). // - // * String: A YAML-formatted or - // JSON-formatted string (AWS Lambda deployments only). + // * String: A YAML-formatted or JSON-formatted string (AWS + // Lambda deployments only). // - // * AppSpecContent: An - // AppSpecContent object that contains the contents of an AppSpec file for an AWS - // Lambda or Amazon ECS deployment. The content is formatted as JSON or YAML stored - // as a RawString. + // * AppSpecContent: An AppSpecContent object that + // contains the contents of an AppSpec file for an AWS Lambda or Amazon ECS + // deployment. The content is formatted as JSON or YAML stored as a RawString. RevisionType RevisionLocationType // Information about the location of a revision stored in Amazon S3. @@ -1127,13 +1124,13 @@ type S3Location struct { // The file type of the application revision. Must be one of the following: // - // * - // tar: A tar archive file. + // * tar: + // A tar archive file. // - // * tgz: A compressed tar archive file. + // * tgz: A compressed tar archive file. // - // * zip: - // A zip archive file. + // * zip: A zip archive + // file. BundleType BundleType // The ETag of the Amazon S3 object that represents the bundled artifacts for the @@ -1169,12 +1166,12 @@ type TagFilter struct { // The on-premises instance tag filter type: // - // * KEY_ONLY: Key only. + // * KEY_ONLY: Key only. // - // * - // VALUE_ONLY: Value only. + // * VALUE_ONLY: + // Value only. // - // * KEY_AND_VALUE: Key and value. + // * KEY_AND_VALUE: Key and value. Type TagFilterType // The on-premises instance tag filter value. diff --git a/service/codeguruprofiler/types/enums.go b/service/codeguruprofiler/types/enums.go index 7442a81ba51..828c74299d1 100644 --- a/service/codeguruprofiler/types/enums.go +++ b/service/codeguruprofiler/types/enums.go @@ -7,16 +7,16 @@ type AgentParameterField string // Enum values for AgentParameterField const ( // Sampling interval in milliseconds used to sample profiles. - AgentParameterFieldSampling_interval_in_milliseconds AgentParameterField = "SamplingIntervalInMilliseconds" + AgentParameterFieldSamplingIntervalInMilliseconds AgentParameterField = "SamplingIntervalInMilliseconds" // Reporting interval in milliseconds used to report profiles. - AgentParameterFieldReporting_interval_in_milliseconds AgentParameterField = "ReportingIntervalInMilliseconds" + AgentParameterFieldReportingIntervalInMilliseconds AgentParameterField = "ReportingIntervalInMilliseconds" // Minimum time in milliseconds between sending reports. - AgentParameterFieldMinimum_time_for_reporting_in_milliseconds AgentParameterField = "MinimumTimeForReportingInMilliseconds" + AgentParameterFieldMinimumTimeForReportingInMilliseconds AgentParameterField = "MinimumTimeForReportingInMilliseconds" // Percentage of memory to be used by CodeGuru profiler. Minimum of 30MB is // required for the agent. 
- AgentParameterFieldMemory_usage_limit_percent AgentParameterField = "MemoryUsageLimitPercent" + AgentParameterFieldMemoryUsageLimitPercent AgentParameterField = "MemoryUsageLimitPercent" // Maximum stack depth to be captured by the CodeGuru Profiler. - AgentParameterFieldMax_stack_depth AgentParameterField = "MaxStackDepth" + AgentParameterFieldMaxStackDepth AgentParameterField = "MaxStackDepth" ) // Values returns all known values for AgentParameterField. Note that this can be @@ -82,7 +82,7 @@ type MetricType string const ( // Metric value aggregated for all instances of a frame name in a profile relative // to the root frame. - MetricTypeAggregated_relative_total_time MetricType = "AGGREGATED_RELATIVE_TOTAL_TIME" + MetricTypeAggregatedRelativeTotalTime MetricType = "AGGREGATED_RELATIVE_TOTAL_TIME" ) // Values returns all known values for MetricType. Note that this can be expanded @@ -99,9 +99,9 @@ type OrderBy string // Enum values for OrderBy const ( // Order by timestamp in descending order. - OrderByTimestamp_descending OrderBy = "TimestampDescending" + OrderByTimestampDescending OrderBy = "TimestampDescending" // Order by timestamp in ascending order. - OrderByTimestamp_ascending OrderBy = "TimestampAscending" + OrderByTimestampAscending OrderBy = "TimestampAscending" ) // Values returns all known values for OrderBy. Note that this can be expanded in @@ -119,23 +119,23 @@ type MetadataField string // Enum values for MetadataField const ( // Compute platform on which agent is running. - MetadataFieldCompute_platform MetadataField = "ComputePlatform" + MetadataFieldComputePlatform MetadataField = "ComputePlatform" // Unique identifier for the agent instance. - MetadataFieldAgent_id MetadataField = "AgentId" + MetadataFieldAgentId MetadataField = "AgentId" // AWS requestId of the Lambda invocation. - MetadataFieldAws_request_id MetadataField = "AwsRequestId" + MetadataFieldAwsRequestId MetadataField = "AwsRequestId" // Execution environment on which Lambda function is running. - MetadataFieldExecution_environment MetadataField = "ExecutionEnvironment" + MetadataFieldExecutionEnvironment MetadataField = "ExecutionEnvironment" // Function ARN that's used to invoke the Lambda function. - MetadataFieldLambda_function_arn MetadataField = "LambdaFunctionArn" + MetadataFieldLambdaFunctionArn MetadataField = "LambdaFunctionArn" // Memory allocated for the Lambda function. - MetadataFieldLambda_memory_limit_in_mb MetadataField = "LambdaMemoryLimitInMB" + MetadataFieldLambdaMemoryLimitInMb MetadataField = "LambdaMemoryLimitInMB" // Time in milliseconds left before the execution times out. - MetadataFieldLambda_remaining_time_in_milliseconds MetadataField = "LambdaRemainingTimeInMilliseconds" + MetadataFieldLambdaRemainingTimeInMilliseconds MetadataField = "LambdaRemainingTimeInMilliseconds" // Time in milliseconds between two invocations of the Lambda function. - MetadataFieldLambda_time_gap_between_invokes_in_milliseconds MetadataField = "LambdaTimeGapBetweenInvokesInMilliseconds" + MetadataFieldLambdaTimeGapBetweenInvokesInMilliseconds MetadataField = "LambdaTimeGapBetweenInvokesInMilliseconds" // Time in milliseconds for the previous Lambda invocation. - MetadataFieldLambda_previous_execution_time_in_milliseconds MetadataField = "LambdaPreviousExecutionTimeInMilliseconds" + MetadataFieldLambdaPreviousExecutionTimeInMilliseconds MetadataField = "LambdaPreviousExecutionTimeInMilliseconds" ) // Values returns all known values for MetadataField. 
Note that this can be @@ -160,7 +160,7 @@ type ActionGroup string // Enum values for ActionGroup const ( // Permission group type for Agent APIs - ConfigureAgent, PostAgentProfile - ActionGroupAgent_permissions ActionGroup = "agentPermissions" + ActionGroupAgentPermissions ActionGroup = "agentPermissions" ) // Values returns all known values for ActionGroup. Note that this can be expanded diff --git a/service/codegurureviewer/api_op_ListCodeReviews.go b/service/codegurureviewer/api_op_ListCodeReviews.go index 9e069e69c27..eaf7bef124d 100644 --- a/service/codegurureviewer/api_op_ListCodeReviews.go +++ b/service/codegurureviewer/api_op_ListCodeReviews.go @@ -55,16 +55,15 @@ type ListCodeReviewsInput struct { // result. For example, states=[Pending] lists code reviews in the Pending state. // The valid code review states are: // - // * Completed: The code review is - // complete. + // * Completed: The code review is complete. // - // * Pending: The code review started and has not completed or - // failed. + // * + // Pending: The code review started and has not completed or failed. // - // * Failed: The code review failed. + // * Failed: The + // code review failed. // - // * Deleting: The code review - // is being deleted. + // * Deleting: The code review is being deleted. States []types.JobState } diff --git a/service/codegurureviewer/api_op_ListRepositoryAssociations.go b/service/codegurureviewer/api_op_ListRepositoryAssociations.go index 4903ad26e59..d15c9d4305f 100644 --- a/service/codegurureviewer/api_op_ListRepositoryAssociations.go +++ b/service/codegurureviewer/api_op_ListRepositoryAssociations.go @@ -72,27 +72,27 @@ type ListRepositoryAssociationsInput struct { // List of repository association states to use as a filter. The valid repository // association states are: // - // * Associated: The repository association is + // * Associated: The repository association is // complete. // - // * Associating: CodeGuru Reviewer is: + // * Associating: CodeGuru Reviewer is: // - // * Setting up pull - // request notifications. This is required for pull requests to trigger a CodeGuru - // Reviewer review. If your repository ProviderType is GitHub, GitHub Enterprise - // Server, or Bitbucket, CodeGuru Reviewer creates webhooks in your repository to - // trigger CodeGuru Reviewer reviews. If you delete these webhooks, reviews of code - // in your repository cannot be triggered. + // * Setting up pull request + // notifications. This is required for pull requests to trigger a CodeGuru Reviewer + // review. If your repository ProviderType is GitHub, GitHub Enterprise Server, or + // Bitbucket, CodeGuru Reviewer creates webhooks in your repository to trigger + // CodeGuru Reviewer reviews. If you delete these webhooks, reviews of code in your + // repository cannot be triggered. // - // * Setting up source code - // access. This is required for CodeGuru Reviewer to securely clone code in your - // repository. + // * Setting up source code access. This is + // required for CodeGuru Reviewer to securely clone code in your repository. // - // * Failed: The repository failed to associate or disassociate. + // * + // Failed: The repository failed to associate or disassociate. // - // - // * Disassociating: CodeGuru Reviewer is removing the repository's pull request - // notifications and source code access. + // * Disassociating: + // CodeGuru Reviewer is removing the repository's pull request notifications and + // source code access. 
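// ----------------------------------------------------------------------------
// Illustrative sketch, not part of the generated change set: filtering repository
// associations by state through the States filter documented above. The constant
// types.RepositoryAssociationStateAssociated assumes the CamelCase enum naming
// this patch introduces, and the summary field names are taken from the service
// model rather than from this hunk.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/codegurureviewer"
	"github.com/aws/aws-sdk-go-v2/service/codegurureviewer/types"
)

func main() {
	ctx := context.TODO()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := codegurureviewer.NewFromConfig(cfg)

	// Only fully associated repositories, i.e. those CodeGuru Reviewer can review.
	out, err := client.ListRepositoryAssociations(ctx, &codegurureviewer.ListRepositoryAssociationsInput{
		States: []types.RepositoryAssociationState{types.RepositoryAssociationStateAssociated},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, summary := range out.RepositoryAssociationSummaries {
		if summary.Name != nil {
			fmt.Println("associated repository:", *summary.Name)
		}
	}
}
// ----------------------------------------------------------------------------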
States []types.RepositoryAssociationState } diff --git a/service/codegurureviewer/types/enums.go b/service/codegurureviewer/types/enums.go index eca941b5631..f4f944fc5af 100644 --- a/service/codegurureviewer/types/enums.go +++ b/service/codegurureviewer/types/enums.go @@ -28,10 +28,10 @@ type ProviderType string // Enum values for ProviderType const ( - ProviderTypeCode_commit ProviderType = "CodeCommit" - ProviderTypeGit_hub ProviderType = "GitHub" - ProviderTypeBitbucket ProviderType = "Bitbucket" - ProviderTypeGit_hub_enterprise_server ProviderType = "GitHubEnterpriseServer" + ProviderTypeCodeCommit ProviderType = "CodeCommit" + ProviderTypeGitHub ProviderType = "GitHub" + ProviderTypeBitbucket ProviderType = "Bitbucket" + ProviderTypeGitHubEnterpriseServer ProviderType = "GitHubEnterpriseServer" ) // Values returns all known values for ProviderType. Note that this can be expanded @@ -50,8 +50,8 @@ type Reaction string // Enum values for Reaction const ( - ReactionThumbs_up Reaction = "ThumbsUp" - ReactionThumbs_down Reaction = "ThumbsDown" + ReactionThumbsUp Reaction = "ThumbsUp" + ReactionThumbsDown Reaction = "ThumbsDown" ) // Values returns all known values for Reaction. Note that this can be expanded in @@ -90,8 +90,8 @@ type Type string // Enum values for Type const ( - TypePull_request Type = "PullRequest" - TypeRepository_analysis Type = "RepositoryAnalysis" + TypePullRequest Type = "PullRequest" + TypeRepositoryAnalysis Type = "RepositoryAnalysis" ) // Values returns all known values for Type. Note that this can be expanded in the diff --git a/service/codegurureviewer/types/types.go b/service/codegurureviewer/types/types.go index 554f85836ee..72685dfccef 100644 --- a/service/codegurureviewer/types/types.go +++ b/service/codegurureviewer/types/types.go @@ -62,16 +62,15 @@ type CodeReview struct { // The valid code review states are: // - // * Completed: The code review is - // complete. + // * Completed: The code review is complete. // - // * Pending: The code review started and has not completed or - // failed. + // * + // Pending: The code review started and has not completed or failed. // - // * Failed: The code review failed. + // * Failed: The + // code review failed. // - // * Deleting: The code review - // is being deleted. + // * Deleting: The code review is being deleted. State JobState // The reason for the state of the code review. @@ -119,16 +118,16 @@ type CodeReviewSummary struct { // The state of the code review. The valid code review states are: // - // * - // Completed: The code review is complete. + // * Completed: + // The code review is complete. // - // * Pending: The code review started - // and has not completed or failed. + // * Pending: The code review started and has not + // completed or failed. // - // * Failed: The code review failed. + // * Failed: The code review failed. // - // * - // Deleting: The code review is being deleted. + // * Deleting: The code + // review is being deleted. State JobState // The type of the code review. @@ -137,14 +136,14 @@ type CodeReviewSummary struct { // The type of a code review. There are two code review types: // -// * PullRequest - -// A code review that is automatically triggered by a pull request on an assocaited +// * PullRequest - A +// code review that is automatically triggered by a pull request on an assocaited // repository. 
Because this type of code review is automatically generated, you // cannot specify this code review type using CreateCodeReview // (https://docs.aws.amazon.com/codeguru/latest/reviewer-api/API_CreateCodeReview). // -// -// * RepositoryAnalysis - A code review that analyzes all code under a specified +// * +// RepositoryAnalysis - A code review that analyzes all code under a specified // branch in an associated respository. The assocated repository is specified using // its ARN in CreateCodeReview // (https://docs.aws.amazon.com/codeguru/latest/reviewer-api/API_CreateCodeReview). @@ -355,27 +354,26 @@ type RepositoryAssociation struct { // The state of the repository association. The valid repository association states // are: // - // * Associated: The repository association is complete. - // - // * - // Associating: CodeGuru Reviewer is: + // * Associated: The repository association is complete. // - // * Setting up pull request - // notifications. This is required for pull requests to trigger a CodeGuru Reviewer - // review. If your repository ProviderType is GitHub, GitHub Enterprise Server, or - // Bitbucket, CodeGuru Reviewer creates webhooks in your repository to trigger - // CodeGuru Reviewer reviews. If you delete these webhooks, reviews of code in your - // repository cannot be triggered. + // * Associating: + // CodeGuru Reviewer is: // - // * Setting up source code access. This - // is required for CodeGuru Reviewer to securely clone code in your repository. + // * Setting up pull request notifications. This is required + // for pull requests to trigger a CodeGuru Reviewer review. If your repository + // ProviderType is GitHub, GitHub Enterprise Server, or Bitbucket, CodeGuru + // Reviewer creates webhooks in your repository to trigger CodeGuru Reviewer + // reviews. If you delete these webhooks, reviews of code in your repository cannot + // be triggered. // + // * Setting up source code access. This is required for CodeGuru + // Reviewer to securely clone code in your repository. // - // * Failed: The repository failed to associate or disassociate. + // * Failed: The repository + // failed to associate or disassociate. // - // * - // Disassociating: CodeGuru Reviewer is removing the repository's pull request - // notifications and source code access. + // * Disassociating: CodeGuru Reviewer is + // removing the repository's pull request notifications and source code access. State RepositoryAssociationState // A description of why the repository association is in the current state. @@ -423,27 +421,26 @@ type RepositoryAssociationSummary struct { // The state of the repository association. The valid repository association states // are: // - // * Associated: The repository association is complete. - // - // * - // Associating: CodeGuru Reviewer is: + // * Associated: The repository association is complete. // - // * Setting up pull request - // notifications. This is required for pull requests to trigger a CodeGuru Reviewer - // review. If your repository ProviderType is GitHub, GitHub Enterprise Server, or - // Bitbucket, CodeGuru Reviewer creates webhooks in your repository to trigger - // CodeGuru Reviewer reviews. If you delete these webhooks, reviews of code in your - // repository cannot be triggered. + // * Associating: + // CodeGuru Reviewer is: // - // * Setting up source code access. This - // is required for CodeGuru Reviewer to securely clone code in your repository. + // * Setting up pull request notifications. 
This is required + // for pull requests to trigger a CodeGuru Reviewer review. If your repository + // ProviderType is GitHub, GitHub Enterprise Server, or Bitbucket, CodeGuru + // Reviewer creates webhooks in your repository to trigger CodeGuru Reviewer + // reviews. If you delete these webhooks, reviews of code in your repository cannot + // be triggered. // + // * Setting up source code access. This is required for CodeGuru + // Reviewer to securely clone code in your repository. // - // * Failed: The repository failed to associate or disassociate. + // * Failed: The repository + // failed to associate or disassociate. // - // * - // Disassociating: CodeGuru Reviewer is removing the repository's pull request - // notifications and source code access. + // * Disassociating: CodeGuru Reviewer is + // removing the repository's pull request notifications and source code access. State RepositoryAssociationState } diff --git a/service/codepipeline/doc.go b/service/codepipeline/doc.go index c498311125c..58cb40e4708 100644 --- a/service/codepipeline/doc.go +++ b/service/codepipeline/doc.go @@ -13,55 +13,54 @@ // is uniquely named, and consists of stages, actions, and transitions. You can // work with pipelines by calling: // -// * CreatePipeline, which creates a uniquely +// * CreatePipeline, which creates a uniquely // named pipeline. // -// * DeletePipeline, which deletes the specified pipeline. +// * DeletePipeline, which deletes the specified pipeline. // +// * +// GetPipeline, which returns information about the pipeline structure and pipeline +// metadata, including the pipeline Amazon Resource Name (ARN). // -// * GetPipeline, which returns information about the pipeline structure and -// pipeline metadata, including the pipeline Amazon Resource Name (ARN). -// -// * +// * // GetPipelineExecution, which returns information about a specific execution of a // pipeline. // -// * GetPipelineState, which returns information about the current -// state of the stages and actions of a pipeline. -// -// * ListActionExecutions, -// which returns action-level details for past executions. The details include full -// stage and action-level details, including individual action duration, status, -// any errors that occurred during the execution, and input and output artifact -// location details. -// -// * ListPipelines, which gets a summary of all of the -// pipelines associated with your account. -// -// * ListPipelineExecutions, which -// gets a summary of the most recent executions for a pipeline. -// -// * -// StartPipelineExecution, which runs the most recent revision of an artifact -// through the pipeline. -// -// * StopPipelineExecution, which stops the specified -// pipeline execution from continuing through the pipeline. -// -// * UpdatePipeline, -// which updates a pipeline with edits or changes to the structure of the -// pipeline. -// -// Pipelines include stages. Each stage contains one or more actions -// that must complete before the next stage begins. A stage results in success or -// failure. If a stage fails, the pipeline stops at that stage and remains stopped -// until either a new version of an artifact appears in the source location, or a -// user takes action to rerun the most recent artifact through the pipeline. You -// can call GetPipelineState, which displays the status of a pipeline, including -// the status of stages in the pipeline, or GetPipeline, which returns the entire -// structure of the pipeline, including the stages of that pipeline. 
For more -// information about the structure of stages and actions, see AWS CodePipeline -// Pipeline Structure Reference +// * GetPipelineState, which returns information about the current state +// of the stages and actions of a pipeline. +// +// * ListActionExecutions, which returns +// action-level details for past executions. The details include full stage and +// action-level details, including individual action duration, status, any errors +// that occurred during the execution, and input and output artifact location +// details. +// +// * ListPipelines, which gets a summary of all of the pipelines +// associated with your account. +// +// * ListPipelineExecutions, which gets a summary of +// the most recent executions for a pipeline. +// +// * StartPipelineExecution, which runs +// the most recent revision of an artifact through the pipeline. +// +// * +// StopPipelineExecution, which stops the specified pipeline execution from +// continuing through the pipeline. +// +// * UpdatePipeline, which updates a pipeline +// with edits or changes to the structure of the pipeline. +// +// Pipelines include +// stages. Each stage contains one or more actions that must complete before the +// next stage begins. A stage results in success or failure. If a stage fails, the +// pipeline stops at that stage and remains stopped until either a new version of +// an artifact appears in the source location, or a user takes action to rerun the +// most recent artifact through the pipeline. You can call GetPipelineState, which +// displays the status of a pipeline, including the status of stages in the +// pipeline, or GetPipeline, which returns the entire structure of the pipeline, +// including the stages of that pipeline. For more information about the structure +// of stages and actions, see AWS CodePipeline Pipeline Structure Reference // (https://docs.aws.amazon.com/codepipeline/latest/userguide/pipeline-structure.html). // Pipeline stages include actions that are categorized into categories such as // source or build actions performed in a stage of a pipeline. For example, you can @@ -70,71 +69,70 @@ // you do define and interact with actions when working with pipeline operations // such as CreatePipeline and GetPipelineState. Valid action categories are: // -// * +// * // Source // -// * Build +// * Build // -// * Test +// * Test // -// * Deploy +// * Deploy // -// * Approval +// * Approval // -// * -// Invoke +// * Invoke // -// Pipelines also include transitions, which allow the transition of -// artifacts from one stage to the next in a pipeline after the actions in one -// stage complete. You can work with transitions by calling: +// Pipelines also include +// transitions, which allow the transition of artifacts from one stage to the next +// in a pipeline after the actions in one stage complete. You can work with +// transitions by calling: // -// * -// DisableStageTransition, which prevents artifacts from transitioning to the next -// stage in a pipeline. +// * DisableStageTransition, which prevents artifacts from +// transitioning to the next stage in a pipeline. // -// * EnableStageTransition, which enables transition of -// artifacts between stages in a pipeline. +// * EnableStageTransition, which +// enables transition of artifacts between stages in a pipeline. 
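The reflowed doc.go comment above enumerates the CodePipeline operations (GetPipelineState, DisableStageTransition, EnableStageTransition, and so on). Purely as an illustrative sketch, and not part of the generated diff, the snippet below shows how two of those operations are typically invoked through this module. It assumes a pre-configured *codepipeline.Client, and the constant types.StageTransitionTypeInbound follows the CamelCase naming this change introduces, so verify the exact identifiers against the generated packages.

```go
package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/codepipeline"
	"github.com/aws/aws-sdk-go-v2/service/codepipeline/types"
)

// holdStage prints the stages reported by GetPipelineState and then calls
// DisableStageTransition so no new artifacts enter the named stage until a
// matching EnableStageTransition call is made. Names are placeholders.
func holdStage(ctx context.Context, client *codepipeline.Client, pipeline, stage string) error {
	state, err := client.GetPipelineState(ctx, &codepipeline.GetPipelineStateInput{
		Name: aws.String(pipeline),
	})
	if err != nil {
		return err
	}
	for _, s := range state.StageStates {
		fmt.Println("stage:", aws.ToString(s.StageName))
	}

	_, err = client.DisableStageTransition(ctx, &codepipeline.DisableStageTransitionInput{
		PipelineName:   aws.String(pipeline),
		StageName:      aws.String(stage),
		TransitionType: types.StageTransitionTypeInbound,
		Reason:         aws.String("pausing promotions during a release freeze"),
	})
	return err
}
```

Re-enabling the stage later would be the mirror-image EnableStageTransition call with the same pipeline, stage, and transition type (that input takes no Reason field).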
// -// Using the API to integrate with AWS -// CodePipeline For third-party integrators or developers who want to create their -// own integrations with AWS CodePipeline, the expected sequence varies from the -// standard API user. To integrate with AWS CodePipeline, developers need to work -// with the following items: Jobs, which are instances of an action. For example, a -// job for a source action might import a revision of an artifact from a source. -// You can work with jobs by calling: +// Using the API to +// integrate with AWS CodePipeline For third-party integrators or developers who +// want to create their own integrations with AWS CodePipeline, the expected +// sequence varies from the standard API user. To integrate with AWS CodePipeline, +// developers need to work with the following items: Jobs, which are instances of +// an action. For example, a job for a source action might import a revision of an +// artifact from a source. You can work with jobs by calling: // -// * AcknowledgeJob, which confirms whether -// a job worker has received the specified job. +// * AcknowledgeJob, +// which confirms whether a job worker has received the specified job. // -// * GetJobDetails, which returns -// the details of a job. +// * +// GetJobDetails, which returns the details of a job. // -// * PollForJobs, which determines whether there are any -// jobs to act on. +// * PollForJobs, which +// determines whether there are any jobs to act on. // -// * PutJobFailureResult, which provides details of a job -// failure. +// * PutJobFailureResult, which +// provides details of a job failure. // -// * PutJobSuccessResult, which provides details of a job -// success. +// * PutJobSuccessResult, which provides +// details of a job success. // -// Third party jobs, which are instances of an action created by a -// partner action and integrated into AWS CodePipeline. Partner actions are created -// by members of the AWS Partner Network. You can work with third party jobs by -// calling: +// Third party jobs, which are instances of an action +// created by a partner action and integrated into AWS CodePipeline. Partner +// actions are created by members of the AWS Partner Network. You can work with +// third party jobs by calling: // -// * AcknowledgeThirdPartyJob, which confirms whether a job worker -// has received the specified job. +// * AcknowledgeThirdPartyJob, which confirms whether +// a job worker has received the specified job. // -// * GetThirdPartyJobDetails, which requests -// the details of a job for a partner action. +// * GetThirdPartyJobDetails, which +// requests the details of a job for a partner action. // -// * PollForThirdPartyJobs, which -// determines whether there are any jobs to act on. +// * PollForThirdPartyJobs, +// which determines whether there are any jobs to act on. // -// * +// * // PutThirdPartyJobFailureResult, which provides details of a job failure. // -// * +// * // PutThirdPartyJobSuccessResult, which provides details of a job success. package codepipeline diff --git a/service/codepipeline/types/enums.go b/service/codepipeline/types/enums.go index a4a945e3c5d..5c376d3a6f7 100644 --- a/service/codepipeline/types/enums.go +++ b/service/codepipeline/types/enums.go @@ -281,7 +281,7 @@ type StageRetryMode string // Enum values for StageRetryMode const ( - StageRetryModeFailed_actions StageRetryMode = "FAILED_ACTIONS" + StageRetryModeFailedActions StageRetryMode = "FAILED_ACTIONS" ) // Values returns all known values for StageRetryMode. 
Note that this can be @@ -341,7 +341,7 @@ type WebhookAuthenticationType string // Enum values for WebhookAuthenticationType const ( - WebhookAuthenticationTypeGithub_hmac WebhookAuthenticationType = "GITHUB_HMAC" + WebhookAuthenticationTypeGithubHmac WebhookAuthenticationType = "GITHUB_HMAC" WebhookAuthenticationTypeIp WebhookAuthenticationType = "IP" WebhookAuthenticationTypeUnauthenticated WebhookAuthenticationType = "UNAUTHENTICATED" ) diff --git a/service/codepipeline/types/types.go b/service/codepipeline/types/types.go index 5b1bdac7f20..8c16b74b1d4 100644 --- a/service/codepipeline/types/types.go +++ b/service/codepipeline/types/types.go @@ -840,30 +840,30 @@ type PipelineExecution struct { // The status of the pipeline execution. // - // * InProgress: The pipeline execution - // is currently running. + // * InProgress: The pipeline execution is + // currently running. // - // * Stopped: The pipeline execution was manually - // stopped. For more information, see Stopped Executions + // * Stopped: The pipeline execution was manually stopped. For + // more information, see Stopped Executions // (https://docs.aws.amazon.com/codepipeline/latest/userguide/concepts.html#concepts-executions-stopped). // - // - // * Stopping: The pipeline execution received a request to be manually stopped. + // * + // Stopping: The pipeline execution received a request to be manually stopped. // Depending on the selected stop mode, the execution is either completing or // abandoning in-progress actions. For more information, see Stopped Executions // (https://docs.aws.amazon.com/codepipeline/latest/userguide/concepts.html#concepts-executions-stopped). // + // * + // Succeeded: The pipeline execution was completed successfully. // - // * Succeeded: The pipeline execution was completed successfully. - // - // * - // Superseded: While this pipeline execution was waiting for the next stage to be - // completed, a newer pipeline execution advanced and continued through the - // pipeline instead. For more information, see Superseded Executions + // * Superseded: + // While this pipeline execution was waiting for the next stage to be completed, a + // newer pipeline execution advanced and continued through the pipeline instead. + // For more information, see Superseded Executions // (https://docs.aws.amazon.com/codepipeline/latest/userguide/concepts.html#concepts-superseded). // - // - // * Failed: The pipeline execution was not completed successfully. + // * + // Failed: The pipeline execution was not completed successfully. Status PipelineExecutionStatus } @@ -885,30 +885,30 @@ type PipelineExecutionSummary struct { // The status of the pipeline execution. // - // * InProgress: The pipeline execution - // is currently running. + // * InProgress: The pipeline execution is + // currently running. // - // * Stopped: The pipeline execution was manually - // stopped. For more information, see Stopped Executions + // * Stopped: The pipeline execution was manually stopped. For + // more information, see Stopped Executions // (https://docs.aws.amazon.com/codepipeline/latest/userguide/concepts.html#concepts-executions-stopped). // - // - // * Stopping: The pipeline execution received a request to be manually stopped. + // * + // Stopping: The pipeline execution received a request to be manually stopped. // Depending on the selected stop mode, the execution is either completing or // abandoning in-progress actions. 
For more information, see Stopped Executions // (https://docs.aws.amazon.com/codepipeline/latest/userguide/concepts.html#concepts-executions-stopped). // + // * + // Succeeded: The pipeline execution was completed successfully. // - // * Succeeded: The pipeline execution was completed successfully. - // - // * - // Superseded: While this pipeline execution was waiting for the next stage to be - // completed, a newer pipeline execution advanced and continued through the - // pipeline instead. For more information, see Superseded Executions + // * Superseded: + // While this pipeline execution was waiting for the next stage to be completed, a + // newer pipeline execution advanced and continued through the pipeline instead. + // For more information, see Superseded Executions // (https://docs.aws.amazon.com/codepipeline/latest/userguide/concepts.html#concepts-superseded). // - // - // * Failed: The pipeline execution was not completed successfully. + // * + // Failed: The pipeline execution was not completed successfully. Status PipelineExecutionStatus // The interaction that stopped a pipeline execution. @@ -1177,17 +1177,17 @@ type WebhookDefinition struct { // Supported options are GITHUB_HMAC, IP, and UNAUTHENTICATED. // - // * For - // information about the authentication scheme implemented by GITHUB_HMAC, see - // Securing your webhooks (https://developer.github.com/webhooks/securing/) on the - // GitHub Developer website. + // * For information + // about the authentication scheme implemented by GITHUB_HMAC, see Securing your + // webhooks (https://developer.github.com/webhooks/securing/) on the GitHub + // Developer website. // - // * IP rejects webhooks trigger requests unless - // they originate from an IP address in the IP range whitelisted in the - // authentication configuration. + // * IP rejects webhooks trigger requests unless they originate + // from an IP address in the IP range whitelisted in the authentication + // configuration. // - // * UNAUTHENTICATED accepts all webhook trigger - // requests regardless of origin. + // * UNAUTHENTICATED accepts all webhook trigger requests + // regardless of origin. // // This member is required. Authentication WebhookAuthenticationType diff --git a/service/codestar/doc.go b/service/codestar/doc.go index b2214be75b9..54cc866ff1e 100644 --- a/service/codestar/doc.go +++ b/service/codestar/doc.go @@ -8,58 +8,59 @@ // with usage examples. You can use the AWS CodeStar API to work with: Projects and // their resources, by calling the following: // -// * DeleteProject, which deletes a +// * DeleteProject, which deletes a // project. // -// * DescribeProject, which lists the attributes of a project. +// * DescribeProject, which lists the attributes of a project. // -// * +// * // ListProjects, which lists all projects associated with your AWS account. // -// * +// * // ListResources, which lists the resources associated with a project. // -// * +// * // ListTagsForProject, which lists the tags associated with a project. // -// * +// * // TagProject, which adds tags to a project. // -// * UntagProject, which removes -// tags from a project. +// * UntagProject, which removes tags +// from a project. // -// * UpdateProject, which updates the attributes of a +// * UpdateProject, which updates the attributes of a // project. // // Teams and team members, by calling the following: // -// * +// * // AssociateTeamMember, which adds an IAM user to the team for a project. 
// -// * -// DisassociateTeamMember, which removes an IAM user from the team for a project. -// +// * +// DisassociateTeamMember, which removes an IAM user from the team for a +// project. // -// * ListTeamMembers, which lists all the IAM users in the team for a project, -// including their roles and attributes. +// * ListTeamMembers, which lists all the IAM users in the team for a +// project, including their roles and attributes. // -// * UpdateTeamMember, which updates a -// team member's attributes in a project. +// * UpdateTeamMember, which +// updates a team member's attributes in a project. // -// Users, by calling the following: +// Users, by calling the +// following: // -// * -// CreateUserProfile, which creates a user profile that contains data associated -// with the user across all projects. +// * CreateUserProfile, which creates a user profile that contains data +// associated with the user across all projects. // -// * DeleteUserProfile, which deletes all -// user profile information across all projects. +// * DeleteUserProfile, which +// deletes all user profile information across all projects. // -// * DescribeUserProfile, which -// describes the profile of a user. +// * +// DescribeUserProfile, which describes the profile of a user. // -// * ListUserProfiles, which lists all user -// profiles. +// * ListUserProfiles, +// which lists all user profiles. // -// * UpdateUserProfile, which updates the profile for a user. +// * UpdateUserProfile, which updates the profile +// for a user. package codestar diff --git a/service/codestarconnections/doc.go b/service/codestarconnections/doc.go index de51108373d..2760f773f95 100644 --- a/service/codestarconnections/doc.go +++ b/service/codestarconnections/doc.go @@ -22,47 +22,47 @@ // GitHub Enterprise Server, you create a host for your connections. You can work // with connections by calling: // -// * CreateConnection, which creates a uniquely -// named connection that can be referenced by services such as CodePipeline. +// * CreateConnection, which creates a uniquely named +// connection that can be referenced by services such as CodePipeline. // -// * +// * // DeleteConnection, which deletes the specified connection. // -// * GetConnection, +// * GetConnection, // which returns information about the connection, including the connection // status. // -// * ListConnections, which lists the connections associated with your +// * ListConnections, which lists the connections associated with your // account. // // You can work with hosts by calling: // -// * CreateHost, which creates a +// * CreateHost, which creates a // host that represents the infrastructure where your provider is installed. // -// * +// * // DeleteHost, which deletes the specified host. // -// * GetHost, which returns +// * GetHost, which returns // information about the host, including the setup status. // -// * ListHosts, which +// * ListHosts, which // lists the hosts associated with your account. // // You can work with tags in AWS // CodeStar Connections by calling the following: // -// * ListTagsForResource, which +// * ListTagsForResource, which // gets information about AWS tags for a specified Amazon Resource Name (ARN) in // AWS CodeStar Connections. // -// * TagResource, which adds or updates tags for a +// * TagResource, which adds or updates tags for a // resource in AWS CodeStar Connections. // -// * UntagResource, which removes tags -// for a resource in AWS CodeStar Connections. 
+// * UntagResource, which removes tags for a +// resource in AWS CodeStar Connections. // -// For information about how to use -// AWS CodeStar Connections, see the Developer Tools User Guide +// For information about how to use AWS +// CodeStar Connections, see the Developer Tools User Guide // (https://docs.aws.amazon.com/dtconsole/latest/userguide/welcome-connections.html). package codestarconnections diff --git a/service/codestarconnections/types/enums.go b/service/codestarconnections/types/enums.go index 0ff454ac98b..5d49b9765f8 100644 --- a/service/codestarconnections/types/enums.go +++ b/service/codestarconnections/types/enums.go @@ -26,9 +26,9 @@ type ProviderType string // Enum values for ProviderType const ( - ProviderTypeBitbucket ProviderType = "Bitbucket" - ProviderTypeGithub ProviderType = "GitHub" - ProviderTypeGithub_enterprise_server ProviderType = "GitHubEnterpriseServer" + ProviderTypeBitbucket ProviderType = "Bitbucket" + ProviderTypeGithub ProviderType = "GitHub" + ProviderTypeGithubEnterpriseServer ProviderType = "GitHubEnterpriseServer" ) // Values returns all known values for ProviderType. Note that this can be expanded diff --git a/service/codestarnotifications/doc.go b/service/codestarnotifications/doc.go index ffbd0f59b33..6e162bd06d4 100644 --- a/service/codestarnotifications/doc.go +++ b/service/codestarnotifications/doc.go @@ -8,55 +8,55 @@ // API. You can use the AWS CodeStar Notifications API to work with the following // objects: Notification rules, by calling the following: // -// * +// * // CreateNotificationRule, which creates a notification rule for a resource in your // account. // -// * DeleteNotificationRule, which deletes a notification rule. +// * DeleteNotificationRule, which deletes a notification rule. // -// -// * DescribeNotificationRule, which provides information about a notification +// * +// DescribeNotificationRule, which provides information about a notification // rule. // -// * ListNotificationRules, which lists the notification rules -// associated with your account. +// * ListNotificationRules, which lists the notification rules associated +// with your account. // -// * UpdateNotificationRule, which changes the -// name, events, or targets associated with a notification rule. +// * UpdateNotificationRule, which changes the name, events, or +// targets associated with a notification rule. // -// * Subscribe, -// which subscribes a target to a notification rule. +// * Subscribe, which subscribes a +// target to a notification rule. // -// * Unsubscribe, which -// removes a target from a notification rule. +// * Unsubscribe, which removes a target from a +// notification rule. // // Targets, by calling the following: // +// * DeleteTarget, which +// removes a notification rule target (SNS topic) from a notification rule. // -// * DeleteTarget, which removes a notification rule target (SNS topic) from a -// notification rule. -// -// * ListTargets, which lists the targets associated with a -// notification rule. +// * +// ListTargets, which lists the targets associated with a notification +// rule. // // Events, by calling the following: // -// * ListEventTypes, -// which lists the event types you can include in a notification rule. +// * ListEventTypes, which lists the +// event types you can include in a notification rule. // -// Tags, by -// calling the following: +// Tags, by calling the +// following: // -// * ListTagsForResource, which lists the tags already -// associated with a notification rule in your account. 
+// * ListTagsForResource, which lists the tags already associated with +// a notification rule in your account. // -// * TagResource, which -// associates a tag you provide with a notification rule in your account. +// * TagResource, which associates a tag you +// provide with a notification rule in your account. // -// * -// UntagResource, which removes a tag from a notification rule in your -// account. +// * UntagResource, which +// removes a tag from a notification rule in your account. // -// For information about how to use AWS CodeStar Notifications, see link -// in the CodeStarNotifications User Guide. +// For information about +// how to use AWS CodeStar Notifications, see link in the CodeStarNotifications +// User Guide. package codestarnotifications diff --git a/service/codestarnotifications/types/enums.go b/service/codestarnotifications/types/enums.go index 40c3a865591..81af3dd5ba0 100644 --- a/service/codestarnotifications/types/enums.go +++ b/service/codestarnotifications/types/enums.go @@ -24,8 +24,8 @@ type ListEventTypesFilterName string // Enum values for ListEventTypesFilterName const ( - ListEventTypesFilterNameResource_type ListEventTypesFilterName = "RESOURCE_TYPE" - ListEventTypesFilterNameService_name ListEventTypesFilterName = "SERVICE_NAME" + ListEventTypesFilterNameResourceType ListEventTypesFilterName = "RESOURCE_TYPE" + ListEventTypesFilterNameServiceName ListEventTypesFilterName = "SERVICE_NAME" ) // Values returns all known values for ListEventTypesFilterName. Note that this can @@ -42,10 +42,10 @@ type ListNotificationRulesFilterName string // Enum values for ListNotificationRulesFilterName const ( - ListNotificationRulesFilterNameEvent_type_id ListNotificationRulesFilterName = "EVENT_TYPE_ID" - ListNotificationRulesFilterNameCreated_by ListNotificationRulesFilterName = "CREATED_BY" - ListNotificationRulesFilterNameResource ListNotificationRulesFilterName = "RESOURCE" - ListNotificationRulesFilterNameTarget_address ListNotificationRulesFilterName = "TARGET_ADDRESS" + ListNotificationRulesFilterNameEventTypeId ListNotificationRulesFilterName = "EVENT_TYPE_ID" + ListNotificationRulesFilterNameCreatedBy ListNotificationRulesFilterName = "CREATED_BY" + ListNotificationRulesFilterNameResource ListNotificationRulesFilterName = "RESOURCE" + ListNotificationRulesFilterNameTargetAddress ListNotificationRulesFilterName = "TARGET_ADDRESS" ) // Values returns all known values for ListNotificationRulesFilterName. Note that @@ -65,9 +65,9 @@ type ListTargetsFilterName string // Enum values for ListTargetsFilterName const ( - ListTargetsFilterNameTarget_type ListTargetsFilterName = "TARGET_TYPE" - ListTargetsFilterNameTarget_address ListTargetsFilterName = "TARGET_ADDRESS" - ListTargetsFilterNameTarget_status ListTargetsFilterName = "TARGET_STATUS" + ListTargetsFilterNameTargetType ListTargetsFilterName = "TARGET_TYPE" + ListTargetsFilterNameTargetAddress ListTargetsFilterName = "TARGET_ADDRESS" + ListTargetsFilterNameTargetStatus ListTargetsFilterName = "TARGET_STATUS" ) // Values returns all known values for ListTargetsFilterName. Note that this can be diff --git a/service/cognitoidentity/api_op_CreateIdentityPool.go b/service/cognitoidentity/api_op_CreateIdentityPool.go index 99f6c3cff51..88c709b040f 100644 --- a/service/cognitoidentity/api_op_CreateIdentityPool.go +++ b/service/cognitoidentity/api_op_CreateIdentityPool.go @@ -15,17 +15,17 @@ import ( // information that is specific to your AWS account. 
The keys for // SupportedLoginProviders are as follows: // -// * Facebook: graph.facebook.com +// * Facebook: graph.facebook.com // +// * +// Google: accounts.google.com // -// * Google: accounts.google.com +// * Amazon: www.amazon.com // -// * Amazon: www.amazon.com -// -// * Twitter: +// * Twitter: // api.twitter.com // -// * Digits: www.digits.com +// * Digits: www.digits.com // // You must use AWS Developer // credentials to call this API. diff --git a/service/cognitoidentity/api_op_GetId.go b/service/cognitoidentity/api_op_GetId.go index baf0b4c3336..cbce1c04a59 100644 --- a/service/cognitoidentity/api_op_GetId.go +++ b/service/cognitoidentity/api_op_GetId.go @@ -41,21 +41,21 @@ type GetIdInput struct { // A set of optional name-value pairs that map provider names to provider tokens. // The available provider names for Logins are as follows: // - // * Facebook: + // * Facebook: // graph.facebook.com // - // * Amazon Cognito user pool: cognito-idp..amazonaws.com/, - // for example, cognito-idp.us-east-1.amazonaws.com/us-east-1_123456789. + // * Amazon Cognito user pool: cognito-idp..amazonaws.com/, for + // example, cognito-idp.us-east-1.amazonaws.com/us-east-1_123456789. // - // * - // Google: accounts.google.com + // * Google: + // accounts.google.com // - // * Amazon: www.amazon.com + // * Amazon: www.amazon.com // - // * Twitter: - // api.twitter.com + // * Twitter: api.twitter.com // - // * Digits: www.digits.com + // * + // Digits: www.digits.com Logins map[string]*string } diff --git a/service/cognitoidentity/types/enums.go b/service/cognitoidentity/types/enums.go index 55ac8c6645b..0455b48bc4c 100644 --- a/service/cognitoidentity/types/enums.go +++ b/service/cognitoidentity/types/enums.go @@ -6,8 +6,8 @@ type AmbiguousRoleResolutionType string // Enum values for AmbiguousRoleResolutionType const ( - AmbiguousRoleResolutionTypeAuthenticated_role AmbiguousRoleResolutionType = "AuthenticatedRole" - AmbiguousRoleResolutionTypeDeny AmbiguousRoleResolutionType = "Deny" + AmbiguousRoleResolutionTypeAuthenticatedRole AmbiguousRoleResolutionType = "AuthenticatedRole" + AmbiguousRoleResolutionTypeDeny AmbiguousRoleResolutionType = "Deny" ) // Values returns all known values for AmbiguousRoleResolutionType. Note that this @@ -24,8 +24,8 @@ type ErrorCode string // Enum values for ErrorCode const ( - ErrorCodeAccess_denied ErrorCode = "AccessDenied" - ErrorCodeInternal_server_error ErrorCode = "InternalServerError" + ErrorCodeAccessDenied ErrorCode = "AccessDenied" + ErrorCodeInternalServerError ErrorCode = "InternalServerError" ) // Values returns all known values for ErrorCode. Note that this can be expanded in @@ -42,10 +42,10 @@ type MappingRuleMatchType string // Enum values for MappingRuleMatchType const ( - MappingRuleMatchTypeEquals MappingRuleMatchType = "Equals" - MappingRuleMatchTypeContains MappingRuleMatchType = "Contains" - MappingRuleMatchTypeStarts_with MappingRuleMatchType = "StartsWith" - MappingRuleMatchTypeNot_equal MappingRuleMatchType = "NotEqual" + MappingRuleMatchTypeEquals MappingRuleMatchType = "Equals" + MappingRuleMatchTypeContains MappingRuleMatchType = "Contains" + MappingRuleMatchTypeStartsWith MappingRuleMatchType = "StartsWith" + MappingRuleMatchTypeNotEqual MappingRuleMatchType = "NotEqual" ) // Values returns all known values for MappingRuleMatchType. 
Note that this can be diff --git a/service/cognitoidentityprovider/api_op_AdminConfirmSignUp.go b/service/cognitoidentityprovider/api_op_AdminConfirmSignUp.go index 2bc0e8f71a9..8b8e5d49c82 100644 --- a/service/cognitoidentityprovider/api_op_AdminConfirmSignUp.go +++ b/service/cognitoidentityprovider/api_op_AdminConfirmSignUp.go @@ -54,17 +54,17 @@ type AdminConfirmSignUpInput struct { // in the Amazon Cognito Developer Guide. Take the following limitations into // consideration when you use the ClientMetadata parameter: // - // * Amazon Cognito - // does not store the ClientMetadata value. This data is available only to AWS - // Lambda triggers that are assigned to a user pool to support custom workflows. If - // your user pool configuration does not include triggers, the ClientMetadata - // parameter serves no purpose. + // * Amazon Cognito does + // not store the ClientMetadata value. This data is available only to AWS Lambda + // triggers that are assigned to a user pool to support custom workflows. If your + // user pool configuration does not include triggers, the ClientMetadata parameter + // serves no purpose. // - // * Amazon Cognito does not validate the - // ClientMetadata value. + // * Amazon Cognito does not validate the ClientMetadata + // value. // - // * Amazon Cognito does not encrypt the the - // ClientMetadata value, so don't use it to provide sensitive information. + // * Amazon Cognito does not encrypt the the ClientMetadata value, so don't + // use it to provide sensitive information. ClientMetadata map[string]*string } diff --git a/service/cognitoidentityprovider/api_op_AdminCreateUser.go b/service/cognitoidentityprovider/api_op_AdminCreateUser.go index e3a9c6f681d..c99d3d07a75 100644 --- a/service/cognitoidentityprovider/api_op_AdminCreateUser.go +++ b/service/cognitoidentityprovider/api_op_AdminCreateUser.go @@ -65,17 +65,17 @@ type AdminCreateUserInput struct { // in the Amazon Cognito Developer Guide. Take the following limitations into // consideration when you use the ClientMetadata parameter: // - // * Amazon Cognito - // does not store the ClientMetadata value. This data is available only to AWS - // Lambda triggers that are assigned to a user pool to support custom workflows. If - // your user pool configuration does not include triggers, the ClientMetadata - // parameter serves no purpose. + // * Amazon Cognito does + // not store the ClientMetadata value. This data is available only to AWS Lambda + // triggers that are assigned to a user pool to support custom workflows. If your + // user pool configuration does not include triggers, the ClientMetadata parameter + // serves no purpose. // - // * Amazon Cognito does not validate the - // ClientMetadata value. + // * Amazon Cognito does not validate the ClientMetadata + // value. // - // * Amazon Cognito does not encrypt the the - // ClientMetadata value, so don't use it to provide sensitive information. + // * Amazon Cognito does not encrypt the the ClientMetadata value, so don't + // use it to provide sensitive information. ClientMetadata map[string]*string // Specify "EMAIL" if email will be used to send the welcome message. Specify "SMS" @@ -124,15 +124,15 @@ type AdminCreateUserInput struct { // by calling AdminUpdateUserAttributes // (https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_AdminUpdateUserAttributes.html).) // + // * + // email: The email address of the user to whom the message that contains the code + // and username will be sent. 
Required if the email_verified attribute is set to + // True, or if "EMAIL" is specified in the DesiredDeliveryMediums parameter. // - // * email: The email address of the user to whom the message that contains the - // code and username will be sent. Required if the email_verified attribute is set - // to True, or if "EMAIL" is specified in the DesiredDeliveryMediums parameter. - // - // - // * phone_number: The phone number of the user to whom the message that contains - // the code and username will be sent. Required if the phone_number_verified - // attribute is set to True, or if "SMS" is specified in the DesiredDeliveryMediums + // * + // phone_number: The phone number of the user to whom the message that contains the + // code and username will be sent. Required if the phone_number_verified attribute + // is set to True, or if "SMS" is specified in the DesiredDeliveryMediums // parameter. UserAttributes []*types.AttributeType diff --git a/service/cognitoidentityprovider/api_op_AdminGetUser.go b/service/cognitoidentityprovider/api_op_AdminGetUser.go index 1130efecdcf..b5f2e50b845 100644 --- a/service/cognitoidentityprovider/api_op_AdminGetUser.go +++ b/service/cognitoidentityprovider/api_op_AdminGetUser.go @@ -80,27 +80,26 @@ type AdminGetUserOutput struct { // The user status. Can be one of the following: // - // * UNCONFIRMED - User has been + // * UNCONFIRMED - User has been // created but not confirmed. // - // * CONFIRMED - User has been confirmed. + // * CONFIRMED - User has been confirmed. // - // * - // ARCHIVED - User is no longer active. + // * ARCHIVED - + // User is no longer active. // - // * COMPROMISED - User is disabled due - // to a potential security threat. + // * COMPROMISED - User is disabled due to a potential + // security threat. // - // * UNKNOWN - User status is not known. + // * UNKNOWN - User status is not known. // + // * RESET_REQUIRED - User + // is confirmed, but the user must request a code and reset his or her password + // before he or she can sign in. // - // * RESET_REQUIRED - User is confirmed, but the user must request a code and reset - // his or her password before he or she can sign in. - // - // * FORCE_CHANGE_PASSWORD - - // The user is confirmed and the user can sign in using a temporary password, but - // on first sign-in, the user must change his or her password to a new value before - // doing anything else. + // * FORCE_CHANGE_PASSWORD - The user is confirmed + // and the user can sign in using a temporary password, but on first sign-in, the + // user must change his or her password to a new value before doing anything else. UserStatus types.UserStatusType // Metadata pertaining to the operation's result. diff --git a/service/cognitoidentityprovider/api_op_AdminInitiateAuth.go b/service/cognitoidentityprovider/api_op_AdminInitiateAuth.go index b28408e7923..d43fd49c181 100644 --- a/service/cognitoidentityprovider/api_op_AdminInitiateAuth.go +++ b/service/cognitoidentityprovider/api_op_AdminInitiateAuth.go @@ -34,41 +34,41 @@ type AdminInitiateAuthInput struct { // The authentication flow for this call to execute. The API action will depend on // this value. For example: // - // * REFRESH_TOKEN_AUTH will take in a valid refresh + // * REFRESH_TOKEN_AUTH will take in a valid refresh // token and return new tokens. // - // * USER_SRP_AUTH will take in USERNAME and - // SRP_A and return the SRP variables to be used for next challenge execution. 
+ // * USER_SRP_AUTH will take in USERNAME and SRP_A + // and return the SRP variables to be used for next challenge execution. // - // - // * USER_PASSWORD_AUTH will take in USERNAME and PASSWORD and return the next + // * + // USER_PASSWORD_AUTH will take in USERNAME and PASSWORD and return the next // challenge or tokens. // // Valid values include: // - // * USER_SRP_AUTH: Authentication + // * USER_SRP_AUTH: Authentication // flow for the Secure Remote Password (SRP) protocol. // - // * + // * // REFRESH_TOKEN_AUTH/REFRESH_TOKEN: Authentication flow for refreshing the access // token and ID token by supplying a valid refresh token. // - // * CUSTOM_AUTH: - // Custom authentication flow. + // * CUSTOM_AUTH: Custom + // authentication flow. // - // * ADMIN_NO_SRP_AUTH: Non-SRP authentication - // flow; you can pass in the USERNAME and PASSWORD directly if the flow is enabled - // for calling the app client. + // * ADMIN_NO_SRP_AUTH: Non-SRP authentication flow; you can + // pass in the USERNAME and PASSWORD directly if the flow is enabled for calling + // the app client. // - // * USER_PASSWORD_AUTH: Non-SRP authentication - // flow; USERNAME and PASSWORD are passed directly. If a user migration Lambda - // trigger is set, this flow will invoke the user migration Lambda if the USERNAME - // is not found in the user pool. + // * USER_PASSWORD_AUTH: Non-SRP authentication flow; USERNAME and + // PASSWORD are passed directly. If a user migration Lambda trigger is set, this + // flow will invoke the user migration Lambda if the USERNAME is not found in the + // user pool. // - // * ADMIN_USER_PASSWORD_AUTH: Admin-based user - // password authentication. This replaces the ADMIN_NO_SRP_AUTH authentication - // flow. In this flow, Cognito receives the password in the request instead of - // using the SRP process to verify passwords. + // * ADMIN_USER_PASSWORD_AUTH: Admin-based user password + // authentication. This replaces the ADMIN_NO_SRP_AUTH authentication flow. In this + // flow, Cognito receives the password in the request instead of using the SRP + // process to verify passwords. // // This member is required. AuthFlow types.AuthFlowType @@ -90,19 +90,19 @@ type AdminInitiateAuthInput struct { // The authentication parameters. These are inputs corresponding to the AuthFlow // that you are invoking. The required values depend on the value of AuthFlow: // + // * + // For USER_SRP_AUTH: USERNAME (required), SRP_A (required), SECRET_HASH (required + // if the app client is configured with a client secret), DEVICE_KEY. // - // * For USER_SRP_AUTH: USERNAME (required), SRP_A (required), SECRET_HASH - // (required if the app client is configured with a client secret), DEVICE_KEY. - // - // - // * For REFRESH_TOKEN_AUTH/REFRESH_TOKEN: REFRESH_TOKEN (required), SECRET_HASH + // * For + // REFRESH_TOKEN_AUTH/REFRESH_TOKEN: REFRESH_TOKEN (required), SECRET_HASH // (required if the app client is configured with a client secret), DEVICE_KEY. // - // - // * For ADMIN_NO_SRP_AUTH: USERNAME (required), SECRET_HASH (if app client is + // * + // For ADMIN_NO_SRP_AUTH: USERNAME (required), SECRET_HASH (if app client is // configured with client secret), PASSWORD (required), DEVICE_KEY. // - // * For + // * For // CUSTOM_AUTH: USERNAME (required), SECRET_HASH (if app client is configured with // client secret), DEVICE_KEY. To start the authentication flow with password // verification, include ChallengeName: SRP_A and SRP_A: (The SRP_A Value). 
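The AuthFlow and AuthParameters comments reflowed above spell out which parameters each flow expects. As a rough illustrative sketch only, not part of the generated diff, the snippet below wires them together for the ADMIN_USER_PASSWORD_AUTH case; the pool and client IDs are placeholders, and both the constant name types.AuthFlowTypeAdminUserPasswordAuth and the map[string]*string parameter shape assume the renamed, pointer-style API surface used elsewhere in this change, so check them against the generated package.

```go
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider"
	"github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider/types"
)

// adminPasswordSignIn starts an ADMIN_USER_PASSWORD_AUTH flow, supplying the
// USERNAME and PASSWORD auth parameters described in the comments above.
// The user pool ID and app client ID below are placeholders.
func adminPasswordSignIn(ctx context.Context, client *cognitoidentityprovider.Client, username, password string) (*cognitoidentityprovider.AdminInitiateAuthOutput, error) {
	return client.AdminInitiateAuth(ctx, &cognitoidentityprovider.AdminInitiateAuthInput{
		UserPoolId: aws.String("us-east-1_EXAMPLE"),
		ClientId:   aws.String("example-app-client-id"),
		AuthFlow:   types.AuthFlowTypeAdminUserPasswordAuth,
		AuthParameters: map[string]*string{
			"USERNAME": aws.String(username),
			"PASSWORD": aws.String(password),
			// Add "SECRET_HASH" here as well when the app client is
			// configured with a client secret.
		},
	})
}
```

The response then either carries tokens or a ChallengeName (for example NEW_PASSWORD_REQUIRED) that must be answered with AdminRespondToAuthChallenge, as the output documentation in the following hunks describes.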
@@ -115,54 +115,53 @@ type AdminInitiateAuthInput struct { // for various triggers. The ClientMetadata value is passed as input to the // functions for only the following triggers: // - // * Pre signup + // * Pre signup // - // * Pre + // * Pre // authentication // - // * User migration + // * User migration // - // When Amazon Cognito invokes the functions - // for these triggers, it passes a JSON payload, which the function receives as - // input. This payload contains a validationData attribute, which provides the data - // that you assigned to the ClientMetadata parameter in your AdminInitiateAuth - // request. In your function code in AWS Lambda, you can process the validationData - // value to enhance your workflow for your specific needs. When you use the + // When Amazon Cognito invokes the functions for + // these triggers, it passes a JSON payload, which the function receives as input. + // This payload contains a validationData attribute, which provides the data that + // you assigned to the ClientMetadata parameter in your AdminInitiateAuth request. + // In your function code in AWS Lambda, you can process the validationData value to + // enhance your workflow for your specific needs. When you use the // AdminInitiateAuth API action, Amazon Cognito also invokes the functions for the - // following triggers, but it does not provide the ClientMetadata value as input: - // + // following triggers, but it does not provide the ClientMetadata value as + // input: // // * Post authentication // - // * Custom message + // * Custom message // - // * Pre token generation + // * Pre token generation // - // * + // * // Create auth challenge // - // * Define auth challenge + // * Define auth challenge // - // * Verify auth - // challenge + // * Verify auth challenge // - // For more information, see Customizing User Pool Workflows with Lambda - // Triggers + // For + // more information, see Customizing User Pool Workflows with Lambda Triggers // (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools-working-with-aws-lambda-triggers.html) // in the Amazon Cognito Developer Guide. Take the following limitations into // consideration when you use the ClientMetadata parameter: // - // * Amazon Cognito - // does not store the ClientMetadata value. This data is available only to AWS - // Lambda triggers that are assigned to a user pool to support custom workflows. If - // your user pool configuration does not include triggers, the ClientMetadata - // parameter serves no purpose. + // * Amazon Cognito does + // not store the ClientMetadata value. This data is available only to AWS Lambda + // triggers that are assigned to a user pool to support custom workflows. If your + // user pool configuration does not include triggers, the ClientMetadata parameter + // serves no purpose. // - // * Amazon Cognito does not validate the - // ClientMetadata value. + // * Amazon Cognito does not validate the ClientMetadata + // value. // - // * Amazon Cognito does not encrypt the the - // ClientMetadata value, so don't use it to provide sensitive information. + // * Amazon Cognito does not encrypt the the ClientMetadata value, so don't + // use it to provide sensitive information. ClientMetadata map[string]*string // Contextual data such as the user's device fingerprint, IP address, or location @@ -184,38 +183,38 @@ type AdminInitiateAuthOutput struct { // returned to you in the AdminInitiateAuth response if you need to pass another // challenge. 
// - // * MFA_SETUP: If MFA is required, users who do not have at least - // one of the MFA methods set up are presented with an MFA_SETUP challenge. The - // user must set up at least one MFA type to continue to authenticate. + // * MFA_SETUP: If MFA is required, users who do not have at least one + // of the MFA methods set up are presented with an MFA_SETUP challenge. The user + // must set up at least one MFA type to continue to authenticate. // - // * + // * // SELECT_MFA_TYPE: Selects the MFA type. Valid MFA options are SMS_MFA for text // SMS MFA, and SOFTWARE_TOKEN_MFA for TOTP software token MFA. // - // * SMS_MFA: - // Next challenge is to supply an SMS_MFA_CODE, delivered via SMS. + // * SMS_MFA: Next + // challenge is to supply an SMS_MFA_CODE, delivered via SMS. // - // * - // PASSWORD_VERIFIER: Next challenge is to supply PASSWORD_CLAIM_SIGNATURE, + // * PASSWORD_VERIFIER: + // Next challenge is to supply PASSWORD_CLAIM_SIGNATURE, // PASSWORD_CLAIM_SECRET_BLOCK, and TIMESTAMP after the client-side SRP // calculations. // - // * CUSTOM_CHALLENGE: This is returned if your custom + // * CUSTOM_CHALLENGE: This is returned if your custom // authentication flow determines that the user should pass another challenge // before tokens are issued. // - // * DEVICE_SRP_AUTH: If device tracking was enabled - // on your user pool and the previous challenges were passed, this challenge is + // * DEVICE_SRP_AUTH: If device tracking was enabled on + // your user pool and the previous challenges were passed, this challenge is // returned so that Amazon Cognito can start tracking this device. // - // * + // * // DEVICE_PASSWORD_VERIFIER: Similar to PASSWORD_VERIFIER, but for devices only. // - // - // * ADMIN_NO_SRP_AUTH: This is returned if you need to authenticate with USERNAME + // * + // ADMIN_NO_SRP_AUTH: This is returned if you need to authenticate with USERNAME // and PASSWORD directly. An app client must be enabled to use this flow. // - // * + // * // NEW_PASSWORD_REQUIRED: For users which are required to change their passwords // after successful first login. This challenge should be passed with NEW_PASSWORD // and any other required attributes. diff --git a/service/cognitoidentityprovider/api_op_AdminResetUserPassword.go b/service/cognitoidentityprovider/api_op_AdminResetUserPassword.go index d79b85b3290..5fa0c4eaa05 100644 --- a/service/cognitoidentityprovider/api_op_AdminResetUserPassword.go +++ b/service/cognitoidentityprovider/api_op_AdminResetUserPassword.go @@ -63,17 +63,17 @@ type AdminResetUserPasswordInput struct { // in the Amazon Cognito Developer Guide. Take the following limitations into // consideration when you use the ClientMetadata parameter: // - // * Amazon Cognito - // does not store the ClientMetadata value. This data is available only to AWS - // Lambda triggers that are assigned to a user pool to support custom workflows. If - // your user pool configuration does not include triggers, the ClientMetadata - // parameter serves no purpose. + // * Amazon Cognito does + // not store the ClientMetadata value. This data is available only to AWS Lambda + // triggers that are assigned to a user pool to support custom workflows. If your + // user pool configuration does not include triggers, the ClientMetadata parameter + // serves no purpose. // - // * Amazon Cognito does not validate the - // ClientMetadata value. + // * Amazon Cognito does not validate the ClientMetadata + // value. 
// - // * Amazon Cognito does not encrypt the the - // ClientMetadata value, so don't use it to provide sensitive information. + // * Amazon Cognito does not encrypt the the ClientMetadata value, so don't + // use it to provide sensitive information. ClientMetadata map[string]*string } diff --git a/service/cognitoidentityprovider/api_op_AdminRespondToAuthChallenge.go b/service/cognitoidentityprovider/api_op_AdminRespondToAuthChallenge.go index 379da8be351..a98ed5d71bc 100644 --- a/service/cognitoidentityprovider/api_op_AdminRespondToAuthChallenge.go +++ b/service/cognitoidentityprovider/api_op_AdminRespondToAuthChallenge.go @@ -54,20 +54,20 @@ type AdminRespondToAuthChallengeInput struct { // The challenge responses. These are inputs corresponding to the value of // ChallengeName, for example: // - // * SMS_MFA: SMS_MFA_CODE, USERNAME, SECRET_HASH - // (if app client is configured with client secret). + // * SMS_MFA: SMS_MFA_CODE, USERNAME, SECRET_HASH (if + // app client is configured with client secret). // - // * PASSWORD_VERIFIER: + // * PASSWORD_VERIFIER: // PASSWORD_CLAIM_SIGNATURE, PASSWORD_CLAIM_SECRET_BLOCK, TIMESTAMP, USERNAME, // SECRET_HASH (if app client is configured with client secret). // - // * + // * // ADMIN_NO_SRP_AUTH: PASSWORD, USERNAME, SECRET_HASH (if app client is configured // with client secret). // - // * NEW_PASSWORD_REQUIRED: NEW_PASSWORD, any other - // required attributes, USERNAME, SECRET_HASH (if app client is configured with - // client secret). + // * NEW_PASSWORD_REQUIRED: NEW_PASSWORD, any other required + // attributes, USERNAME, SECRET_HASH (if app client is configured with client + // secret). // // The value of the USERNAME attribute must be the user's actual // username, not an alias (such as email address or phone number). To make this @@ -94,17 +94,17 @@ type AdminRespondToAuthChallengeInput struct { // in the Amazon Cognito Developer Guide. Take the following limitations into // consideration when you use the ClientMetadata parameter: // - // * Amazon Cognito - // does not store the ClientMetadata value. This data is available only to AWS - // Lambda triggers that are assigned to a user pool to support custom workflows. If - // your user pool configuration does not include triggers, the ClientMetadata - // parameter serves no purpose. + // * Amazon Cognito does + // not store the ClientMetadata value. This data is available only to AWS Lambda + // triggers that are assigned to a user pool to support custom workflows. If your + // user pool configuration does not include triggers, the ClientMetadata parameter + // serves no purpose. // - // * Amazon Cognito does not validate the - // ClientMetadata value. + // * Amazon Cognito does not validate the ClientMetadata + // value. // - // * Amazon Cognito does not encrypt the the - // ClientMetadata value, so don't use it to provide sensitive information. + // * Amazon Cognito does not encrypt the the ClientMetadata value, so don't + // use it to provide sensitive information. 
ClientMetadata map[string]*string // Contextual data such as the user's device fingerprint, IP address, or location diff --git a/service/cognitoidentityprovider/api_op_AdminUpdateUserAttributes.go b/service/cognitoidentityprovider/api_op_AdminUpdateUserAttributes.go index 16a81564407..0e9d9a88f6d 100644 --- a/service/cognitoidentityprovider/api_op_AdminUpdateUserAttributes.go +++ b/service/cognitoidentityprovider/api_op_AdminUpdateUserAttributes.go @@ -65,17 +65,17 @@ type AdminUpdateUserAttributesInput struct { // in the Amazon Cognito Developer Guide. Take the following limitations into // consideration when you use the ClientMetadata parameter: // - // * Amazon Cognito - // does not store the ClientMetadata value. This data is available only to AWS - // Lambda triggers that are assigned to a user pool to support custom workflows. If - // your user pool configuration does not include triggers, the ClientMetadata - // parameter serves no purpose. + // * Amazon Cognito does + // not store the ClientMetadata value. This data is available only to AWS Lambda + // triggers that are assigned to a user pool to support custom workflows. If your + // user pool configuration does not include triggers, the ClientMetadata parameter + // serves no purpose. // - // * Amazon Cognito does not validate the - // ClientMetadata value. + // * Amazon Cognito does not validate the ClientMetadata + // value. // - // * Amazon Cognito does not encrypt the the - // ClientMetadata value, so don't use it to provide sensitive information. + // * Amazon Cognito does not encrypt the the ClientMetadata value, so don't + // use it to provide sensitive information. ClientMetadata map[string]*string } diff --git a/service/cognitoidentityprovider/api_op_ConfirmForgotPassword.go b/service/cognitoidentityprovider/api_op_ConfirmForgotPassword.go index dafcad3d424..e31fe1a7189 100644 --- a/service/cognitoidentityprovider/api_op_ConfirmForgotPassword.go +++ b/service/cognitoidentityprovider/api_op_ConfirmForgotPassword.go @@ -71,17 +71,17 @@ type ConfirmForgotPasswordInput struct { // in the Amazon Cognito Developer Guide. Take the following limitations into // consideration when you use the ClientMetadata parameter: // - // * Amazon Cognito - // does not store the ClientMetadata value. This data is available only to AWS - // Lambda triggers that are assigned to a user pool to support custom workflows. If - // your user pool configuration does not include triggers, the ClientMetadata - // parameter serves no purpose. + // * Amazon Cognito does + // not store the ClientMetadata value. This data is available only to AWS Lambda + // triggers that are assigned to a user pool to support custom workflows. If your + // user pool configuration does not include triggers, the ClientMetadata parameter + // serves no purpose. // - // * Amazon Cognito does not validate the - // ClientMetadata value. + // * Amazon Cognito does not validate the ClientMetadata + // value. // - // * Amazon Cognito does not encrypt the the - // ClientMetadata value, so don't use it to provide sensitive information. + // * Amazon Cognito does not encrypt the the ClientMetadata value, so don't + // use it to provide sensitive information. 
ClientMetadata map[string]*string // A keyed-hash message authentication code (HMAC) calculated using the secret key diff --git a/service/cognitoidentityprovider/api_op_ConfirmSignUp.go b/service/cognitoidentityprovider/api_op_ConfirmSignUp.go index 2b34fdfecf9..686f0ea8469 100644 --- a/service/cognitoidentityprovider/api_op_ConfirmSignUp.go +++ b/service/cognitoidentityprovider/api_op_ConfirmSignUp.go @@ -64,17 +64,17 @@ type ConfirmSignUpInput struct { // in the Amazon Cognito Developer Guide. Take the following limitations into // consideration when you use the ClientMetadata parameter: // - // * Amazon Cognito - // does not store the ClientMetadata value. This data is available only to AWS - // Lambda triggers that are assigned to a user pool to support custom workflows. If - // your user pool configuration does not include triggers, the ClientMetadata - // parameter serves no purpose. + // * Amazon Cognito does + // not store the ClientMetadata value. This data is available only to AWS Lambda + // triggers that are assigned to a user pool to support custom workflows. If your + // user pool configuration does not include triggers, the ClientMetadata parameter + // serves no purpose. // - // * Amazon Cognito does not validate the - // ClientMetadata value. + // * Amazon Cognito does not validate the ClientMetadata + // value. // - // * Amazon Cognito does not encrypt the the - // ClientMetadata value, so don't use it to provide sensitive information. + // * Amazon Cognito does not encrypt the the ClientMetadata value, so don't + // use it to provide sensitive information. ClientMetadata map[string]*string // Boolean to be specified to force user confirmation irrespective of existing diff --git a/service/cognitoidentityprovider/api_op_CreateIdentityProvider.go b/service/cognitoidentityprovider/api_op_CreateIdentityProvider.go index 0c3130f5cc2..c1a5276595e 100644 --- a/service/cognitoidentityprovider/api_op_CreateIdentityProvider.go +++ b/service/cognitoidentityprovider/api_op_CreateIdentityProvider.go @@ -32,73 +32,71 @@ type CreateIdentityProviderInput struct { // The identity provider details. The following list describes the provider detail // keys for each identity provider type. 
// - // * For Google and Login with Amazon: + // * For Google and Login with Amazon: // + // * + // client_id // - // * client_id - // - // * client_secret - // - // * authorize_scopes + // * client_secret // - // * For - // Facebook: - // - // * client_id + // * authorize_scopes // - // * client_secret + // * For Facebook: // - // * - // authorize_scopes + // * client_id // - // * api_version + // * + // client_secret // - // * For Sign in with Apple: + // * authorize_scopes // + // * api_version // - // * client_id + // * For Sign in with Apple: // - // * team_id + // * + // client_id // - // * key_id + // * team_id // - // * private_key + // * key_id // + // * private_key // // * authorize_scopes // - // * For OIDC providers: - // - // * client_id + // * For OIDC + // providers: // - // * - // client_secret + // * client_id // - // * attributes_request_method + // * client_secret // - // * oidc_issuer + // * attributes_request_method // + // * + // oidc_issuer // // * authorize_scopes // - // * authorize_url if not available from discovery URL - // specified by oidc_issuer key + // * authorize_url if not available from discovery + // URL specified by oidc_issuer key // - // * token_url if not available from - // discovery URL specified by oidc_issuer key + // * token_url if not available from discovery + // URL specified by oidc_issuer key // - // * attributes_url if not - // available from discovery URL specified by oidc_issuer key + // * attributes_url if not available from + // discovery URL specified by oidc_issuer key // - // * jwks_uri if - // not available from discovery URL specified by oidc_issuer key + // * jwks_uri if not available from + // discovery URL specified by oidc_issuer key // - // * For SAML - // providers: + // * For SAML providers: // - // * MetadataFile OR MetadataURL + // * + // MetadataFile OR MetadataURL // - // * IDPSignout optional + // * IDPSignout optional // // This member is required. ProviderDetails map[string]*string diff --git a/service/cognitoidentityprovider/api_op_CreateUserPoolClient.go b/service/cognitoidentityprovider/api_op_CreateUserPoolClient.go index 07441d0efdf..f24936265a4 100644 --- a/service/cognitoidentityprovider/api_op_CreateUserPoolClient.go +++ b/service/cognitoidentityprovider/api_op_CreateUserPoolClient.go @@ -74,12 +74,12 @@ type CreateUserPoolClientInput struct { // A list of allowed redirect (callback) URLs for the identity providers. A // redirect URI must: // - // * Be an absolute URI. + // * Be an absolute URI. // - // * Be registered with the + // * Be registered with the // authorization server. // - // * Not include a fragment component. + // * Not include a fragment component. // // See OAuth 2.0 - // Redirection Endpoint (https://tools.ietf.org/html/rfc6749#section-3.1.2). Amazon @@ -90,17 +90,17 @@ type CreateUserPoolClientInput struct { // The default redirect URI. Must be in the CallbackURLs list. A redirect URI // must: // - // * Be an absolute URI. + // * Be an absolute URI. // - // * Be registered with the authorization - // server. + // * Be registered with the authorization server. // - // * Not include a fragment component. + // * + // Not include a fragment component. // - // See OAuth 2.0 - Redirection - // Endpoint (https://tools.ietf.org/html/rfc6749#section-3.1.2). Amazon Cognito - // requires HTTPS over HTTP except for http://localhost for testing purposes only. - // App callback URLs such as myapp://example are also supported. 
+ // See OAuth 2.0 - Redirection Endpoint + // (https://tools.ietf.org/html/rfc6749#section-3.1.2). Amazon Cognito requires + // HTTPS over HTTP except for http://localhost for testing purposes only. App + // callback URLs such as myapp://example are also supported. DefaultRedirectURI *string // The authentication flows that are supported by the user pool clients. Flow names @@ -108,25 +108,24 @@ type CreateUserPoolClientInput struct { // prefix. Note that values with ALLOW_ prefix cannot be used along with values // without ALLOW_ prefix. Valid values include: // - // * - // ALLOW_ADMIN_USER_PASSWORD_AUTH: Enable admin based user password authentication - // flow ADMIN_USER_PASSWORD_AUTH. This setting replaces the ADMIN_NO_SRP_AUTH - // setting. With this authentication flow, Cognito receives the password in the - // request instead of using the SRP (Secure Remote Password protocol) protocol to - // verify passwords. + // * ALLOW_ADMIN_USER_PASSWORD_AUTH: + // Enable admin based user password authentication flow ADMIN_USER_PASSWORD_AUTH. + // This setting replaces the ADMIN_NO_SRP_AUTH setting. With this authentication + // flow, Cognito receives the password in the request instead of using the SRP + // (Secure Remote Password protocol) protocol to verify passwords. // - // * ALLOW_CUSTOM_AUTH: Enable Lambda trigger based - // authentication. + // * + // ALLOW_CUSTOM_AUTH: Enable Lambda trigger based authentication. // - // * ALLOW_USER_PASSWORD_AUTH: Enable user password-based - // authentication. In this flow, Cognito receives the password in the request - // instead of using the SRP protocol to verify passwords. + // * + // ALLOW_USER_PASSWORD_AUTH: Enable user password-based authentication. In this + // flow, Cognito receives the password in the request instead of using the SRP + // protocol to verify passwords. // - // * - // ALLOW_USER_SRP_AUTH: Enable SRP based authentication. + // * ALLOW_USER_SRP_AUTH: Enable SRP based + // authentication. // - // * - // ALLOW_REFRESH_TOKEN_AUTH: Enable authflow to refresh tokens. + // * ALLOW_REFRESH_TOKEN_AUTH: Enable authflow to refresh tokens. ExplicitAuthFlows []types.ExplicitAuthFlowsType // Boolean to specify whether you want to generate a secret for the user pool @@ -150,10 +149,10 @@ type CreateUserPoolClientInput struct { // LEGACY, those APIs will return a UserNotFoundException exception if the user // does not exist in the user pool. Valid values include: // - // * ENABLED - This + // * ENABLED - This // prevents user existence-related errors. // - // * LEGACY - This represents the old + // * LEGACY - This represents the old // behavior of Cognito where user existence related errors are not // prevented. // diff --git a/service/cognitoidentityprovider/api_op_ForgotPassword.go b/service/cognitoidentityprovider/api_op_ForgotPassword.go index 4b6efbdbbbc..5c907b248dd 100644 --- a/service/cognitoidentityprovider/api_op_ForgotPassword.go +++ b/service/cognitoidentityprovider/api_op_ForgotPassword.go @@ -68,17 +68,17 @@ type ForgotPasswordInput struct { // in the Amazon Cognito Developer Guide. Take the following limitations into // consideration when you use the ClientMetadata parameter: // - // * Amazon Cognito - // does not store the ClientMetadata value. This data is available only to AWS - // Lambda triggers that are assigned to a user pool to support custom workflows. If - // your user pool configuration does not include triggers, the ClientMetadata - // parameter serves no purpose. 
+ // * Amazon Cognito does + // not store the ClientMetadata value. This data is available only to AWS Lambda + // triggers that are assigned to a user pool to support custom workflows. If your + // user pool configuration does not include triggers, the ClientMetadata parameter + // serves no purpose. // - // * Amazon Cognito does not validate the - // ClientMetadata value. + // * Amazon Cognito does not validate the ClientMetadata + // value. // - // * Amazon Cognito does not encrypt the the - // ClientMetadata value, so don't use it to provide sensitive information. + // * Amazon Cognito does not encrypt the the ClientMetadata value, so don't + // use it to provide sensitive information. ClientMetadata map[string]*string // A keyed-hash message authentication code (HMAC) calculated using the secret key diff --git a/service/cognitoidentityprovider/api_op_GetUserAttributeVerificationCode.go b/service/cognitoidentityprovider/api_op_GetUserAttributeVerificationCode.go index 9b1068eaea3..b042bfee759 100644 --- a/service/cognitoidentityprovider/api_op_GetUserAttributeVerificationCode.go +++ b/service/cognitoidentityprovider/api_op_GetUserAttributeVerificationCode.go @@ -57,17 +57,17 @@ type GetUserAttributeVerificationCodeInput struct { // in the Amazon Cognito Developer Guide. Take the following limitations into // consideration when you use the ClientMetadata parameter: // - // * Amazon Cognito - // does not store the ClientMetadata value. This data is available only to AWS - // Lambda triggers that are assigned to a user pool to support custom workflows. If - // your user pool configuration does not include triggers, the ClientMetadata - // parameter serves no purpose. + // * Amazon Cognito does + // not store the ClientMetadata value. This data is available only to AWS Lambda + // triggers that are assigned to a user pool to support custom workflows. If your + // user pool configuration does not include triggers, the ClientMetadata parameter + // serves no purpose. // - // * Amazon Cognito does not validate the - // ClientMetadata value. + // * Amazon Cognito does not validate the ClientMetadata + // value. // - // * Amazon Cognito does not encrypt the the - // ClientMetadata value, so don't use it to provide sensitive information. + // * Amazon Cognito does not encrypt the the ClientMetadata value, so don't + // use it to provide sensitive information. ClientMetadata map[string]*string } diff --git a/service/cognitoidentityprovider/api_op_GetUserPoolMfaConfig.go b/service/cognitoidentityprovider/api_op_GetUserPoolMfaConfig.go index 4620b168c84..e499800d5be 100644 --- a/service/cognitoidentityprovider/api_op_GetUserPoolMfaConfig.go +++ b/service/cognitoidentityprovider/api_op_GetUserPoolMfaConfig.go @@ -39,13 +39,13 @@ type GetUserPoolMfaConfigOutput struct { // The multi-factor (MFA) configuration. Valid values include: // - // * OFF MFA will - // not be used for any users. + // * OFF MFA will not + // be used for any users. // - // * ON MFA is required for all users to sign in. + // * ON MFA is required for all users to sign in. // - // - // * OPTIONAL MFA will be required only for individual users who have an MFA factor + // * + // OPTIONAL MFA will be required only for individual users who have an MFA factor // enabled. 
MfaConfiguration types.UserPoolMfaType diff --git a/service/cognitoidentityprovider/api_op_InitiateAuth.go b/service/cognitoidentityprovider/api_op_InitiateAuth.go index a21647b08ab..c1f75c8c358 100644 --- a/service/cognitoidentityprovider/api_op_InitiateAuth.go +++ b/service/cognitoidentityprovider/api_op_InitiateAuth.go @@ -32,40 +32,39 @@ type InitiateAuthInput struct { // The authentication flow for this call to execute. The API action will depend on // this value. For example: // - // * REFRESH_TOKEN_AUTH will take in a valid refresh + // * REFRESH_TOKEN_AUTH will take in a valid refresh // token and return new tokens. // - // * USER_SRP_AUTH will take in USERNAME and - // SRP_A and return the SRP variables to be used for next challenge execution. + // * USER_SRP_AUTH will take in USERNAME and SRP_A + // and return the SRP variables to be used for next challenge execution. // - // - // * USER_PASSWORD_AUTH will take in USERNAME and PASSWORD and return the next + // * + // USER_PASSWORD_AUTH will take in USERNAME and PASSWORD and return the next // challenge or tokens. // // Valid values include: // - // * USER_SRP_AUTH: Authentication + // * USER_SRP_AUTH: Authentication // flow for the Secure Remote Password (SRP) protocol. // - // * + // * // REFRESH_TOKEN_AUTH/REFRESH_TOKEN: Authentication flow for refreshing the access // token and ID token by supplying a valid refresh token. // - // * CUSTOM_AUTH: - // Custom authentication flow. + // * CUSTOM_AUTH: Custom + // authentication flow. // - // * USER_PASSWORD_AUTH: Non-SRP authentication - // flow; USERNAME and PASSWORD are passed directly. If a user migration Lambda - // trigger is set, this flow will invoke the user migration Lambda if the USERNAME - // is not found in the user pool. + // * USER_PASSWORD_AUTH: Non-SRP authentication flow; + // USERNAME and PASSWORD are passed directly. If a user migration Lambda trigger is + // set, this flow will invoke the user migration Lambda if the USERNAME is not + // found in the user pool. // - // * ADMIN_USER_PASSWORD_AUTH: Admin-based user - // password authentication. This replaces the ADMIN_NO_SRP_AUTH authentication - // flow. In this flow, Cognito receives the password in the request instead of - // using the SRP process to verify passwords. + // * ADMIN_USER_PASSWORD_AUTH: Admin-based user password + // authentication. This replaces the ADMIN_NO_SRP_AUTH authentication flow. In this + // flow, Cognito receives the password in the request instead of using the SRP + // process to verify passwords. // - // ADMIN_NO_SRP_AUTH is not a valid - // value. + // ADMIN_NO_SRP_AUTH is not a valid value. // // This member is required. AuthFlow types.AuthFlowType @@ -82,16 +81,16 @@ type InitiateAuthInput struct { // The authentication parameters. These are inputs corresponding to the AuthFlow // that you are invoking. The required values depend on the value of AuthFlow: // + // * + // For USER_SRP_AUTH: USERNAME (required), SRP_A (required), SECRET_HASH (required + // if the app client is configured with a client secret), DEVICE_KEY. // - // * For USER_SRP_AUTH: USERNAME (required), SRP_A (required), SECRET_HASH - // (required if the app client is configured with a client secret), DEVICE_KEY. - // - // - // * For REFRESH_TOKEN_AUTH/REFRESH_TOKEN: REFRESH_TOKEN (required), SECRET_HASH + // * For + // REFRESH_TOKEN_AUTH/REFRESH_TOKEN: REFRESH_TOKEN (required), SECRET_HASH // (required if the app client is configured with a client secret), DEVICE_KEY. 
// - // - // * For CUSTOM_AUTH: USERNAME (required), SECRET_HASH (if app client is configured + // * + // For CUSTOM_AUTH: USERNAME (required), SECRET_HASH (if app client is configured // with client secret), DEVICE_KEY. To start the authentication flow with password // verification, include ChallengeName: SRP_A and SRP_A: (The SRP_A Value). AuthParameters map[string]*string @@ -103,15 +102,15 @@ type InitiateAuthInput struct { // various triggers. The ClientMetadata value is passed as input to the functions // for only the following triggers: // - // * Pre signup - // - // * Pre authentication + // * Pre signup // + // * Pre authentication // - // * User migration + // * User + // migration // - // When Amazon Cognito invokes the functions for these triggers, - // it passes a JSON payload, which the function receives as input. This payload + // When Amazon Cognito invokes the functions for these triggers, it + // passes a JSON payload, which the function receives as input. This payload // contains a validationData attribute, which provides the data that you assigned // to the ClientMetadata parameter in your InitiateAuth request. In your function // code in AWS Lambda, you can process the validationData value to enhance your @@ -119,37 +118,37 @@ type InitiateAuthInput struct { // Amazon Cognito also invokes the functions for the following triggers, but it // does not provide the ClientMetadata value as input: // - // * Post authentication - // + // * Post authentication // - // * Custom message + // * + // Custom message // - // * Pre token generation + // * Pre token generation // - // * Create auth challenge + // * Create auth challenge // - // * - // Define auth challenge + // * Define auth + // challenge // - // * Verify auth challenge + // * Verify auth challenge // - // For more information, see - // Customizing User Pool Workflows with Lambda Triggers + // For more information, see Customizing User + // Pool Workflows with Lambda Triggers // (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools-working-with-aws-lambda-triggers.html) // in the Amazon Cognito Developer Guide. Take the following limitations into // consideration when you use the ClientMetadata parameter: // - // * Amazon Cognito - // does not store the ClientMetadata value. This data is available only to AWS - // Lambda triggers that are assigned to a user pool to support custom workflows. If - // your user pool configuration does not include triggers, the ClientMetadata - // parameter serves no purpose. + // * Amazon Cognito does + // not store the ClientMetadata value. This data is available only to AWS Lambda + // triggers that are assigned to a user pool to support custom workflows. If your + // user pool configuration does not include triggers, the ClientMetadata parameter + // serves no purpose. // - // * Amazon Cognito does not validate the - // ClientMetadata value. + // * Amazon Cognito does not validate the ClientMetadata + // value. // - // * Amazon Cognito does not encrypt the the - // ClientMetadata value, so don't use it to provide sensitive information. + // * Amazon Cognito does not encrypt the the ClientMetadata value, so don't + // use it to provide sensitive information. ClientMetadata map[string]*string // Contextual data such as the user's device fingerprint, IP address, or location @@ -172,27 +171,27 @@ type InitiateAuthOutput struct { // challenge. Valid values include the following. 
Note that all of these challenges // require USERNAME and SECRET_HASH (if applicable) in the parameters. // - // * - // SMS_MFA: Next challenge is to supply an SMS_MFA_CODE, delivered via SMS. + // * SMS_MFA: + // Next challenge is to supply an SMS_MFA_CODE, delivered via SMS. // - // * + // * // PASSWORD_VERIFIER: Next challenge is to supply PASSWORD_CLAIM_SIGNATURE, // PASSWORD_CLAIM_SECRET_BLOCK, and TIMESTAMP after the client-side SRP // calculations. // - // * CUSTOM_CHALLENGE: This is returned if your custom + // * CUSTOM_CHALLENGE: This is returned if your custom // authentication flow determines that the user should pass another challenge // before tokens are issued. // - // * DEVICE_SRP_AUTH: If device tracking was enabled - // on your user pool and the previous challenges were passed, this challenge is + // * DEVICE_SRP_AUTH: If device tracking was enabled on + // your user pool and the previous challenges were passed, this challenge is // returned so that Amazon Cognito can start tracking this device. // - // * + // * // DEVICE_PASSWORD_VERIFIER: Similar to PASSWORD_VERIFIER, but for devices only. // - // - // * NEW_PASSWORD_REQUIRED: For users which are required to change their passwords + // * + // NEW_PASSWORD_REQUIRED: For users which are required to change their passwords // after successful first login. This challenge should be passed with NEW_PASSWORD // and any other required attributes. ChallengeName types.ChallengeNameType diff --git a/service/cognitoidentityprovider/api_op_ListUsers.go b/service/cognitoidentityprovider/api_op_ListUsers.go index c5a163bc082..d864909fc8e 100644 --- a/service/cognitoidentityprovider/api_op_ListUsers.go +++ b/service/cognitoidentityprovider/api_op_ListUsers.go @@ -44,47 +44,46 @@ type ListUsersInput struct { // Quotation marks within the filter string must be escaped using the backslash (\) // character. For example, "family_name = \"Reddy\"". // - // * AttributeName: The - // name of the attribute to search for. You can only search for one attribute at a - // time. + // * AttributeName: The name of + // the attribute to search for. You can only search for one attribute at a time. // - // * Filter-Type: For an exact match, use =, for example, "given_name = - // \"Jon\"". For a prefix ("starts with") match, use ^=, for example, "given_name - // ^= \"Jon\"". + // * + // Filter-Type: For an exact match, use =, for example, "given_name = \"Jon\"". For + // a prefix ("starts with") match, use ^=, for example, "given_name ^= \"Jon\"". // - // * AttributeValue: The attribute value that must be matched for - // each user. + // * + // AttributeValue: The attribute value that must be matched for each user. // - // If the filter string is empty, ListUsers returns all users in the - // user pool. You can only search for the following standard attributes: + // If the + // filter string is empty, ListUsers returns all users in the user pool. 
You can + // only search for the following standard attributes: // - // * - // username (case-sensitive) + // * username + // (case-sensitive) // - // * email + // * email // - // * phone_number + // * phone_number // - // * name + // * name // - // * - // given_name + // * given_name // - // * family_name + // * + // family_name // - // * preferred_username + // * preferred_username // - // * - // cognito:user_status (called Status in the Console) (case-insensitive) + // * cognito:user_status (called Status in the + // Console) (case-insensitive) // - // * - // status (called Enabled in the Console) (case-sensitive) + // * status (called Enabled in the Console) + // (case-sensitive) // - // * sub + // * sub // - // Custom - // attributes are not searchable. For more information, see Searching for Users - // Using the ListUsers API + // Custom attributes are not searchable. For more + // information, see Searching for Users Using the ListUsers API // (https://docs.aws.amazon.com/cognito/latest/developerguide/how-to-manage-user-accounts.html#cognito-user-pools-searching-for-users-using-listusers-api) // and Examples of Using the ListUsers API // (https://docs.aws.amazon.com/cognito/latest/developerguide/how-to-manage-user-accounts.html#cognito-user-pools-searching-for-users-listusers-api-examples) diff --git a/service/cognitoidentityprovider/api_op_ResendConfirmationCode.go b/service/cognitoidentityprovider/api_op_ResendConfirmationCode.go index f7d8db4997d..831e559b2b4 100644 --- a/service/cognitoidentityprovider/api_op_ResendConfirmationCode.go +++ b/service/cognitoidentityprovider/api_op_ResendConfirmationCode.go @@ -59,17 +59,17 @@ type ResendConfirmationCodeInput struct { // in the Amazon Cognito Developer Guide. Take the following limitations into // consideration when you use the ClientMetadata parameter: // - // * Amazon Cognito - // does not store the ClientMetadata value. This data is available only to AWS - // Lambda triggers that are assigned to a user pool to support custom workflows. If - // your user pool configuration does not include triggers, the ClientMetadata - // parameter serves no purpose. + // * Amazon Cognito does + // not store the ClientMetadata value. This data is available only to AWS Lambda + // triggers that are assigned to a user pool to support custom workflows. If your + // user pool configuration does not include triggers, the ClientMetadata parameter + // serves no purpose. // - // * Amazon Cognito does not validate the - // ClientMetadata value. + // * Amazon Cognito does not validate the ClientMetadata + // value. // - // * Amazon Cognito does not encrypt the the - // ClientMetadata value, so don't use it to provide sensitive information. + // * Amazon Cognito does not encrypt the the ClientMetadata value, so don't + // use it to provide sensitive information. ClientMetadata map[string]*string // A keyed-hash message authentication code (HMAC) calculated using the secret key diff --git a/service/cognitoidentityprovider/api_op_RespondToAuthChallenge.go b/service/cognitoidentityprovider/api_op_RespondToAuthChallenge.go index d5caac8eddb..70a0440b6b6 100644 --- a/service/cognitoidentityprovider/api_op_RespondToAuthChallenge.go +++ b/service/cognitoidentityprovider/api_op_RespondToAuthChallenge.go @@ -49,24 +49,24 @@ type RespondToAuthChallengeInput struct { // ChallengeName, for example: SECRET_HASH (if app client is configured with client // secret) applies to all inputs below (including SOFTWARE_TOKEN_MFA). // - // * - // SMS_MFA: SMS_MFA_CODE, USERNAME. 
+ // * SMS_MFA: + // SMS_MFA_CODE, USERNAME. // - // * PASSWORD_VERIFIER: - // PASSWORD_CLAIM_SIGNATURE, PASSWORD_CLAIM_SECRET_BLOCK, TIMESTAMP, USERNAME. + // * PASSWORD_VERIFIER: PASSWORD_CLAIM_SIGNATURE, + // PASSWORD_CLAIM_SECRET_BLOCK, TIMESTAMP, USERNAME. // + // * NEW_PASSWORD_REQUIRED: + // NEW_PASSWORD, any other required attributes, USERNAME. // - // * NEW_PASSWORD_REQUIRED: NEW_PASSWORD, any other required attributes, - // USERNAME. + // * SOFTWARE_TOKEN_MFA: + // USERNAME and SOFTWARE_TOKEN_MFA_CODE are required attributes. // - // * SOFTWARE_TOKEN_MFA: USERNAME and SOFTWARE_TOKEN_MFA_CODE are - // required attributes. + // * DEVICE_SRP_AUTH + // requires USERNAME, DEVICE_KEY, SRP_A (and SECRET_HASH). // - // * DEVICE_SRP_AUTH requires USERNAME, DEVICE_KEY, SRP_A - // (and SECRET_HASH). - // - // * DEVICE_PASSWORD_VERIFIER requires everything that - // PASSWORD_VERIFIER requires plus DEVICE_KEY. + // * + // DEVICE_PASSWORD_VERIFIER requires everything that PASSWORD_VERIFIER requires + // plus DEVICE_KEY. ChallengeResponses map[string]*string // A map of custom key-value pairs that you can provide as input for any custom @@ -86,17 +86,17 @@ type RespondToAuthChallengeInput struct { // in the Amazon Cognito Developer Guide. Take the following limitations into // consideration when you use the ClientMetadata parameter: // - // * Amazon Cognito - // does not store the ClientMetadata value. This data is available only to AWS - // Lambda triggers that are assigned to a user pool to support custom workflows. If - // your user pool configuration does not include triggers, the ClientMetadata - // parameter serves no purpose. + // * Amazon Cognito does + // not store the ClientMetadata value. This data is available only to AWS Lambda + // triggers that are assigned to a user pool to support custom workflows. If your + // user pool configuration does not include triggers, the ClientMetadata parameter + // serves no purpose. // - // * Amazon Cognito does not validate the - // ClientMetadata value. + // * Amazon Cognito does not validate the ClientMetadata + // value. // - // * Amazon Cognito does not encrypt the the - // ClientMetadata value, so don't use it to provide sensitive information. + // * Amazon Cognito does not encrypt the the ClientMetadata value, so don't + // use it to provide sensitive information. ClientMetadata map[string]*string // The session which should be passed both ways in challenge-response calls to the diff --git a/service/cognitoidentityprovider/api_op_SetUserPoolMfaConfig.go b/service/cognitoidentityprovider/api_op_SetUserPoolMfaConfig.go index 1f8abb077f5..0214ea978df 100644 --- a/service/cognitoidentityprovider/api_op_SetUserPoolMfaConfig.go +++ b/service/cognitoidentityprovider/api_op_SetUserPoolMfaConfig.go @@ -36,13 +36,13 @@ type SetUserPoolMfaConfigInput struct { // The MFA configuration. Valid values include: // - // * OFF MFA will not be used for - // any users. + // * OFF MFA will not be used for any + // users. // - // * ON MFA is required for all users to sign in. + // * ON MFA is required for all users to sign in. // - // * OPTIONAL - // MFA will be required only for individual users who have an MFA factor enabled. + // * OPTIONAL MFA will be + // required only for individual users who have an MFA factor enabled. MfaConfiguration types.UserPoolMfaType // The SMS text message MFA configuration. @@ -56,13 +56,13 @@ type SetUserPoolMfaConfigOutput struct { // The MFA configuration. 
Valid values include: // - // * OFF MFA will not be used for - // any users. + // * OFF MFA will not be used for any + // users. // - // * ON MFA is required for all users to sign in. + // * ON MFA is required for all users to sign in. // - // * OPTIONAL - // MFA will be required only for individual users who have an MFA factor enabled. + // * OPTIONAL MFA will be + // required only for individual users who have an MFA factor enabled. MfaConfiguration types.UserPoolMfaType // The SMS text message MFA configuration. diff --git a/service/cognitoidentityprovider/api_op_SignUp.go b/service/cognitoidentityprovider/api_op_SignUp.go index 00bc1300900..dace397dcf9 100644 --- a/service/cognitoidentityprovider/api_op_SignUp.go +++ b/service/cognitoidentityprovider/api_op_SignUp.go @@ -63,17 +63,17 @@ type SignUpInput struct { // in the Amazon Cognito Developer Guide. Take the following limitations into // consideration when you use the ClientMetadata parameter: // - // * Amazon Cognito - // does not store the ClientMetadata value. This data is available only to AWS - // Lambda triggers that are assigned to a user pool to support custom workflows. If - // your user pool configuration does not include triggers, the ClientMetadata - // parameter serves no purpose. + // * Amazon Cognito does + // not store the ClientMetadata value. This data is available only to AWS Lambda + // triggers that are assigned to a user pool to support custom workflows. If your + // user pool configuration does not include triggers, the ClientMetadata parameter + // serves no purpose. // - // * Amazon Cognito does not validate the - // ClientMetadata value. + // * Amazon Cognito does not validate the ClientMetadata + // value. // - // * Amazon Cognito does not encrypt the the - // ClientMetadata value, so don't use it to provide sensitive information. + // * Amazon Cognito does not encrypt the the ClientMetadata value, so don't + // use it to provide sensitive information. ClientMetadata map[string]*string // A keyed-hash message authentication code (HMAC) calculated using the secret key diff --git a/service/cognitoidentityprovider/api_op_UpdateUserAttributes.go b/service/cognitoidentityprovider/api_op_UpdateUserAttributes.go index 8ec7e6c81e2..0e0c217eed1 100644 --- a/service/cognitoidentityprovider/api_op_UpdateUserAttributes.go +++ b/service/cognitoidentityprovider/api_op_UpdateUserAttributes.go @@ -55,17 +55,17 @@ type UpdateUserAttributesInput struct { // in the Amazon Cognito Developer Guide. Take the following limitations into // consideration when you use the ClientMetadata parameter: // - // * Amazon Cognito - // does not store the ClientMetadata value. This data is available only to AWS - // Lambda triggers that are assigned to a user pool to support custom workflows. If - // your user pool configuration does not include triggers, the ClientMetadata - // parameter serves no purpose. + // * Amazon Cognito does + // not store the ClientMetadata value. This data is available only to AWS Lambda + // triggers that are assigned to a user pool to support custom workflows. If your + // user pool configuration does not include triggers, the ClientMetadata parameter + // serves no purpose. // - // * Amazon Cognito does not validate the - // ClientMetadata value. + // * Amazon Cognito does not validate the ClientMetadata + // value. // - // * Amazon Cognito does not encrypt the the - // ClientMetadata value, so don't use it to provide sensitive information. 
+ // * Amazon Cognito does not encrypt the the ClientMetadata value, so don't + // use it to provide sensitive information. ClientMetadata map[string]*string } diff --git a/service/cognitoidentityprovider/api_op_UpdateUserPool.go b/service/cognitoidentityprovider/api_op_UpdateUserPool.go index 830650a8109..28befc8c8f5 100644 --- a/service/cognitoidentityprovider/api_op_UpdateUserPool.go +++ b/service/cognitoidentityprovider/api_op_UpdateUserPool.go @@ -73,15 +73,15 @@ type UpdateUserPoolInput struct { // Can be one of the following values: // - // * OFF - MFA tokens are not required and + // * OFF - MFA tokens are not required and // cannot be specified during user registration. // - // * ON - MFA tokens are - // required for all user registrations. You can only specify required when you are - // initially creating a user pool. + // * ON - MFA tokens are required + // for all user registrations. You can only specify required when you are initially + // creating a user pool. // - // * OPTIONAL - Users have the option when - // registering to create an MFA token. + // * OPTIONAL - Users have the option when registering to + // create an MFA token. MfaConfiguration types.UserPoolMfaType // A container with the policies you wish to update in a user pool. diff --git a/service/cognitoidentityprovider/api_op_UpdateUserPoolClient.go b/service/cognitoidentityprovider/api_op_UpdateUserPoolClient.go index b489b76568d..5969bf9c01f 100644 --- a/service/cognitoidentityprovider/api_op_UpdateUserPoolClient.go +++ b/service/cognitoidentityprovider/api_op_UpdateUserPoolClient.go @@ -79,12 +79,12 @@ type UpdateUserPoolClientInput struct { // A list of allowed redirect (callback) URLs for the identity providers. A // redirect URI must: // - // * Be an absolute URI. + // * Be an absolute URI. // - // * Be registered with the + // * Be registered with the // authorization server. // - // * Not include a fragment component. + // * Not include a fragment component. // // See OAuth 2.0 - // Redirection Endpoint (https://tools.ietf.org/html/rfc6749#section-3.1.2). Amazon @@ -98,17 +98,17 @@ type UpdateUserPoolClientInput struct { // The default redirect URI. Must be in the CallbackURLs list. A redirect URI // must: // - // * Be an absolute URI. + // * Be an absolute URI. // - // * Be registered with the authorization - // server. + // * Be registered with the authorization server. // - // * Not include a fragment component. + // * + // Not include a fragment component. // - // See OAuth 2.0 - Redirection - // Endpoint (https://tools.ietf.org/html/rfc6749#section-3.1.2). Amazon Cognito - // requires HTTPS over HTTP except for http://localhost for testing purposes only. - // App callback URLs such as myapp://example are also supported. + // See OAuth 2.0 - Redirection Endpoint + // (https://tools.ietf.org/html/rfc6749#section-3.1.2). Amazon Cognito requires + // HTTPS over HTTP except for http://localhost for testing purposes only. App + // callback URLs such as myapp://example are also supported. DefaultRedirectURI *string // The authentication flows that are supported by the user pool clients. Flow names @@ -116,25 +116,24 @@ type UpdateUserPoolClientInput struct { // prefix. Note that values with ALLOW_ prefix cannot be used along with values // without ALLOW_ prefix. Valid values include: // - // * - // ALLOW_ADMIN_USER_PASSWORD_AUTH: Enable admin based user password authentication - // flow ADMIN_USER_PASSWORD_AUTH. This setting replaces the ADMIN_NO_SRP_AUTH - // setting. 
With this authentication flow, Cognito receives the password in the - // request instead of using the SRP (Secure Remote Password protocol) protocol to - // verify passwords. + // * ALLOW_ADMIN_USER_PASSWORD_AUTH: + // Enable admin based user password authentication flow ADMIN_USER_PASSWORD_AUTH. + // This setting replaces the ADMIN_NO_SRP_AUTH setting. With this authentication + // flow, Cognito receives the password in the request instead of using the SRP + // (Secure Remote Password protocol) protocol to verify passwords. // - // * ALLOW_CUSTOM_AUTH: Enable Lambda trigger based - // authentication. + // * + // ALLOW_CUSTOM_AUTH: Enable Lambda trigger based authentication. // - // * ALLOW_USER_PASSWORD_AUTH: Enable user password-based - // authentication. In this flow, Cognito receives the password in the request - // instead of using the SRP protocol to verify passwords. + // * + // ALLOW_USER_PASSWORD_AUTH: Enable user password-based authentication. In this + // flow, Cognito receives the password in the request instead of using the SRP + // protocol to verify passwords. // - // * - // ALLOW_USER_SRP_AUTH: Enable SRP based authentication. + // * ALLOW_USER_SRP_AUTH: Enable SRP based + // authentication. // - // * - // ALLOW_REFRESH_TOKEN_AUTH: Enable authflow to refresh tokens. + // * ALLOW_REFRESH_TOKEN_AUTH: Enable authflow to refresh tokens. ExplicitAuthFlows []types.ExplicitAuthFlowsType // The time limit, after which the ID token is no longer valid and cannot be used. @@ -152,10 +151,10 @@ type UpdateUserPoolClientInput struct { // LEGACY, those APIs will return a UserNotFoundException exception if the user // does not exist in the user pool. Valid values include: // - // * ENABLED - This + // * ENABLED - This // prevents user existence-related errors. // - // * LEGACY - This represents the old + // * LEGACY - This represents the old // behavior of Cognito where user existence related errors are not // prevented. // diff --git a/service/cognitoidentityprovider/types/enums.go b/service/cognitoidentityprovider/types/enums.go index 0ac915f6d17..faaf3d88458 100644 --- a/service/cognitoidentityprovider/types/enums.go +++ b/service/cognitoidentityprovider/types/enums.go @@ -6,10 +6,10 @@ type AccountTakeoverEventActionType string // Enum values for AccountTakeoverEventActionType const ( - AccountTakeoverEventActionTypeBlock AccountTakeoverEventActionType = "BLOCK" - AccountTakeoverEventActionTypeMfa_if_configured AccountTakeoverEventActionType = "MFA_IF_CONFIGURED" - AccountTakeoverEventActionTypeMfa_required AccountTakeoverEventActionType = "MFA_REQUIRED" - AccountTakeoverEventActionTypeNo_action AccountTakeoverEventActionType = "NO_ACTION" + AccountTakeoverEventActionTypeBlock AccountTakeoverEventActionType = "BLOCK" + AccountTakeoverEventActionTypeMfaIfConfigured AccountTakeoverEventActionType = "MFA_IF_CONFIGURED" + AccountTakeoverEventActionTypeMfaRequired AccountTakeoverEventActionType = "MFA_REQUIRED" + AccountTakeoverEventActionTypeNoAction AccountTakeoverEventActionType = "NO_ACTION" ) // Values returns all known values for AccountTakeoverEventActionType. 
Note that @@ -49,9 +49,9 @@ type AliasAttributeType string // Enum values for AliasAttributeType const ( - AliasAttributeTypePhone_number AliasAttributeType = "phone_number" - AliasAttributeTypeEmail AliasAttributeType = "email" - AliasAttributeTypePreferred_username AliasAttributeType = "preferred_username" + AliasAttributeTypePhoneNumber AliasAttributeType = "phone_number" + AliasAttributeTypeEmail AliasAttributeType = "email" + AliasAttributeTypePreferredUsername AliasAttributeType = "preferred_username" ) // Values returns all known values for AliasAttributeType. Note that this can be @@ -91,13 +91,13 @@ type AuthFlowType string // Enum values for AuthFlowType const ( - AuthFlowTypeUser_srp_auth AuthFlowType = "USER_SRP_AUTH" - AuthFlowTypeRefresh_token_auth AuthFlowType = "REFRESH_TOKEN_AUTH" - AuthFlowTypeRefresh_token AuthFlowType = "REFRESH_TOKEN" - AuthFlowTypeCustom_auth AuthFlowType = "CUSTOM_AUTH" - AuthFlowTypeAdmin_no_srp_auth AuthFlowType = "ADMIN_NO_SRP_AUTH" - AuthFlowTypeUser_password_auth AuthFlowType = "USER_PASSWORD_AUTH" - AuthFlowTypeAdmin_user_password_auth AuthFlowType = "ADMIN_USER_PASSWORD_AUTH" + AuthFlowTypeUserSrpAuth AuthFlowType = "USER_SRP_AUTH" + AuthFlowTypeRefreshTokenAuth AuthFlowType = "REFRESH_TOKEN_AUTH" + AuthFlowTypeRefreshToken AuthFlowType = "REFRESH_TOKEN" + AuthFlowTypeCustomAuth AuthFlowType = "CUSTOM_AUTH" + AuthFlowTypeAdminNoSrpAuth AuthFlowType = "ADMIN_NO_SRP_AUTH" + AuthFlowTypeUserPasswordAuth AuthFlowType = "USER_PASSWORD_AUTH" + AuthFlowTypeAdminUserPasswordAuth AuthFlowType = "ADMIN_USER_PASSWORD_AUTH" ) // Values returns all known values for AuthFlowType. Note that this can be expanded @@ -137,16 +137,16 @@ type ChallengeNameType string // Enum values for ChallengeNameType const ( - ChallengeNameTypeSms_mfa ChallengeNameType = "SMS_MFA" - ChallengeNameTypeSoftware_token_mfa ChallengeNameType = "SOFTWARE_TOKEN_MFA" - ChallengeNameTypeSelect_mfa_type ChallengeNameType = "SELECT_MFA_TYPE" - ChallengeNameTypeMfa_setup ChallengeNameType = "MFA_SETUP" - ChallengeNameTypePassword_verifier ChallengeNameType = "PASSWORD_VERIFIER" - ChallengeNameTypeCustom_challenge ChallengeNameType = "CUSTOM_CHALLENGE" - ChallengeNameTypeDevice_srp_auth ChallengeNameType = "DEVICE_SRP_AUTH" - ChallengeNameTypeDevice_password_verifier ChallengeNameType = "DEVICE_PASSWORD_VERIFIER" - ChallengeNameTypeAdmin_no_srp_auth ChallengeNameType = "ADMIN_NO_SRP_AUTH" - ChallengeNameTypeNew_password_required ChallengeNameType = "NEW_PASSWORD_REQUIRED" + ChallengeNameTypeSmsMfa ChallengeNameType = "SMS_MFA" + ChallengeNameTypeSoftwareTokenMfa ChallengeNameType = "SOFTWARE_TOKEN_MFA" + ChallengeNameTypeSelectMfaType ChallengeNameType = "SELECT_MFA_TYPE" + ChallengeNameTypeMfaSetup ChallengeNameType = "MFA_SETUP" + ChallengeNameTypePasswordVerifier ChallengeNameType = "PASSWORD_VERIFIER" + ChallengeNameTypeCustomChallenge ChallengeNameType = "CUSTOM_CHALLENGE" + ChallengeNameTypeDeviceSrpAuth ChallengeNameType = "DEVICE_SRP_AUTH" + ChallengeNameTypeDevicePasswordVerifier ChallengeNameType = "DEVICE_PASSWORD_VERIFIER" + ChallengeNameTypeAdminNoSrpAuth ChallengeNameType = "ADMIN_NO_SRP_AUTH" + ChallengeNameTypeNewPasswordRequired ChallengeNameType = "NEW_PASSWORD_REQUIRED" ) // Values returns all known values for ChallengeNameType. 
Note that this can be @@ -189,8 +189,8 @@ type CompromisedCredentialsEventActionType string // Enum values for CompromisedCredentialsEventActionType const ( - CompromisedCredentialsEventActionTypeBlock CompromisedCredentialsEventActionType = "BLOCK" - CompromisedCredentialsEventActionTypeNo_action CompromisedCredentialsEventActionType = "NO_ACTION" + CompromisedCredentialsEventActionTypeBlock CompromisedCredentialsEventActionType = "BLOCK" + CompromisedCredentialsEventActionTypeNoAction CompromisedCredentialsEventActionType = "NO_ACTION" ) // Values returns all known values for CompromisedCredentialsEventActionType. Note @@ -208,8 +208,8 @@ type DefaultEmailOptionType string // Enum values for DefaultEmailOptionType const ( - DefaultEmailOptionTypeConfirm_with_link DefaultEmailOptionType = "CONFIRM_WITH_LINK" - DefaultEmailOptionTypeConfirm_with_code DefaultEmailOptionType = "CONFIRM_WITH_CODE" + DefaultEmailOptionTypeConfirmWithLink DefaultEmailOptionType = "CONFIRM_WITH_LINK" + DefaultEmailOptionTypeConfirmWithCode DefaultEmailOptionType = "CONFIRM_WITH_CODE" ) // Values returns all known values for DefaultEmailOptionType. Note that this can @@ -244,8 +244,8 @@ type DeviceRememberedStatusType string // Enum values for DeviceRememberedStatusType const ( - DeviceRememberedStatusTypeRemembered DeviceRememberedStatusType = "remembered" - DeviceRememberedStatusTypeNot_remembered DeviceRememberedStatusType = "not_remembered" + DeviceRememberedStatusTypeRemembered DeviceRememberedStatusType = "remembered" + DeviceRememberedStatusTypeNotRemembered DeviceRememberedStatusType = "not_remembered" ) // Values returns all known values for DeviceRememberedStatusType. Note that this @@ -286,8 +286,8 @@ type EmailSendingAccountType string // Enum values for EmailSendingAccountType const ( - EmailSendingAccountTypeCognito_default EmailSendingAccountType = "COGNITO_DEFAULT" - EmailSendingAccountTypeDeveloper EmailSendingAccountType = "DEVELOPER" + EmailSendingAccountTypeCognitoDefault EmailSendingAccountType = "COGNITO_DEFAULT" + EmailSendingAccountTypeDeveloper EmailSendingAccountType = "DEVELOPER" ) // Values returns all known values for EmailSendingAccountType. Note that this can @@ -304,9 +304,9 @@ type EventFilterType string // Enum values for EventFilterType const ( - EventFilterTypeSign_in EventFilterType = "SIGN_IN" - EventFilterTypePassword_change EventFilterType = "PASSWORD_CHANGE" - EventFilterTypeSign_up EventFilterType = "SIGN_UP" + EventFilterTypeSignIn EventFilterType = "SIGN_IN" + EventFilterTypePasswordChange EventFilterType = "PASSWORD_CHANGE" + EventFilterTypeSignUp EventFilterType = "SIGN_UP" ) // Values returns all known values for EventFilterType. 
Note that this can be @@ -362,14 +362,14 @@ type ExplicitAuthFlowsType string // Enum values for ExplicitAuthFlowsType const ( - ExplicitAuthFlowsTypeAdmin_no_srp_auth ExplicitAuthFlowsType = "ADMIN_NO_SRP_AUTH" - ExplicitAuthFlowsTypeCustom_auth_flow_only ExplicitAuthFlowsType = "CUSTOM_AUTH_FLOW_ONLY" - ExplicitAuthFlowsTypeUser_password_auth ExplicitAuthFlowsType = "USER_PASSWORD_AUTH" - ExplicitAuthFlowsTypeAllow_admin_user_password_auth ExplicitAuthFlowsType = "ALLOW_ADMIN_USER_PASSWORD_AUTH" - ExplicitAuthFlowsTypeAllow_custom_auth ExplicitAuthFlowsType = "ALLOW_CUSTOM_AUTH" - ExplicitAuthFlowsTypeAllow_user_password_auth ExplicitAuthFlowsType = "ALLOW_USER_PASSWORD_AUTH" - ExplicitAuthFlowsTypeAllow_user_srp_auth ExplicitAuthFlowsType = "ALLOW_USER_SRP_AUTH" - ExplicitAuthFlowsTypeAllow_refresh_token_auth ExplicitAuthFlowsType = "ALLOW_REFRESH_TOKEN_AUTH" + ExplicitAuthFlowsTypeAdminNoSrpAuth ExplicitAuthFlowsType = "ADMIN_NO_SRP_AUTH" + ExplicitAuthFlowsTypeCustomAuthFlowOnly ExplicitAuthFlowsType = "CUSTOM_AUTH_FLOW_ONLY" + ExplicitAuthFlowsTypeUserPasswordAuth ExplicitAuthFlowsType = "USER_PASSWORD_AUTH" + ExplicitAuthFlowsTypeAllowAdminUserPasswordAuth ExplicitAuthFlowsType = "ALLOW_ADMIN_USER_PASSWORD_AUTH" + ExplicitAuthFlowsTypeAllowCustomAuth ExplicitAuthFlowsType = "ALLOW_CUSTOM_AUTH" + ExplicitAuthFlowsTypeAllowUserPasswordAuth ExplicitAuthFlowsType = "ALLOW_USER_PASSWORD_AUTH" + ExplicitAuthFlowsTypeAllowUserSrpAuth ExplicitAuthFlowsType = "ALLOW_USER_SRP_AUTH" + ExplicitAuthFlowsTypeAllowRefreshTokenAuth ExplicitAuthFlowsType = "ALLOW_REFRESH_TOKEN_AUTH" ) // Values returns all known values for ExplicitAuthFlowsType. Note that this can be @@ -454,9 +454,9 @@ type OAuthFlowType string // Enum values for OAuthFlowType const ( - OAuthFlowTypeCode OAuthFlowType = "code" - OAuthFlowTypeImplicit OAuthFlowType = "implicit" - OAuthFlowTypeClient_credentials OAuthFlowType = "client_credentials" + OAuthFlowTypeCode OAuthFlowType = "code" + OAuthFlowTypeImplicit OAuthFlowType = "implicit" + OAuthFlowTypeClientCredentials OAuthFlowType = "client_credentials" ) // Values returns all known values for OAuthFlowType. Note that this can be @@ -493,9 +493,9 @@ type RecoveryOptionNameType string // Enum values for RecoveryOptionNameType const ( - RecoveryOptionNameTypeVerified_email RecoveryOptionNameType = "verified_email" - RecoveryOptionNameTypeVerified_phone_number RecoveryOptionNameType = "verified_phone_number" - RecoveryOptionNameTypeAdmin_only RecoveryOptionNameType = "admin_only" + RecoveryOptionNameTypeVerifiedEmail RecoveryOptionNameType = "verified_email" + RecoveryOptionNameTypeVerifiedPhoneNumber RecoveryOptionNameType = "verified_phone_number" + RecoveryOptionNameTypeAdminOnly RecoveryOptionNameType = "admin_only" ) // Values returns all known values for RecoveryOptionNameType. Note that this can @@ -623,8 +623,8 @@ type UsernameAttributeType string // Enum values for UsernameAttributeType const ( - UsernameAttributeTypePhone_number UsernameAttributeType = "phone_number" - UsernameAttributeTypeEmail UsernameAttributeType = "email" + UsernameAttributeTypePhoneNumber UsernameAttributeType = "phone_number" + UsernameAttributeTypeEmail UsernameAttributeType = "email" ) // Values returns all known values for UsernameAttributeType. 
Note that this can be @@ -661,13 +661,13 @@ type UserStatusType string // Enum values for UserStatusType const ( - UserStatusTypeUnconfirmed UserStatusType = "UNCONFIRMED" - UserStatusTypeConfirmed UserStatusType = "CONFIRMED" - UserStatusTypeArchived UserStatusType = "ARCHIVED" - UserStatusTypeCompromised UserStatusType = "COMPROMISED" - UserStatusTypeUnknown UserStatusType = "UNKNOWN" - UserStatusTypeReset_required UserStatusType = "RESET_REQUIRED" - UserStatusTypeForce_change_password UserStatusType = "FORCE_CHANGE_PASSWORD" + UserStatusTypeUnconfirmed UserStatusType = "UNCONFIRMED" + UserStatusTypeConfirmed UserStatusType = "CONFIRMED" + UserStatusTypeArchived UserStatusType = "ARCHIVED" + UserStatusTypeCompromised UserStatusType = "COMPROMISED" + UserStatusTypeUnknown UserStatusType = "UNKNOWN" + UserStatusTypeResetRequired UserStatusType = "RESET_REQUIRED" + UserStatusTypeForceChangePassword UserStatusType = "FORCE_CHANGE_PASSWORD" ) // Values returns all known values for UserStatusType. Note that this can be @@ -689,8 +689,8 @@ type VerifiedAttributeType string // Enum values for VerifiedAttributeType const ( - VerifiedAttributeTypePhone_number VerifiedAttributeType = "phone_number" - VerifiedAttributeTypeEmail VerifiedAttributeType = "email" + VerifiedAttributeTypePhoneNumber VerifiedAttributeType = "phone_number" + VerifiedAttributeTypeEmail VerifiedAttributeType = "email" ) // Values returns all known values for VerifiedAttributeType. Note that this can be diff --git a/service/cognitoidentityprovider/types/types.go b/service/cognitoidentityprovider/types/types.go index 4ade6b8298c..5fc3db40b7a 100644 --- a/service/cognitoidentityprovider/types/types.go +++ b/service/cognitoidentityprovider/types/types.go @@ -31,16 +31,16 @@ type AccountTakeoverActionType struct { // The event action. // - // * BLOCK Choosing this action will block the request. + // * BLOCK Choosing this action will block the request. // + // * + // MFA_IF_CONFIGURED Throw MFA challenge if user has configured it, else allow the + // request. // - // * MFA_IF_CONFIGURED Throw MFA challenge if user has configured it, else allow - // the request. + // * MFA_REQUIRED Throw MFA challenge if user has configured it, else + // block the request. // - // * MFA_REQUIRED Throw MFA challenge if user has configured it, - // else block the request. - // - // * NO_ACTION Allow the user sign-in. + // * NO_ACTION Allow the user sign-in. // // This member is required. EventAction AccountTakeoverEventActionType @@ -352,15 +352,15 @@ type EmailConfigurationType struct { // that configuration set are applied to the email. Configuration sets can be used // to apply the following types of rules to emails: // - // * Event publishing – - // Amazon SES can track the number of send, delivery, open, click, bounce, and - // complaint events for each email sent. Use event publishing to send information - // about these events to other AWS services such as SNS and CloudWatch. + // * Event publishing – Amazon + // SES can track the number of send, delivery, open, click, bounce, and complaint + // events for each email sent. Use event publishing to send information about these + // events to other AWS services such as SNS and CloudWatch. // - // * IP - // pool management – When leasing dedicated IP addresses with Amazon SES, you can - // create groups of IP addresses, called dedicated IP pools. You can then associate - // the dedicated IP pools with configuration sets. 
+ // * IP pool management – + // When leasing dedicated IP addresses with Amazon SES, you can create groups of IP + // addresses, called dedicated IP pools. You can then associate the dedicated IP + // pools with configuration sets. ConfigurationSet *string // Specifies whether Amazon Cognito emails your users by using its built-in email @@ -404,13 +404,13 @@ type EmailConfigurationType struct { // email address is used in one of the following ways, depending on the value that // you specify for the EmailSendingAccount parameter: // - // * If you specify + // * If you specify // COGNITO_DEFAULT, Amazon Cognito uses this address as the custom FROM address // when it emails your users by using its built-in email account. // - // * If you - // specify DEVELOPER, Amazon Cognito emails your users with this address by calling - // Amazon SES on your behalf. + // * If you specify + // DEVELOPER, Amazon Cognito emails your users with this address by calling Amazon + // SES on your behalf. SourceArn *string } @@ -529,76 +529,73 @@ type IdentityProviderType struct { // The identity provider details. The following list describes the provider detail // keys for each identity provider type. // - // * For Google and Login with Amazon: - // - // - // * client_id - // - // * client_secret + // * For Google and Login with Amazon: // - // * authorize_scopes + // * + // client_id // - // * For - // Facebook: + // * client_secret // - // * client_id + // * authorize_scopes // - // * client_secret + // * For Facebook: // - // * - // authorize_scopes + // * client_id // - // * api_version + // * + // client_secret // - // * For Sign in with Apple: + // * authorize_scopes // + // * api_version // - // * client_id + // * For Sign in with Apple: // - // * team_id + // * + // client_id // - // * key_id + // * team_id // - // * private_key + // * key_id // + // * private_key // // * authorize_scopes // - // * For OIDC providers: + // * For OIDC + // providers: // - // * client_id - // - // * - // client_secret + // * client_id // - // * attributes_request_method + // * client_secret // - // * oidc_issuer + // * attributes_request_method // + // * + // oidc_issuer // // * authorize_scopes // - // * authorize_url if not available from discovery URL - // specified by oidc_issuer key + // * authorize_url if not available from discovery + // URL specified by oidc_issuer key // - // * token_url if not available from - // discovery URL specified by oidc_issuer key + // * token_url if not available from discovery + // URL specified by oidc_issuer key // - // * attributes_url if not - // available from discovery URL specified by oidc_issuer key + // * attributes_url if not available from + // discovery URL specified by oidc_issuer key // - // * jwks_uri if - // not available from discovery URL specified by oidc_issuer key + // * jwks_uri if not available from + // discovery URL specified by oidc_issuer key // - // * - // authorize_scopes + // * authorize_scopes // - // * For SAML providers: + // * For SAML + // providers: // - // * MetadataFile OR - // MetadataURL + // * MetadataFile OR MetadataURL // - // * IDPSignOut optional + // * IDPSignOut optional ProviderDetails map[string]*string // The identity provider name. @@ -1083,30 +1080,29 @@ type UserImportJobType struct { // The status of the user import job. One of the following: // - // * Created - The - // job was created but not started. + // * Created - The job + // was created but not started. // - // * Pending - A transition state. 
You have - // started the job, but it has not begun importing users yet. + // * Pending - A transition state. You have started + // the job, but it has not begun importing users yet. // - // * InProgress - - // The job has started, and users are being imported. + // * InProgress - The job has + // started, and users are being imported. // - // * Stopping - You have - // stopped the job, but the job has not stopped importing users yet. + // * Stopping - You have stopped the job, + // but the job has not stopped importing users yet. // - // * Stopped - // - You have stopped the job, and the job has stopped importing users. + // * Stopped - You have stopped + // the job, and the job has stopped importing users. // - // * - // Succeeded - The job has completed successfully. + // * Succeeded - The job has + // completed successfully. // - // * Failed - The job has - // stopped due to an error. + // * Failed - The job has stopped due to an error. // - // * Expired - You created a job, but did not start - // the job within 24-48 hours. All data associated with the job was deleted, and - // the job cannot be started. + // * + // Expired - You created a job, but did not start the job within 24-48 hours. All + // data associated with the job was deleted, and the job cannot be started. Status UserImportJobStatusType // The user pool ID for the user pool that the users are being imported into. @@ -1119,16 +1115,16 @@ type UsernameConfigurationType struct { // Specifies whether username case sensitivity will be applied for all users in the // user pool through Cognito APIs. Valid values include: // - // * True : Enables case + // * True : Enables case // sensitivity for all username input. When this option is set to True, users must // sign in using the exact capitalization of their given username. For example, // “UserName”. This is the default value. // - // * False : Enables case insensitivity - // for all username input. For example, when this option is set to False, users - // will be able to sign in using either "username" or "Username". This option also - // enables both preferred_username and email alias to be case insensitive, in - // addition to the username attribute. + // * False : Enables case insensitivity for + // all username input. For example, when this option is set to False, users will be + // able to sign in using either "username" or "Username". This option also enables + // both preferred_username and email alias to be case insensitive, in addition to + // the username attribute. // // This member is required. CaseSensitive *bool @@ -1192,12 +1188,12 @@ type UserPoolClientType struct { // A list of allowed redirect (callback) URLs for the identity providers. A // redirect URI must: // - // * Be an absolute URI. + // * Be an absolute URI. // - // * Be registered with the + // * Be registered with the // authorization server. // - // * Not include a fragment component. + // * Not include a fragment component. // // See OAuth 2.0 - // Redirection Endpoint (https://tools.ietf.org/html/rfc6749#section-3.1.2). Amazon @@ -1220,17 +1216,17 @@ type UserPoolClientType struct { // The default redirect URI. Must be in the CallbackURLs list. A redirect URI // must: // - // * Be an absolute URI. + // * Be an absolute URI. // - // * Be registered with the authorization - // server. + // * Be registered with the authorization server. // - // * Not include a fragment component. + // * + // Not include a fragment component. 
// - // See OAuth 2.0 - Redirection - // Endpoint (https://tools.ietf.org/html/rfc6749#section-3.1.2). Amazon Cognito - // requires HTTPS over HTTP except for http://localhost for testing purposes only. - // App callback URLs such as myapp://example are also supported. + // See OAuth 2.0 - Redirection Endpoint + // (https://tools.ietf.org/html/rfc6749#section-3.1.2). Amazon Cognito requires + // HTTPS over HTTP except for http://localhost for testing purposes only. App + // callback URLs such as myapp://example are also supported. DefaultRedirectURI *string // The authentication flows that are supported by the user pool clients. Flow names @@ -1238,25 +1234,24 @@ type UserPoolClientType struct { // prefix. Note that values with ALLOW_ prefix cannot be used along with values // without ALLOW_ prefix. Valid values include: // - // * - // ALLOW_ADMIN_USER_PASSWORD_AUTH: Enable admin based user password authentication - // flow ADMIN_USER_PASSWORD_AUTH. This setting replaces the ADMIN_NO_SRP_AUTH - // setting. With this authentication flow, Cognito receives the password in the - // request instead of using the SRP (Secure Remote Password protocol) protocol to - // verify passwords. + // * ALLOW_ADMIN_USER_PASSWORD_AUTH: + // Enable admin based user password authentication flow ADMIN_USER_PASSWORD_AUTH. + // This setting replaces the ADMIN_NO_SRP_AUTH setting. With this authentication + // flow, Cognito receives the password in the request instead of using the SRP + // (Secure Remote Password protocol) protocol to verify passwords. // - // * ALLOW_CUSTOM_AUTH: Enable Lambda trigger based - // authentication. + // * + // ALLOW_CUSTOM_AUTH: Enable Lambda trigger based authentication. // - // * ALLOW_USER_PASSWORD_AUTH: Enable user password-based - // authentication. In this flow, Cognito receives the password in the request - // instead of using the SRP protocol to verify passwords. + // * + // ALLOW_USER_PASSWORD_AUTH: Enable user password-based authentication. In this + // flow, Cognito receives the password in the request instead of using the SRP + // protocol to verify passwords. // - // * - // ALLOW_USER_SRP_AUTH: Enable SRP based authentication. + // * ALLOW_USER_SRP_AUTH: Enable SRP based + // authentication. // - // * - // ALLOW_REFRESH_TOKEN_AUTH: Enable authflow to refresh tokens. + // * ALLOW_REFRESH_TOKEN_AUTH: Enable authflow to refresh tokens. ExplicitAuthFlows []ExplicitAuthFlowsType // The time limit, specified by tokenValidityUnits, defaulting to hours, after @@ -1278,10 +1273,10 @@ type UserPoolClientType struct { // LEGACY, those APIs will return a UserNotFoundException exception if the user // does not exist in the user pool. Valid values include: // - // * ENABLED - This + // * ENABLED - This // prevents user existence-related errors. // - // * LEGACY - This represents the old + // * LEGACY - This represents the old // behavior of Cognito where user existence related errors are not // prevented. // @@ -1407,15 +1402,15 @@ type UserPoolType struct { // Can be one of the following values: // - // * OFF - MFA tokens are not required and + // * OFF - MFA tokens are not required and // cannot be specified during user registration. // - // * ON - MFA tokens are - // required for all user registrations. You can only specify required when you are - // initially creating a user pool. + // * ON - MFA tokens are required + // for all user registrations. You can only specify required when you are initially + // creating a user pool. 
// - // * OPTIONAL - Users have the option when - // registering to create an MFA token. + // * OPTIONAL - Users have the option when registering to + // create an MFA token. MfaConfiguration UserPoolMfaType // The name of the user pool. @@ -1485,27 +1480,26 @@ type UserType struct { // The user status. Can be one of the following: // - // * UNCONFIRMED - User has been + // * UNCONFIRMED - User has been // created but not confirmed. // - // * CONFIRMED - User has been confirmed. - // - // * - // ARCHIVED - User is no longer active. + // * CONFIRMED - User has been confirmed. // - // * COMPROMISED - User is disabled due - // to a potential security threat. + // * ARCHIVED - + // User is no longer active. // - // * UNKNOWN - User status is not known. + // * COMPROMISED - User is disabled due to a potential + // security threat. // + // * UNKNOWN - User status is not known. // - // * RESET_REQUIRED - User is confirmed, but the user must request a code and reset - // his or her password before he or she can sign in. + // * RESET_REQUIRED - User + // is confirmed, but the user must request a code and reset his or her password + // before he or she can sign in. // - // * FORCE_CHANGE_PASSWORD - - // The user is confirmed and the user can sign in using a temporary password, but - // on first sign-in, the user must change his or her password to a new value before - // doing anything else. + // * FORCE_CHANGE_PASSWORD - The user is confirmed + // and the user can sign in using a temporary password, but on first sign-in, the + // user must change his or her password to a new value before doing anything else. UserStatus UserStatusType // The user name of the user you wish to describe. diff --git a/service/cognitosync/api_op_GetBulkPublishDetails.go b/service/cognitosync/api_op_GetBulkPublishDetails.go index 30873854cdf..47e7cc64143 100644 --- a/service/cognitosync/api_op_GetBulkPublishDetails.go +++ b/service/cognitosync/api_op_GetBulkPublishDetails.go @@ -52,18 +52,17 @@ type GetBulkPublishDetailsOutput struct { // Status of the last bulk publish operation, valid values are: // - // * NOT_STARTED - // - No bulk publish has been requested for this identity pool + // * NOT_STARTED - No + // bulk publish has been requested for this identity pool // - // * IN_PROGRESS - - // Data is being published to the configured stream + // * IN_PROGRESS - Data is + // being published to the configured stream // - // * SUCCEEDED - All data for - // the identity pool has been published to the configured stream + // * SUCCEEDED - All data for the + // identity pool has been published to the configured stream // - // * FAILED - - // Some portion of the data has failed to publish, check FailureMessage for the - // cause. + // * FAILED - Some + // portion of the data has failed to publish, check FailureMessage for the cause. 
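For reference, a minimal sketch (not generated code, not part of this patch) of how a caller might branch on the bulk-publish statuses listed above using the renamed CamelCase enum values that appear later in service/cognitosync/types/enums.go (BulkPublishStatusNotStarted, BulkPublishStatusInProgress, and so on). The helper name describeBulkPublish, the stand-in output value, and the assumption that FailureMessage is a *string are for illustration only:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go-v2/service/cognitosync"
        "github.com/aws/aws-sdk-go-v2/service/cognitosync/types"
    )

    // describeBulkPublish maps a GetBulkPublishDetails status to a short
    // message using the renamed CamelCase enum values from this patch.
    func describeBulkPublish(out *cognitosync.GetBulkPublishDetailsOutput) string {
        switch out.BulkPublishStatus {
        case types.BulkPublishStatusNotStarted:
            return "no bulk publish has been requested for this identity pool"
        case types.BulkPublishStatusInProgress:
            return "data is being published to the configured stream"
        case types.BulkPublishStatusSucceeded:
            return "all data for the identity pool has been published"
        case types.BulkPublishStatusFailed:
            msg := "bulk publish failed"
            if out.FailureMessage != nil { // FailureMessage assumed to be *string
                msg += ": " + *out.FailureMessage
            }
            return msg
        default:
            return fmt.Sprintf("unknown status %q", out.BulkPublishStatus)
        }
    }

    func main() {
        // Stand-in value for demonstration; in real code this would come
        // from a call to client.GetBulkPublishDetails.
        out := &cognitosync.GetBulkPublishDetailsOutput{
            BulkPublishStatus: types.BulkPublishStatusInProgress,
        }
        fmt.Println(describeBulkPublish(out))
    }
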
BulkPublishStatus types.BulkPublishStatus // If BulkPublishStatus is FAILED this field will contain the error message that diff --git a/service/cognitosync/types/enums.go b/service/cognitosync/types/enums.go index 72c18e8d096..3329e8edb4c 100644 --- a/service/cognitosync/types/enums.go +++ b/service/cognitosync/types/enums.go @@ -6,10 +6,10 @@ type BulkPublishStatus string // Enum values for BulkPublishStatus const ( - BulkPublishStatusNot_started BulkPublishStatus = "NOT_STARTED" - BulkPublishStatusIn_progress BulkPublishStatus = "IN_PROGRESS" - BulkPublishStatusFailed BulkPublishStatus = "FAILED" - BulkPublishStatusSucceeded BulkPublishStatus = "SUCCEEDED" + BulkPublishStatusNotStarted BulkPublishStatus = "NOT_STARTED" + BulkPublishStatusInProgress BulkPublishStatus = "IN_PROGRESS" + BulkPublishStatusFailed BulkPublishStatus = "FAILED" + BulkPublishStatusSucceeded BulkPublishStatus = "SUCCEEDED" ) // Values returns all known values for BulkPublishStatus. Note that this can be @@ -46,10 +46,10 @@ type Platform string // Enum values for Platform const ( - PlatformApns Platform = "APNS" - PlatformApns_sandbox Platform = "APNS_SANDBOX" - PlatformGcm Platform = "GCM" - PlatformAdm Platform = "ADM" + PlatformApns Platform = "APNS" + PlatformApnsSandbox Platform = "APNS_SANDBOX" + PlatformGcm Platform = "GCM" + PlatformAdm Platform = "ADM" ) // Values returns all known values for Platform. Note that this can be expanded in diff --git a/service/cognitosync/types/types.go b/service/cognitosync/types/types.go index cd6874c9494..982b09f73d6 100644 --- a/service/cognitosync/types/types.go +++ b/service/cognitosync/types/types.go @@ -24,10 +24,10 @@ type CognitoStreams struct { // Status of the Cognito streams. Valid values are: // - // * ENABLED - Streaming of + // * ENABLED - Streaming of // updates to identity pool is enabled. // - // * DISABLED - Streaming of updates to + // * DISABLED - Streaming of updates to // identity pool is disabled. Bulk publish will also fail if StreamingStatus is // DISABLED. StreamingStatus StreamingStatus diff --git a/service/comprehend/api_op_CreateDocumentClassifier.go b/service/comprehend/api_op_CreateDocumentClassifier.go index 6b96989428f..7b6331121f1 100644 --- a/service/comprehend/api_op_CreateDocumentClassifier.go +++ b/service/comprehend/api_op_CreateDocumentClassifier.go @@ -84,9 +84,9 @@ type CreateDocumentClassifierInput struct { // process the analysis job. The VolumeKmsKeyId can be either of the following // formats: // - // * KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" + // * KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" // - // * Amazon + // * Amazon // Resource Name (ARN) of a KMS Key: // "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab" VolumeKmsKeyId *string diff --git a/service/comprehend/api_op_CreateEntityRecognizer.go b/service/comprehend/api_op_CreateEntityRecognizer.go index 18dab40e961..97d5c9b7222 100644 --- a/service/comprehend/api_op_CreateEntityRecognizer.go +++ b/service/comprehend/api_op_CreateEntityRecognizer.go @@ -74,9 +74,9 @@ type CreateEntityRecognizerInput struct { // process the analysis job. 
The VolumeKmsKeyId can be either of the following // formats: // - // * KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" + // * KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" // - // * Amazon + // * Amazon // Resource Name (ARN) of a KMS Key: // "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab" VolumeKmsKeyId *string diff --git a/service/comprehend/api_op_StartDocumentClassificationJob.go b/service/comprehend/api_op_StartDocumentClassificationJob.go index b2765a376c6..22198df6d71 100644 --- a/service/comprehend/api_op_StartDocumentClassificationJob.go +++ b/service/comprehend/api_op_StartDocumentClassificationJob.go @@ -65,9 +65,9 @@ type StartDocumentClassificationJobInput struct { // process the analysis job. The VolumeKmsKeyId can be either of the following // formats: // - // * KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" + // * KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" // - // * Amazon + // * Amazon // Resource Name (ARN) of a KMS Key: // "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab" VolumeKmsKeyId *string @@ -87,23 +87,23 @@ type StartDocumentClassificationJobOutput struct { // The status of the job: // - // * SUBMITTED - The job has been received and queued - // for processing. + // * SUBMITTED - The job has been received and queued for + // processing. // - // * IN_PROGRESS - Amazon Comprehend is processing the job. + // * IN_PROGRESS - Amazon Comprehend is processing the job. // + // * + // COMPLETED - The job was successfully completed and the output is available. // - // * COMPLETED - The job was successfully completed and the output is available. + // * + // FAILED - The job did not complete. For details, use the operation. // - // - // * FAILED - The job did not complete. For details, use the operation. - // - // * + // * // STOP_REQUESTED - Amazon Comprehend has received a stop request for the job and // is processing the request. // - // * STOPPED - The job was successfully stopped - // without completing. + // * STOPPED - The job was successfully stopped without + // completing. JobStatus types.JobStatus // Metadata pertaining to the operation's result. diff --git a/service/comprehend/api_op_StartDominantLanguageDetectionJob.go b/service/comprehend/api_op_StartDominantLanguageDetectionJob.go index 877f48b99eb..3cea2061da5 100644 --- a/service/comprehend/api_op_StartDominantLanguageDetectionJob.go +++ b/service/comprehend/api_op_StartDominantLanguageDetectionJob.go @@ -62,9 +62,9 @@ type StartDominantLanguageDetectionJobInput struct { // process the analysis job. The VolumeKmsKeyId can be either of the following // formats: // - // * KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" + // * KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" // - // * Amazon + // * Amazon // Resource Name (ARN) of a KMS Key: // "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab" VolumeKmsKeyId *string @@ -84,17 +84,16 @@ type StartDominantLanguageDetectionJobOutput struct { // The status of the job. // - // * SUBMITTED - The job has been received and is - // queued for processing. + // * SUBMITTED - The job has been received and is queued + // for processing. // - // * IN_PROGRESS - Amazon Comprehend is processing the - // job. + // * IN_PROGRESS - Amazon Comprehend is processing the job. // - // * COMPLETED - The job was successfully completed and the output is - // available. + // * + // COMPLETED - The job was successfully completed and the output is available. 
// - // * FAILED - The job did not complete. To get details, use the - // operation. + // * + // FAILED - The job did not complete. To get details, use the operation. JobStatus types.JobStatus // Metadata pertaining to the operation's result. diff --git a/service/comprehend/api_op_StartEntitiesDetectionJob.go b/service/comprehend/api_op_StartEntitiesDetectionJob.go index 52e069c1e09..57d8fad76e8 100644 --- a/service/comprehend/api_op_StartEntitiesDetectionJob.go +++ b/service/comprehend/api_op_StartEntitiesDetectionJob.go @@ -79,9 +79,9 @@ type StartEntitiesDetectionJobInput struct { // process the analysis job. The VolumeKmsKeyId can be either of the following // formats: // - // * KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" + // * KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" // - // * Amazon + // * Amazon // Resource Name (ARN) of a KMS Key: // "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab" VolumeKmsKeyId *string @@ -101,23 +101,23 @@ type StartEntitiesDetectionJobOutput struct { // The status of the job. // - // * SUBMITTED - The job has been received and is - // queued for processing. + // * SUBMITTED - The job has been received and is queued + // for processing. // - // * IN_PROGRESS - Amazon Comprehend is processing the - // job. + // * IN_PROGRESS - Amazon Comprehend is processing the job. // - // * COMPLETED - The job was successfully completed and the output is - // available. + // * + // COMPLETED - The job was successfully completed and the output is available. // - // * FAILED - The job did not complete. To get details, use the - // operation. + // * + // FAILED - The job did not complete. To get details, use the operation. // - // * STOP_REQUESTED - Amazon Comprehend has received a stop request - // for the job and is processing the request. + // * + // STOP_REQUESTED - Amazon Comprehend has received a stop request for the job and + // is processing the request. // - // * STOPPED - The job was - // successfully stopped without completing. + // * STOPPED - The job was successfully stopped without + // completing. JobStatus types.JobStatus // Metadata pertaining to the operation's result. diff --git a/service/comprehend/api_op_StartKeyPhrasesDetectionJob.go b/service/comprehend/api_op_StartKeyPhrasesDetectionJob.go index 72aa4432d8a..2a2b01ba2b6 100644 --- a/service/comprehend/api_op_StartKeyPhrasesDetectionJob.go +++ b/service/comprehend/api_op_StartKeyPhrasesDetectionJob.go @@ -69,9 +69,9 @@ type StartKeyPhrasesDetectionJobInput struct { // process the analysis job. The VolumeKmsKeyId can be either of the following // formats: // - // * KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" + // * KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" // - // * Amazon + // * Amazon // Resource Name (ARN) of a KMS Key: // "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab" VolumeKmsKeyId *string @@ -91,17 +91,16 @@ type StartKeyPhrasesDetectionJobOutput struct { // The status of the job. // - // * SUBMITTED - The job has been received and is - // queued for processing. + // * SUBMITTED - The job has been received and is queued + // for processing. // - // * IN_PROGRESS - Amazon Comprehend is processing the - // job. + // * IN_PROGRESS - Amazon Comprehend is processing the job. // - // * COMPLETED - The job was successfully completed and the output is - // available. + // * + // COMPLETED - The job was successfully completed and the output is available. // - // * FAILED - The job did not complete. 
To get details, use the - // operation. + // * + // FAILED - The job did not complete. To get details, use the operation. JobStatus types.JobStatus // Metadata pertaining to the operation's result. diff --git a/service/comprehend/api_op_StartSentimentDetectionJob.go b/service/comprehend/api_op_StartSentimentDetectionJob.go index 2b51aeeb8da..e617661e860 100644 --- a/service/comprehend/api_op_StartSentimentDetectionJob.go +++ b/service/comprehend/api_op_StartSentimentDetectionJob.go @@ -69,9 +69,9 @@ type StartSentimentDetectionJobInput struct { // process the analysis job. The VolumeKmsKeyId can be either of the following // formats: // - // * KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" + // * KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" // - // * Amazon + // * Amazon // Resource Name (ARN) of a KMS Key: // "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab" VolumeKmsKeyId *string @@ -91,17 +91,16 @@ type StartSentimentDetectionJobOutput struct { // The status of the job. // - // * SUBMITTED - The job has been received and is - // queued for processing. + // * SUBMITTED - The job has been received and is queued + // for processing. // - // * IN_PROGRESS - Amazon Comprehend is processing the - // job. + // * IN_PROGRESS - Amazon Comprehend is processing the job. // - // * COMPLETED - The job was successfully completed and the output is - // available. + // * + // COMPLETED - The job was successfully completed and the output is available. // - // * FAILED - The job did not complete. To get details, use the - // operation. + // * + // FAILED - The job did not complete. To get details, use the operation. JobStatus types.JobStatus // Metadata pertaining to the operation's result. diff --git a/service/comprehend/api_op_StartTopicsDetectionJob.go b/service/comprehend/api_op_StartTopicsDetectionJob.go index e6926cd602f..9af40b29cc2 100644 --- a/service/comprehend/api_op_StartTopicsDetectionJob.go +++ b/service/comprehend/api_op_StartTopicsDetectionJob.go @@ -67,9 +67,9 @@ type StartTopicsDetectionJobInput struct { // process the analysis job. The VolumeKmsKeyId can be either of the following // formats: // - // * KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" + // * KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" // - // * Amazon + // * Amazon // Resource Name (ARN) of a KMS Key: // "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab" VolumeKmsKeyId *string @@ -89,16 +89,16 @@ type StartTopicsDetectionJobOutput struct { // The status of the job: // - // * SUBMITTED - The job has been received and is - // queued for processing. + // * SUBMITTED - The job has been received and is queued + // for processing. // - // * IN_PROGRESS - Amazon Comprehend is processing the - // job. + // * IN_PROGRESS - Amazon Comprehend is processing the job. // - // * COMPLETED - The job was successfully completed and the output is - // available. + // * + // COMPLETED - The job was successfully completed and the output is available. // - // * FAILED - The job did not complete. To get details, use the + // * + // FAILED - The job did not complete. To get details, use the // DescribeTopicDetectionJob operation. 
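For reference, a minimal sketch (not generated code, not part of this patch) of classifying the job states enumerated above as terminal or non-terminal, using the renamed CamelCase JobStatus values that appear later in service/comprehend/types/enums.go. The helper name isTerminalJobStatus is hypothetical:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go-v2/service/comprehend/types"
    )

    // isTerminalJobStatus reports whether an Amazon Comprehend analysis job
    // has finished, using the CamelCase enum values introduced in this patch.
    func isTerminalJobStatus(s types.JobStatus) bool {
        switch s {
        case types.JobStatusCompleted, types.JobStatusFailed, types.JobStatusStopped:
            return true
        case types.JobStatusSubmitted, types.JobStatusInProgress, types.JobStatusStopRequested:
            return false
        default:
            // The service may add new values; treat unknown ones as still running.
            return false
        }
    }

    func main() {
        fmt.Println(isTerminalJobStatus(types.JobStatusInProgress)) // false
        fmt.Println(isTerminalJobStatus(types.JobStatusCompleted))  // true
    }
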
JobStatus types.JobStatus diff --git a/service/comprehend/types/enums.go b/service/comprehend/types/enums.go index 54fc09c1dfa..9db20ed466a 100644 --- a/service/comprehend/types/enums.go +++ b/service/comprehend/types/enums.go @@ -6,8 +6,8 @@ type DocumentClassifierDataFormat string // Enum values for DocumentClassifierDataFormat const ( - DocumentClassifierDataFormatComprehend_csv DocumentClassifierDataFormat = "COMPREHEND_CSV" - DocumentClassifierDataFormatAugmented_manifest DocumentClassifierDataFormat = "AUGMENTED_MANIFEST" + DocumentClassifierDataFormatComprehendCsv DocumentClassifierDataFormat = "COMPREHEND_CSV" + DocumentClassifierDataFormatAugmentedManifest DocumentClassifierDataFormat = "AUGMENTED_MANIFEST" ) // Values returns all known values for DocumentClassifierDataFormat. Note that this @@ -24,8 +24,8 @@ type DocumentClassifierMode string // Enum values for DocumentClassifierMode const ( - DocumentClassifierModeMulti_class DocumentClassifierMode = "MULTI_CLASS" - DocumentClassifierModeMulti_label DocumentClassifierMode = "MULTI_LABEL" + DocumentClassifierModeMultiClass DocumentClassifierMode = "MULTI_CLASS" + DocumentClassifierModeMultiLabel DocumentClassifierMode = "MULTI_LABEL" ) // Values returns all known values for DocumentClassifierMode. Note that this can @@ -42,11 +42,11 @@ type EndpointStatus string // Enum values for EndpointStatus const ( - EndpointStatusCreating EndpointStatus = "CREATING" - EndpointStatusDeleting EndpointStatus = "DELETING" - EndpointStatusFailed EndpointStatus = "FAILED" - EndpointStatusIn_service EndpointStatus = "IN_SERVICE" - EndpointStatusUpdating EndpointStatus = "UPDATING" + EndpointStatusCreating EndpointStatus = "CREATING" + EndpointStatusDeleting EndpointStatus = "DELETING" + EndpointStatusFailed EndpointStatus = "FAILED" + EndpointStatusInService EndpointStatus = "IN_SERVICE" + EndpointStatusUpdating EndpointStatus = "UPDATING" ) // Values returns all known values for EndpointStatus. Note that this can be @@ -66,8 +66,8 @@ type EntityRecognizerDataFormat string // Enum values for EntityRecognizerDataFormat const ( - EntityRecognizerDataFormatComprehend_csv EntityRecognizerDataFormat = "COMPREHEND_CSV" - EntityRecognizerDataFormatAugmented_manifest EntityRecognizerDataFormat = "AUGMENTED_MANIFEST" + EntityRecognizerDataFormatComprehendCsv EntityRecognizerDataFormat = "COMPREHEND_CSV" + EntityRecognizerDataFormatAugmentedManifest EntityRecognizerDataFormat = "AUGMENTED_MANIFEST" ) // Values returns all known values for EntityRecognizerDataFormat. Note that this @@ -84,15 +84,15 @@ type EntityType string // Enum values for EntityType const ( - EntityTypePerson EntityType = "PERSON" - EntityTypeLocation EntityType = "LOCATION" - EntityTypeOrganization EntityType = "ORGANIZATION" - EntityTypeCommercial_item EntityType = "COMMERCIAL_ITEM" - EntityTypeEvent EntityType = "EVENT" - EntityTypeDate EntityType = "DATE" - EntityTypeQuantity EntityType = "QUANTITY" - EntityTypeTitle EntityType = "TITLE" - EntityTypeOther EntityType = "OTHER" + EntityTypePerson EntityType = "PERSON" + EntityTypeLocation EntityType = "LOCATION" + EntityTypeOrganization EntityType = "ORGANIZATION" + EntityTypeCommercialItem EntityType = "COMMERCIAL_ITEM" + EntityTypeEvent EntityType = "EVENT" + EntityTypeDate EntityType = "DATE" + EntityTypeQuantity EntityType = "QUANTITY" + EntityTypeTitle EntityType = "TITLE" + EntityTypeOther EntityType = "OTHER" ) // Values returns all known values for EntityType. 
Note that this can be expanded @@ -116,8 +116,8 @@ type InputFormat string // Enum values for InputFormat const ( - InputFormatOne_doc_per_file InputFormat = "ONE_DOC_PER_FILE" - InputFormatOne_doc_per_line InputFormat = "ONE_DOC_PER_LINE" + InputFormatOneDocPerFile InputFormat = "ONE_DOC_PER_FILE" + InputFormatOneDocPerLine InputFormat = "ONE_DOC_PER_LINE" ) // Values returns all known values for InputFormat. Note that this can be expanded @@ -134,12 +134,12 @@ type JobStatus string // Enum values for JobStatus const ( - JobStatusSubmitted JobStatus = "SUBMITTED" - JobStatusIn_progress JobStatus = "IN_PROGRESS" - JobStatusCompleted JobStatus = "COMPLETED" - JobStatusFailed JobStatus = "FAILED" - JobStatusStop_requested JobStatus = "STOP_REQUESTED" - JobStatusStopped JobStatus = "STOPPED" + JobStatusSubmitted JobStatus = "SUBMITTED" + JobStatusInProgress JobStatus = "IN_PROGRESS" + JobStatusCompleted JobStatus = "COMPLETED" + JobStatusFailed JobStatus = "FAILED" + JobStatusStopRequested JobStatus = "STOP_REQUESTED" + JobStatusStopped JobStatus = "STOPPED" ) // Values returns all known values for JobStatus. Note that this can be expanded in @@ -160,18 +160,18 @@ type LanguageCode string // Enum values for LanguageCode const ( - LanguageCodeEn LanguageCode = "en" - LanguageCodeEs LanguageCode = "es" - LanguageCodeFr LanguageCode = "fr" - LanguageCodeDe LanguageCode = "de" - LanguageCodeIt LanguageCode = "it" - LanguageCodePt LanguageCode = "pt" - LanguageCodeAr LanguageCode = "ar" - LanguageCodeHi LanguageCode = "hi" - LanguageCodeJa LanguageCode = "ja" - LanguageCodeKo LanguageCode = "ko" - LanguageCodeZh LanguageCode = "zh" - LanguageCodeZh_tw LanguageCode = "zh-TW" + LanguageCodeEn LanguageCode = "en" + LanguageCodeEs LanguageCode = "es" + LanguageCodeFr LanguageCode = "fr" + LanguageCodeDe LanguageCode = "de" + LanguageCodeIt LanguageCode = "it" + LanguageCodePt LanguageCode = "pt" + LanguageCodeAr LanguageCode = "ar" + LanguageCodeHi LanguageCode = "hi" + LanguageCodeJa LanguageCode = "ja" + LanguageCodeKo LanguageCode = "ko" + LanguageCodeZh LanguageCode = "zh" + LanguageCodeZhTw LanguageCode = "zh-TW" ) // Values returns all known values for LanguageCode. Note that this can be expanded @@ -198,13 +198,13 @@ type ModelStatus string // Enum values for ModelStatus const ( - ModelStatusSubmitted ModelStatus = "SUBMITTED" - ModelStatusTraining ModelStatus = "TRAINING" - ModelStatusDeleting ModelStatus = "DELETING" - ModelStatusStop_requested ModelStatus = "STOP_REQUESTED" - ModelStatusStopped ModelStatus = "STOPPED" - ModelStatusIn_error ModelStatus = "IN_ERROR" - ModelStatusTrained ModelStatus = "TRAINED" + ModelStatusSubmitted ModelStatus = "SUBMITTED" + ModelStatusTraining ModelStatus = "TRAINING" + ModelStatusDeleting ModelStatus = "DELETING" + ModelStatusStopRequested ModelStatus = "STOP_REQUESTED" + ModelStatusStopped ModelStatus = "STOPPED" + ModelStatusInError ModelStatus = "IN_ERROR" + ModelStatusTrained ModelStatus = "TRAINED" ) // Values returns all known values for ModelStatus. 
Note that this can be expanded @@ -276,8 +276,8 @@ type PiiEntitiesDetectionMaskMode string // Enum values for PiiEntitiesDetectionMaskMode const ( - PiiEntitiesDetectionMaskModeMask PiiEntitiesDetectionMaskMode = "MASK" - PiiEntitiesDetectionMaskModeReplace_with_pii_entity_type PiiEntitiesDetectionMaskMode = "REPLACE_WITH_PII_ENTITY_TYPE" + PiiEntitiesDetectionMaskModeMask PiiEntitiesDetectionMaskMode = "MASK" + PiiEntitiesDetectionMaskModeReplaceWithPiiEntityType PiiEntitiesDetectionMaskMode = "REPLACE_WITH_PII_ENTITY_TYPE" ) // Values returns all known values for PiiEntitiesDetectionMaskMode. Note that this @@ -294,8 +294,8 @@ type PiiEntitiesDetectionMode string // Enum values for PiiEntitiesDetectionMode const ( - PiiEntitiesDetectionModeOnly_redaction PiiEntitiesDetectionMode = "ONLY_REDACTION" - PiiEntitiesDetectionModeOnly_offsets PiiEntitiesDetectionMode = "ONLY_OFFSETS" + PiiEntitiesDetectionModeOnlyRedaction PiiEntitiesDetectionMode = "ONLY_REDACTION" + PiiEntitiesDetectionModeOnlyOffsets PiiEntitiesDetectionMode = "ONLY_OFFSETS" ) // Values returns all known values for PiiEntitiesDetectionMode. Note that this can @@ -312,29 +312,29 @@ type PiiEntityType string // Enum values for PiiEntityType const ( - PiiEntityTypeBank_account_number PiiEntityType = "BANK_ACCOUNT_NUMBER" - PiiEntityTypeBank_routing PiiEntityType = "BANK_ROUTING" - PiiEntityTypeCredit_debit_number PiiEntityType = "CREDIT_DEBIT_NUMBER" - PiiEntityTypeCredit_debit_cvv PiiEntityType = "CREDIT_DEBIT_CVV" - PiiEntityTypeCredit_debit_expiry PiiEntityType = "CREDIT_DEBIT_EXPIRY" - PiiEntityTypePin PiiEntityType = "PIN" - PiiEntityTypeEmail PiiEntityType = "EMAIL" - PiiEntityTypeAddress PiiEntityType = "ADDRESS" - PiiEntityTypeName PiiEntityType = "NAME" - PiiEntityTypePhone PiiEntityType = "PHONE" - PiiEntityTypeSsn PiiEntityType = "SSN" - PiiEntityTypeDate_time PiiEntityType = "DATE_TIME" - PiiEntityTypePassport_number PiiEntityType = "PASSPORT_NUMBER" - PiiEntityTypeDriver_id PiiEntityType = "DRIVER_ID" - PiiEntityTypeUrl PiiEntityType = "URL" - PiiEntityTypeAge PiiEntityType = "AGE" - PiiEntityTypeUsername PiiEntityType = "USERNAME" - PiiEntityTypePassword PiiEntityType = "PASSWORD" - PiiEntityTypeAws_access_key PiiEntityType = "AWS_ACCESS_KEY" - PiiEntityTypeAws_secret_key PiiEntityType = "AWS_SECRET_KEY" - PiiEntityTypeIp_address PiiEntityType = "IP_ADDRESS" - PiiEntityTypeMac_address PiiEntityType = "MAC_ADDRESS" - PiiEntityTypeAll PiiEntityType = "ALL" + PiiEntityTypeBankAccountNumber PiiEntityType = "BANK_ACCOUNT_NUMBER" + PiiEntityTypeBankRouting PiiEntityType = "BANK_ROUTING" + PiiEntityTypeCreditDebitNumber PiiEntityType = "CREDIT_DEBIT_NUMBER" + PiiEntityTypeCreditDebitCvv PiiEntityType = "CREDIT_DEBIT_CVV" + PiiEntityTypeCreditDebitExpiry PiiEntityType = "CREDIT_DEBIT_EXPIRY" + PiiEntityTypePin PiiEntityType = "PIN" + PiiEntityTypeEmail PiiEntityType = "EMAIL" + PiiEntityTypeAddress PiiEntityType = "ADDRESS" + PiiEntityTypeName PiiEntityType = "NAME" + PiiEntityTypePhone PiiEntityType = "PHONE" + PiiEntityTypeSsn PiiEntityType = "SSN" + PiiEntityTypeDateTime PiiEntityType = "DATE_TIME" + PiiEntityTypePassportNumber PiiEntityType = "PASSPORT_NUMBER" + PiiEntityTypeDriverId PiiEntityType = "DRIVER_ID" + PiiEntityTypeUrl PiiEntityType = "URL" + PiiEntityTypeAge PiiEntityType = "AGE" + PiiEntityTypeUsername PiiEntityType = "USERNAME" + PiiEntityTypePassword PiiEntityType = "PASSWORD" + PiiEntityTypeAwsAccessKey PiiEntityType = "AWS_ACCESS_KEY" + PiiEntityTypeAwsSecretKey PiiEntityType = "AWS_SECRET_KEY" + 
PiiEntityTypeIpAddress PiiEntityType = "IP_ADDRESS" + PiiEntityTypeMacAddress PiiEntityType = "MAC_ADDRESS" + PiiEntityTypeAll PiiEntityType = "ALL" ) // Values returns all known values for PiiEntityType. Note that this can be diff --git a/service/comprehend/types/types.go b/service/comprehend/types/types.go index cf45c2fe2cb..d4ad3e90b59 100644 --- a/service/comprehend/types/types.go +++ b/service/comprehend/types/types.go @@ -250,9 +250,9 @@ type DocumentClassificationJobProperties struct { // process the analysis job. The VolumeKmsKeyId can be either of the following // formats: // - // * KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" + // * KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" // - // * Amazon + // * Amazon // Resource Name (ARN) of a KMS Key: // "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab" VolumeKmsKeyId *string @@ -295,12 +295,12 @@ type DocumentClassifierInputDataConfig struct { // The format of your training data: // - // * COMPREHEND_CSV: A two-column CSV file, + // * COMPREHEND_CSV: A two-column CSV file, // where labels are provided in the first column, and documents are provided in the // second. If you use this value, you must provide the S3Uri parameter in your // request. // - // * AUGMENTED_MANIFEST: A labeled dataset that is produced by Amazon + // * AUGMENTED_MANIFEST: A labeled dataset that is produced by Amazon // SageMaker Ground Truth. This file is in JSON lines format. Each line is a // complete JSON object that contains a training document and its associated // labels. If you use this value, you must provide the AugmentedManifests parameter @@ -335,16 +335,16 @@ type DocumentClassifierOutputDataConfig struct { // encrypt the output results from an analysis job. The KmsKeyId can be one of the // following formats: // - // * KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" + // * KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" // - // - // * Amazon Resource Name (ARN) of a KMS Key: + // * + // Amazon Resource Name (ARN) of a KMS Key: // "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab" // + // * + // KMS Key Alias: "alias/ExampleAlias" // - // * KMS Key Alias: "alias/ExampleAlias" - // - // * ARN of a KMS Key Alias: + // * ARN of a KMS Key Alias: // "arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias" KmsKeyId *string @@ -420,9 +420,9 @@ type DocumentClassifierProperties struct { // process the analysis job. The VolumeKmsKeyId can be either of the following // formats: // - // * KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" + // * KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" // - // * Amazon + // * Amazon // Resource Name (ARN) of a KMS Key: // "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab" VolumeKmsKeyId *string @@ -520,9 +520,9 @@ type DominantLanguageDetectionJobProperties struct { // process the analysis job. The VolumeKmsKeyId can be either of the following // formats: // - // * KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" + // * KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" // - // * Amazon + // * Amazon // Resource Name (ARN) of a KMS Key: // "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab" VolumeKmsKeyId *string @@ -652,9 +652,9 @@ type EntitiesDetectionJobProperties struct { // process the analysis job. 
The VolumeKmsKeyId can be either of the following // formats: // - // * KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" + // * KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" // - // * Amazon + // * Amazon // Resource Name (ARN) of a KMS Key: // "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab" VolumeKmsKeyId *string @@ -786,23 +786,22 @@ type EntityRecognizerInputDataConfig struct { // The format of your training data: // - // * COMPREHEND_CSV: A CSV file that - // supplements your training documents. The CSV file contains information about the - // custom entities that your trained model will detect. The required format of the - // file depends on whether you are providing annotations or an entity list. If you - // use this value, you must provide your CSV file by using either the Annotations - // or EntityList parameters. You must provide your training documents by using the + // * COMPREHEND_CSV: A CSV file that supplements + // your training documents. The CSV file contains information about the custom + // entities that your trained model will detect. The required format of the file + // depends on whether you are providing annotations or an entity list. If you use + // this value, you must provide your CSV file by using either the Annotations or + // EntityList parameters. You must provide your training documents by using the // Documents parameter. // - // * AUGMENTED_MANIFEST: A labeled dataset that is - // produced by Amazon SageMaker Ground Truth. This file is in JSON lines format. - // Each line is a complete JSON object that contains a training document and its - // labels. Each label annotates a named entity in the training document. If you use - // this value, you must provide the AugmentedManifests parameter in your - // request. + // * AUGMENTED_MANIFEST: A labeled dataset that is produced + // by Amazon SageMaker Ground Truth. This file is in JSON lines format. Each line + // is a complete JSON object that contains a training document and its labels. Each + // label annotates a named entity in the training document. If you use this value, + // you must provide the AugmentedManifests parameter in your request. // - // If you don't specify a value, Amazon Comprehend uses COMPREHEND_CSV as - // the default. + // If you don't + // specify a value, Amazon Comprehend uses COMPREHEND_CSV as the default. DataFormat EntityRecognizerDataFormat // The S3 location of the folder that contains the training documents for your @@ -893,9 +892,9 @@ type EntityRecognizerProperties struct { // process the analysis job. The VolumeKmsKeyId can be either of the following // formats: // - // * KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" + // * KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" // - // * Amazon + // * Amazon // Resource Name (ARN) of a KMS Key: // "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab" VolumeKmsKeyId *string @@ -957,14 +956,13 @@ type InputDataConfig struct { // Specifies how the text in an input file should be processed: // - // * - // ONE_DOC_PER_FILE - Each file is considered a separate document. Use this option - // when you are processing large documents, such as newspaper articles or - // scientific papers. + // * ONE_DOC_PER_FILE + // - Each file is considered a separate document. Use this option when you are + // processing large documents, such as newspaper articles or scientific papers. // - // * ONE_DOC_PER_LINE - Each line in a file is considered a - // separate document. 
Use this option when you are processing many short documents, - // such as text messages. + // * + // ONE_DOC_PER_LINE - Each line in a file is considered a separate document. Use + // this option when you are processing many short documents, such as text messages. InputFormat InputFormat } @@ -1056,9 +1054,9 @@ type KeyPhrasesDetectionJobProperties struct { // process the analysis job. The VolumeKmsKeyId can be either of the following // formats: // - // * KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" + // * KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" // - // * Amazon + // * Amazon // Resource Name (ARN) of a KMS Key: // "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab" VolumeKmsKeyId *string @@ -1089,16 +1087,16 @@ type OutputDataConfig struct { // encrypt the output results from an analysis job. The KmsKeyId can be one of the // following formats: // - // * KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" + // * KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" // - // - // * Amazon Resource Name (ARN) of a KMS Key: + // * + // Amazon Resource Name (ARN) of a KMS Key: // "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab" // + // * + // KMS Key Alias: "alias/ExampleAlias" // - // * KMS Key Alias: "alias/ExampleAlias" - // - // * ARN of a KMS Key Alias: + // * ARN of a KMS Key Alias: // "arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias" KmsKeyId *string } @@ -1301,9 +1299,9 @@ type SentimentDetectionJobProperties struct { // process the analysis job. The VolumeKmsKeyId can be either of the following // formats: // - // * KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" + // * KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" // - // * Amazon + // * Amazon // Resource Name (ARN) of a KMS Key: // "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab" VolumeKmsKeyId *string @@ -1443,9 +1441,9 @@ type TopicsDetectionJobProperties struct { // process the analysis job. 
The VolumeKmsKeyId can be either of the following // formats: // - // * KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" + // * KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" // - // * Amazon + // * Amazon // Resource Name (ARN) of a KMS Key: // "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab" VolumeKmsKeyId *string diff --git a/service/comprehendmedical/types/enums.go b/service/comprehendmedical/types/enums.go index 9bd5b37eab1..67c07f68cbf 100644 --- a/service/comprehendmedical/types/enums.go +++ b/service/comprehendmedical/types/enums.go @@ -28,40 +28,40 @@ type EntitySubType string // Enum values for EntitySubType const ( - EntitySubTypeName EntitySubType = "NAME" - EntitySubTypeDosage EntitySubType = "DOSAGE" - EntitySubTypeRoute_or_mode EntitySubType = "ROUTE_OR_MODE" - EntitySubTypeForm EntitySubType = "FORM" - EntitySubTypeFrequency EntitySubType = "FREQUENCY" - EntitySubTypeDuration EntitySubType = "DURATION" - EntitySubTypeGeneric_name EntitySubType = "GENERIC_NAME" - EntitySubTypeBrand_name EntitySubType = "BRAND_NAME" - EntitySubTypeStrength EntitySubType = "STRENGTH" - EntitySubTypeRate EntitySubType = "RATE" - EntitySubTypeAcuity EntitySubType = "ACUITY" - EntitySubTypeTest_name EntitySubType = "TEST_NAME" - EntitySubTypeTest_value EntitySubType = "TEST_VALUE" - EntitySubTypeTest_units EntitySubType = "TEST_UNITS" - EntitySubTypeProcedure_name EntitySubType = "PROCEDURE_NAME" - EntitySubTypeTreatment_name EntitySubType = "TREATMENT_NAME" - EntitySubTypeDate EntitySubType = "DATE" - EntitySubTypeAge EntitySubType = "AGE" - EntitySubTypeContact_point EntitySubType = "CONTACT_POINT" - EntitySubTypeEmail EntitySubType = "EMAIL" - EntitySubTypeIdentifier EntitySubType = "IDENTIFIER" - EntitySubTypeUrl EntitySubType = "URL" - EntitySubTypeAddress EntitySubType = "ADDRESS" - EntitySubTypeProfession EntitySubType = "PROFESSION" - EntitySubTypeSystem_organ_site EntitySubType = "SYSTEM_ORGAN_SITE" - EntitySubTypeDirection EntitySubType = "DIRECTION" - EntitySubTypeQuality EntitySubType = "QUALITY" - EntitySubTypeQuantity EntitySubType = "QUANTITY" - EntitySubTypeTime_expression EntitySubType = "TIME_EXPRESSION" - EntitySubTypeTime_to_medication_name EntitySubType = "TIME_TO_MEDICATION_NAME" - EntitySubTypeTime_to_dx_name EntitySubType = "TIME_TO_DX_NAME" - EntitySubTypeTime_to_test_name EntitySubType = "TIME_TO_TEST_NAME" - EntitySubTypeTime_to_procedure_name EntitySubType = "TIME_TO_PROCEDURE_NAME" - EntitySubTypeTime_to_treatment_name EntitySubType = "TIME_TO_TREATMENT_NAME" + EntitySubTypeName EntitySubType = "NAME" + EntitySubTypeDosage EntitySubType = "DOSAGE" + EntitySubTypeRouteOrMode EntitySubType = "ROUTE_OR_MODE" + EntitySubTypeForm EntitySubType = "FORM" + EntitySubTypeFrequency EntitySubType = "FREQUENCY" + EntitySubTypeDuration EntitySubType = "DURATION" + EntitySubTypeGenericName EntitySubType = "GENERIC_NAME" + EntitySubTypeBrandName EntitySubType = "BRAND_NAME" + EntitySubTypeStrength EntitySubType = "STRENGTH" + EntitySubTypeRate EntitySubType = "RATE" + EntitySubTypeAcuity EntitySubType = "ACUITY" + EntitySubTypeTestName EntitySubType = "TEST_NAME" + EntitySubTypeTestValue EntitySubType = "TEST_VALUE" + EntitySubTypeTestUnits EntitySubType = "TEST_UNITS" + EntitySubTypeProcedureName EntitySubType = "PROCEDURE_NAME" + EntitySubTypeTreatmentName EntitySubType = "TREATMENT_NAME" + EntitySubTypeDate EntitySubType = "DATE" + EntitySubTypeAge EntitySubType = "AGE" + EntitySubTypeContactPoint EntitySubType = "CONTACT_POINT" + 
EntitySubTypeEmail EntitySubType = "EMAIL" + EntitySubTypeIdentifier EntitySubType = "IDENTIFIER" + EntitySubTypeUrl EntitySubType = "URL" + EntitySubTypeAddress EntitySubType = "ADDRESS" + EntitySubTypeProfession EntitySubType = "PROFESSION" + EntitySubTypeSystemOrganSite EntitySubType = "SYSTEM_ORGAN_SITE" + EntitySubTypeDirection EntitySubType = "DIRECTION" + EntitySubTypeQuality EntitySubType = "QUALITY" + EntitySubTypeQuantity EntitySubType = "QUANTITY" + EntitySubTypeTimeExpression EntitySubType = "TIME_EXPRESSION" + EntitySubTypeTimeToMedicationName EntitySubType = "TIME_TO_MEDICATION_NAME" + EntitySubTypeTimeToDxName EntitySubType = "TIME_TO_DX_NAME" + EntitySubTypeTimeToTestName EntitySubType = "TIME_TO_TEST_NAME" + EntitySubTypeTimeToProcedureName EntitySubType = "TIME_TO_PROCEDURE_NAME" + EntitySubTypeTimeToTreatmentName EntitySubType = "TIME_TO_TREATMENT_NAME" ) // Values returns all known values for EntitySubType. Note that this can be @@ -110,12 +110,12 @@ type EntityType string // Enum values for EntityType const ( - EntityTypeMedication EntityType = "MEDICATION" - EntityTypeMedical_condition EntityType = "MEDICAL_CONDITION" - EntityTypeProtected_health_information EntityType = "PROTECTED_HEALTH_INFORMATION" - EntityTypeTest_treatment_procedure EntityType = "TEST_TREATMENT_PROCEDURE" - EntityTypeAnatomy EntityType = "ANATOMY" - EntityTypeTime_expression EntityType = "TIME_EXPRESSION" + EntityTypeMedication EntityType = "MEDICATION" + EntityTypeMedicalCondition EntityType = "MEDICAL_CONDITION" + EntityTypeProtectedHealthInformation EntityType = "PROTECTED_HEALTH_INFORMATION" + EntityTypeTestTreatmentProcedure EntityType = "TEST_TREATMENT_PROCEDURE" + EntityTypeAnatomy EntityType = "ANATOMY" + EntityTypeTimeExpression EntityType = "TIME_EXPRESSION" ) // Values returns all known values for EntityType. Note that this can be expanded @@ -136,11 +136,11 @@ type ICD10CMAttributeType string // Enum values for ICD10CMAttributeType const ( - ICD10CMAttributeTypeAcuity ICD10CMAttributeType = "ACUITY" - ICD10CMAttributeTypeDirection ICD10CMAttributeType = "DIRECTION" - ICD10CMAttributeTypeSystem_organ_site ICD10CMAttributeType = "SYSTEM_ORGAN_SITE" - ICD10CMAttributeTypeQuality ICD10CMAttributeType = "QUALITY" - ICD10CMAttributeTypeQuantity ICD10CMAttributeType = "QUANTITY" + ICD10CMAttributeTypeAcuity ICD10CMAttributeType = "ACUITY" + ICD10CMAttributeTypeDirection ICD10CMAttributeType = "DIRECTION" + ICD10CMAttributeTypeSystemOrganSite ICD10CMAttributeType = "SYSTEM_ORGAN_SITE" + ICD10CMAttributeTypeQuality ICD10CMAttributeType = "QUALITY" + ICD10CMAttributeTypeQuantity ICD10CMAttributeType = "QUANTITY" ) // Values returns all known values for ICD10CMAttributeType. Note that this can be @@ -160,7 +160,7 @@ type ICD10CMEntityCategory string // Enum values for ICD10CMEntityCategory const ( - ICD10CMEntityCategoryMedical_condition ICD10CMEntityCategory = "MEDICAL_CONDITION" + ICD10CMEntityCategoryMedicalCondition ICD10CMEntityCategory = "MEDICAL_CONDITION" ) // Values returns all known values for ICD10CMEntityCategory. Note that this can be @@ -176,7 +176,7 @@ type ICD10CMEntityType string // Enum values for ICD10CMEntityType const ( - ICD10CMEntityTypeDx_name ICD10CMEntityType = "DX_NAME" + ICD10CMEntityTypeDxName ICD10CMEntityType = "DX_NAME" ) // Values returns all known values for ICD10CMEntityType. 
Note that this can be @@ -214,13 +214,13 @@ type JobStatus string // Enum values for JobStatus const ( - JobStatusSubmitted JobStatus = "SUBMITTED" - JobStatusIn_progress JobStatus = "IN_PROGRESS" - JobStatusCompleted JobStatus = "COMPLETED" - JobStatusPartial_success JobStatus = "PARTIAL_SUCCESS" - JobStatusFailed JobStatus = "FAILED" - JobStatusStop_requested JobStatus = "STOP_REQUESTED" - JobStatusStopped JobStatus = "STOPPED" + JobStatusSubmitted JobStatus = "SUBMITTED" + JobStatusInProgress JobStatus = "IN_PROGRESS" + JobStatusCompleted JobStatus = "COMPLETED" + JobStatusPartialSuccess JobStatus = "PARTIAL_SUCCESS" + JobStatusFailed JobStatus = "FAILED" + JobStatusStopRequested JobStatus = "STOP_REQUESTED" + JobStatusStopped JobStatus = "STOPPED" ) // Values returns all known values for JobStatus. Note that this can be expanded in @@ -258,24 +258,24 @@ type RelationshipType string // Enum values for RelationshipType const ( - RelationshipTypeEvery RelationshipType = "EVERY" - RelationshipTypeWith_dosage RelationshipType = "WITH_DOSAGE" - RelationshipTypeAdministered_via RelationshipType = "ADMINISTERED_VIA" - RelationshipTypeFor RelationshipType = "FOR" - RelationshipTypeNegative RelationshipType = "NEGATIVE" - RelationshipTypeOverlap RelationshipType = "OVERLAP" - RelationshipTypeDosage RelationshipType = "DOSAGE" - RelationshipTypeRoute_or_mode RelationshipType = "ROUTE_OR_MODE" - RelationshipTypeForm RelationshipType = "FORM" - RelationshipTypeFrequency RelationshipType = "FREQUENCY" - RelationshipTypeDuration RelationshipType = "DURATION" - RelationshipTypeStrength RelationshipType = "STRENGTH" - RelationshipTypeRate RelationshipType = "RATE" - RelationshipTypeAcuity RelationshipType = "ACUITY" - RelationshipTypeTest_value RelationshipType = "TEST_VALUE" - RelationshipTypeTest_units RelationshipType = "TEST_UNITS" - RelationshipTypeDirection RelationshipType = "DIRECTION" - RelationshipTypeSystem_organ_site RelationshipType = "SYSTEM_ORGAN_SITE" + RelationshipTypeEvery RelationshipType = "EVERY" + RelationshipTypeWithDosage RelationshipType = "WITH_DOSAGE" + RelationshipTypeAdministeredVia RelationshipType = "ADMINISTERED_VIA" + RelationshipTypeFor RelationshipType = "FOR" + RelationshipTypeNegative RelationshipType = "NEGATIVE" + RelationshipTypeOverlap RelationshipType = "OVERLAP" + RelationshipTypeDosage RelationshipType = "DOSAGE" + RelationshipTypeRouteOrMode RelationshipType = "ROUTE_OR_MODE" + RelationshipTypeForm RelationshipType = "FORM" + RelationshipTypeFrequency RelationshipType = "FREQUENCY" + RelationshipTypeDuration RelationshipType = "DURATION" + RelationshipTypeStrength RelationshipType = "STRENGTH" + RelationshipTypeRate RelationshipType = "RATE" + RelationshipTypeAcuity RelationshipType = "ACUITY" + RelationshipTypeTestValue RelationshipType = "TEST_VALUE" + RelationshipTypeTestUnits RelationshipType = "TEST_UNITS" + RelationshipTypeDirection RelationshipType = "DIRECTION" + RelationshipTypeSystemOrganSite RelationshipType = "SYSTEM_ORGAN_SITE" ) // Values returns all known values for RelationshipType. 
Note that this can be @@ -308,13 +308,13 @@ type RxNormAttributeType string // Enum values for RxNormAttributeType const ( - RxNormAttributeTypeDosage RxNormAttributeType = "DOSAGE" - RxNormAttributeTypeDuration RxNormAttributeType = "DURATION" - RxNormAttributeTypeForm RxNormAttributeType = "FORM" - RxNormAttributeTypeFrequency RxNormAttributeType = "FREQUENCY" - RxNormAttributeTypeRate RxNormAttributeType = "RATE" - RxNormAttributeTypeRoute_or_mode RxNormAttributeType = "ROUTE_OR_MODE" - RxNormAttributeTypeStrength RxNormAttributeType = "STRENGTH" + RxNormAttributeTypeDosage RxNormAttributeType = "DOSAGE" + RxNormAttributeTypeDuration RxNormAttributeType = "DURATION" + RxNormAttributeTypeForm RxNormAttributeType = "FORM" + RxNormAttributeTypeFrequency RxNormAttributeType = "FREQUENCY" + RxNormAttributeTypeRate RxNormAttributeType = "RATE" + RxNormAttributeTypeRouteOrMode RxNormAttributeType = "ROUTE_OR_MODE" + RxNormAttributeTypeStrength RxNormAttributeType = "STRENGTH" ) // Values returns all known values for RxNormAttributeType. Note that this can be @@ -352,8 +352,8 @@ type RxNormEntityType string // Enum values for RxNormEntityType const ( - RxNormEntityTypeBrand_name RxNormEntityType = "BRAND_NAME" - RxNormEntityTypeGeneric_name RxNormEntityType = "GENERIC_NAME" + RxNormEntityTypeBrandName RxNormEntityType = "BRAND_NAME" + RxNormEntityTypeGenericName RxNormEntityType = "GENERIC_NAME" ) // Values returns all known values for RxNormEntityType. Note that this can be diff --git a/service/computeoptimizer/types/enums.go b/service/computeoptimizer/types/enums.go index 6ba32ff5f92..b06ede7a5db 100644 --- a/service/computeoptimizer/types/enums.go +++ b/service/computeoptimizer/types/enums.go @@ -6,43 +6,43 @@ type ExportableAutoScalingGroupField string // Enum values for ExportableAutoScalingGroupField const ( - ExportableAutoScalingGroupFieldAccount_id ExportableAutoScalingGroupField = "AccountId" - ExportableAutoScalingGroupFieldAuto_scaling_group_arn ExportableAutoScalingGroupField = "AutoScalingGroupArn" - ExportableAutoScalingGroupFieldAuto_scaling_group_name ExportableAutoScalingGroupField = "AutoScalingGroupName" - ExportableAutoScalingGroupFieldFinding ExportableAutoScalingGroupField = "Finding" - ExportableAutoScalingGroupFieldUtilization_metrics_cpu_maximum ExportableAutoScalingGroupField = "UtilizationMetricsCpuMaximum" - ExportableAutoScalingGroupFieldUtilization_metrics_memory_maximum ExportableAutoScalingGroupField = "UtilizationMetricsMemoryMaximum" - ExportableAutoScalingGroupFieldUtilization_metrics_ebs_read_ops_per_second_maximum ExportableAutoScalingGroupField = "UtilizationMetricsEbsReadOpsPerSecondMaximum" - ExportableAutoScalingGroupFieldUtilization_metrics_ebs_write_ops_per_second_maximum ExportableAutoScalingGroupField = "UtilizationMetricsEbsWriteOpsPerSecondMaximum" - ExportableAutoScalingGroupFieldUtilization_metrics_ebs_read_bytes_per_second_maximum ExportableAutoScalingGroupField = "UtilizationMetricsEbsReadBytesPerSecondMaximum" - ExportableAutoScalingGroupFieldUtilization_metrics_ebs_write_bytes_per_second_maximum ExportableAutoScalingGroupField = "UtilizationMetricsEbsWriteBytesPerSecondMaximum" - ExportableAutoScalingGroupFieldLookback_period_in_days ExportableAutoScalingGroupField = "LookbackPeriodInDays" - ExportableAutoScalingGroupFieldCurrent_configuration_instance_type ExportableAutoScalingGroupField = "CurrentConfigurationInstanceType" - ExportableAutoScalingGroupFieldCurrent_configuration_desired_capacity ExportableAutoScalingGroupField = 
"CurrentConfigurationDesiredCapacity" - ExportableAutoScalingGroupFieldCurrent_configuration_min_size ExportableAutoScalingGroupField = "CurrentConfigurationMinSize" - ExportableAutoScalingGroupFieldCurrent_configuration_max_size ExportableAutoScalingGroupField = "CurrentConfigurationMaxSize" - ExportableAutoScalingGroupFieldCurrent_on_demand_price ExportableAutoScalingGroupField = "CurrentOnDemandPrice" - ExportableAutoScalingGroupFieldCurrent_standard_one_year_no_upfront_reserved_price ExportableAutoScalingGroupField = "CurrentStandardOneYearNoUpfrontReservedPrice" - ExportableAutoScalingGroupFieldCurrent_standard_three_year_no_upfront_reserved_price ExportableAutoScalingGroupField = "CurrentStandardThreeYearNoUpfrontReservedPrice" - ExportableAutoScalingGroupFieldCurrent_vcpus ExportableAutoScalingGroupField = "CurrentVCpus" - ExportableAutoScalingGroupFieldCurrent_memory ExportableAutoScalingGroupField = "CurrentMemory" - ExportableAutoScalingGroupFieldCurrent_storage ExportableAutoScalingGroupField = "CurrentStorage" - ExportableAutoScalingGroupFieldCurrent_network ExportableAutoScalingGroupField = "CurrentNetwork" - ExportableAutoScalingGroupFieldRecommendation_options_configuration_instance_type ExportableAutoScalingGroupField = "RecommendationOptionsConfigurationInstanceType" - ExportableAutoScalingGroupFieldRecommendation_options_configuration_desired_capacity ExportableAutoScalingGroupField = "RecommendationOptionsConfigurationDesiredCapacity" - ExportableAutoScalingGroupFieldRecommendation_options_configuration_min_size ExportableAutoScalingGroupField = "RecommendationOptionsConfigurationMinSize" - ExportableAutoScalingGroupFieldRecommendation_options_configuration_max_size ExportableAutoScalingGroupField = "RecommendationOptionsConfigurationMaxSize" - ExportableAutoScalingGroupFieldRecommendation_options_projected_utilization_metrics_cpu_maximum ExportableAutoScalingGroupField = "RecommendationOptionsProjectedUtilizationMetricsCpuMaximum" - ExportableAutoScalingGroupFieldRecommendation_options_projected_utilization_metrics_memory_maximum ExportableAutoScalingGroupField = "RecommendationOptionsProjectedUtilizationMetricsMemoryMaximum" - ExportableAutoScalingGroupFieldRecommendation_options_performance_risk ExportableAutoScalingGroupField = "RecommendationOptionsPerformanceRisk" - ExportableAutoScalingGroupFieldRecommendation_options_on_demand_price ExportableAutoScalingGroupField = "RecommendationOptionsOnDemandPrice" - ExportableAutoScalingGroupFieldRecommendation_options_standard_one_year_no_upfront_reserved_price ExportableAutoScalingGroupField = "RecommendationOptionsStandardOneYearNoUpfrontReservedPrice" - ExportableAutoScalingGroupFieldRecommendation_options_standard_three_year_no_upfront_reserved_price ExportableAutoScalingGroupField = "RecommendationOptionsStandardThreeYearNoUpfrontReservedPrice" - ExportableAutoScalingGroupFieldRecommendation_options_vcpus ExportableAutoScalingGroupField = "RecommendationOptionsVcpus" - ExportableAutoScalingGroupFieldRecommendation_options_memory ExportableAutoScalingGroupField = "RecommendationOptionsMemory" - ExportableAutoScalingGroupFieldRecommendation_options_storage ExportableAutoScalingGroupField = "RecommendationOptionsStorage" - ExportableAutoScalingGroupFieldRecommendation_options_network ExportableAutoScalingGroupField = "RecommendationOptionsNetwork" - ExportableAutoScalingGroupFieldLast_refresh_timestamp ExportableAutoScalingGroupField = "LastRefreshTimestamp" + ExportableAutoScalingGroupFieldAccountId 
ExportableAutoScalingGroupField = "AccountId" + ExportableAutoScalingGroupFieldAutoScalingGroupArn ExportableAutoScalingGroupField = "AutoScalingGroupArn" + ExportableAutoScalingGroupFieldAutoScalingGroupName ExportableAutoScalingGroupField = "AutoScalingGroupName" + ExportableAutoScalingGroupFieldFinding ExportableAutoScalingGroupField = "Finding" + ExportableAutoScalingGroupFieldUtilizationMetricsCpuMaximum ExportableAutoScalingGroupField = "UtilizationMetricsCpuMaximum" + ExportableAutoScalingGroupFieldUtilizationMetricsMemoryMaximum ExportableAutoScalingGroupField = "UtilizationMetricsMemoryMaximum" + ExportableAutoScalingGroupFieldUtilizationMetricsEbsReadOpsPerSecondMaximum ExportableAutoScalingGroupField = "UtilizationMetricsEbsReadOpsPerSecondMaximum" + ExportableAutoScalingGroupFieldUtilizationMetricsEbsWriteOpsPerSecondMaximum ExportableAutoScalingGroupField = "UtilizationMetricsEbsWriteOpsPerSecondMaximum" + ExportableAutoScalingGroupFieldUtilizationMetricsEbsReadBytesPerSecondMaximum ExportableAutoScalingGroupField = "UtilizationMetricsEbsReadBytesPerSecondMaximum" + ExportableAutoScalingGroupFieldUtilizationMetricsEbsWriteBytesPerSecondMaximum ExportableAutoScalingGroupField = "UtilizationMetricsEbsWriteBytesPerSecondMaximum" + ExportableAutoScalingGroupFieldLookbackPeriodInDays ExportableAutoScalingGroupField = "LookbackPeriodInDays" + ExportableAutoScalingGroupFieldCurrentConfigurationInstanceType ExportableAutoScalingGroupField = "CurrentConfigurationInstanceType" + ExportableAutoScalingGroupFieldCurrentConfigurationDesiredCapacity ExportableAutoScalingGroupField = "CurrentConfigurationDesiredCapacity" + ExportableAutoScalingGroupFieldCurrentConfigurationMinSize ExportableAutoScalingGroupField = "CurrentConfigurationMinSize" + ExportableAutoScalingGroupFieldCurrentConfigurationMaxSize ExportableAutoScalingGroupField = "CurrentConfigurationMaxSize" + ExportableAutoScalingGroupFieldCurrentOnDemandPrice ExportableAutoScalingGroupField = "CurrentOnDemandPrice" + ExportableAutoScalingGroupFieldCurrentStandardOneYearNoUpfrontReservedPrice ExportableAutoScalingGroupField = "CurrentStandardOneYearNoUpfrontReservedPrice" + ExportableAutoScalingGroupFieldCurrentStandardThreeYearNoUpfrontReservedPrice ExportableAutoScalingGroupField = "CurrentStandardThreeYearNoUpfrontReservedPrice" + ExportableAutoScalingGroupFieldCurrentVcpus ExportableAutoScalingGroupField = "CurrentVCpus" + ExportableAutoScalingGroupFieldCurrentMemory ExportableAutoScalingGroupField = "CurrentMemory" + ExportableAutoScalingGroupFieldCurrentStorage ExportableAutoScalingGroupField = "CurrentStorage" + ExportableAutoScalingGroupFieldCurrentNetwork ExportableAutoScalingGroupField = "CurrentNetwork" + ExportableAutoScalingGroupFieldRecommendationOptionsConfigurationInstanceType ExportableAutoScalingGroupField = "RecommendationOptionsConfigurationInstanceType" + ExportableAutoScalingGroupFieldRecommendationOptionsConfigurationDesiredCapacity ExportableAutoScalingGroupField = "RecommendationOptionsConfigurationDesiredCapacity" + ExportableAutoScalingGroupFieldRecommendationOptionsConfigurationMinSize ExportableAutoScalingGroupField = "RecommendationOptionsConfigurationMinSize" + ExportableAutoScalingGroupFieldRecommendationOptionsConfigurationMaxSize ExportableAutoScalingGroupField = "RecommendationOptionsConfigurationMaxSize" + ExportableAutoScalingGroupFieldRecommendationOptionsProjectedUtilizationMetricsCpuMaximum ExportableAutoScalingGroupField = "RecommendationOptionsProjectedUtilizationMetricsCpuMaximum" + 
ExportableAutoScalingGroupFieldRecommendationOptionsProjectedUtilizationMetricsMemoryMaximum ExportableAutoScalingGroupField = "RecommendationOptionsProjectedUtilizationMetricsMemoryMaximum" + ExportableAutoScalingGroupFieldRecommendationOptionsPerformanceRisk ExportableAutoScalingGroupField = "RecommendationOptionsPerformanceRisk" + ExportableAutoScalingGroupFieldRecommendationOptionsOnDemandPrice ExportableAutoScalingGroupField = "RecommendationOptionsOnDemandPrice" + ExportableAutoScalingGroupFieldRecommendationOptionsStandardOneYearNoUpfrontReservedPrice ExportableAutoScalingGroupField = "RecommendationOptionsStandardOneYearNoUpfrontReservedPrice" + ExportableAutoScalingGroupFieldRecommendationOptionsStandardThreeYearNoUpfrontReservedPrice ExportableAutoScalingGroupField = "RecommendationOptionsStandardThreeYearNoUpfrontReservedPrice" + ExportableAutoScalingGroupFieldRecommendationOptionsVcpus ExportableAutoScalingGroupField = "RecommendationOptionsVcpus" + ExportableAutoScalingGroupFieldRecommendationOptionsMemory ExportableAutoScalingGroupField = "RecommendationOptionsMemory" + ExportableAutoScalingGroupFieldRecommendationOptionsStorage ExportableAutoScalingGroupField = "RecommendationOptionsStorage" + ExportableAutoScalingGroupFieldRecommendationOptionsNetwork ExportableAutoScalingGroupField = "RecommendationOptionsNetwork" + ExportableAutoScalingGroupFieldLastRefreshTimestamp ExportableAutoScalingGroupField = "LastRefreshTimestamp" ) // Values returns all known values for ExportableAutoScalingGroupField. Note that @@ -95,39 +95,39 @@ type ExportableInstanceField string // Enum values for ExportableInstanceField const ( - ExportableInstanceFieldAccount_id ExportableInstanceField = "AccountId" - ExportableInstanceFieldInstance_arn ExportableInstanceField = "InstanceArn" - ExportableInstanceFieldInstance_name ExportableInstanceField = "InstanceName" - ExportableInstanceFieldFinding ExportableInstanceField = "Finding" - ExportableInstanceFieldLookback_period_in_days ExportableInstanceField = "LookbackPeriodInDays" - ExportableInstanceFieldCurrent_instance_type ExportableInstanceField = "CurrentInstanceType" - ExportableInstanceFieldUtilization_metrics_cpu_maximum ExportableInstanceField = "UtilizationMetricsCpuMaximum" - ExportableInstanceFieldUtilization_metrics_memory_maximum ExportableInstanceField = "UtilizationMetricsMemoryMaximum" - ExportableInstanceFieldUtilization_metrics_ebs_read_ops_per_second_maximum ExportableInstanceField = "UtilizationMetricsEbsReadOpsPerSecondMaximum" - ExportableInstanceFieldUtilization_metrics_ebs_write_ops_per_second_maximum ExportableInstanceField = "UtilizationMetricsEbsWriteOpsPerSecondMaximum" - ExportableInstanceFieldUtilization_metrics_ebs_read_bytes_per_second_maximum ExportableInstanceField = "UtilizationMetricsEbsReadBytesPerSecondMaximum" - ExportableInstanceFieldUtilization_metrics_ebs_write_bytes_per_second_maximum ExportableInstanceField = "UtilizationMetricsEbsWriteBytesPerSecondMaximum" - ExportableInstanceFieldCurrent_on_demand_price ExportableInstanceField = "CurrentOnDemandPrice" - ExportableInstanceFieldCurrent_standard_one_year_no_upfront_reserved_price ExportableInstanceField = "CurrentStandardOneYearNoUpfrontReservedPrice" - ExportableInstanceFieldCurrent_standard_three_year_no_upfront_reserved_price ExportableInstanceField = "CurrentStandardThreeYearNoUpfrontReservedPrice" - ExportableInstanceFieldCurrent_vcpus ExportableInstanceField = "CurrentVCpus" - ExportableInstanceFieldCurrent_memory ExportableInstanceField = 
"CurrentMemory" - ExportableInstanceFieldCurrent_storage ExportableInstanceField = "CurrentStorage" - ExportableInstanceFieldCurrent_network ExportableInstanceField = "CurrentNetwork" - ExportableInstanceFieldRecommendation_options_instance_type ExportableInstanceField = "RecommendationOptionsInstanceType" - ExportableInstanceFieldRecommendation_options_projected_utilization_metrics_cpu_maximum ExportableInstanceField = "RecommendationOptionsProjectedUtilizationMetricsCpuMaximum" - ExportableInstanceFieldRecommendation_options_projected_utilization_metrics_memory_maximum ExportableInstanceField = "RecommendationOptionsProjectedUtilizationMetricsMemoryMaximum" - ExportableInstanceFieldRecommendation_options_performance_risk ExportableInstanceField = "RecommendationOptionsPerformanceRisk" - ExportableInstanceFieldRecommendation_options_vcpus ExportableInstanceField = "RecommendationOptionsVcpus" - ExportableInstanceFieldRecommendation_options_memory ExportableInstanceField = "RecommendationOptionsMemory" - ExportableInstanceFieldRecommendation_options_storage ExportableInstanceField = "RecommendationOptionsStorage" - ExportableInstanceFieldRecommendation_options_network ExportableInstanceField = "RecommendationOptionsNetwork" - ExportableInstanceFieldRecommendation_options_on_demand_price ExportableInstanceField = "RecommendationOptionsOnDemandPrice" - ExportableInstanceFieldRecommendation_options_standard_one_year_no_upfront_reserved_price ExportableInstanceField = "RecommendationOptionsStandardOneYearNoUpfrontReservedPrice" - ExportableInstanceFieldRecommendation_options_standard_three_year_no_upfront_reserved_price ExportableInstanceField = "RecommendationOptionsStandardThreeYearNoUpfrontReservedPrice" - ExportableInstanceFieldRecommendations_sources_recommendation_source_arn ExportableInstanceField = "RecommendationsSourcesRecommendationSourceArn" - ExportableInstanceFieldRecommendations_sources_recommendation_source_type ExportableInstanceField = "RecommendationsSourcesRecommendationSourceType" - ExportableInstanceFieldLast_refresh_timestamp ExportableInstanceField = "LastRefreshTimestamp" + ExportableInstanceFieldAccountId ExportableInstanceField = "AccountId" + ExportableInstanceFieldInstanceArn ExportableInstanceField = "InstanceArn" + ExportableInstanceFieldInstanceName ExportableInstanceField = "InstanceName" + ExportableInstanceFieldFinding ExportableInstanceField = "Finding" + ExportableInstanceFieldLookbackPeriodInDays ExportableInstanceField = "LookbackPeriodInDays" + ExportableInstanceFieldCurrentInstanceType ExportableInstanceField = "CurrentInstanceType" + ExportableInstanceFieldUtilizationMetricsCpuMaximum ExportableInstanceField = "UtilizationMetricsCpuMaximum" + ExportableInstanceFieldUtilizationMetricsMemoryMaximum ExportableInstanceField = "UtilizationMetricsMemoryMaximum" + ExportableInstanceFieldUtilizationMetricsEbsReadOpsPerSecondMaximum ExportableInstanceField = "UtilizationMetricsEbsReadOpsPerSecondMaximum" + ExportableInstanceFieldUtilizationMetricsEbsWriteOpsPerSecondMaximum ExportableInstanceField = "UtilizationMetricsEbsWriteOpsPerSecondMaximum" + ExportableInstanceFieldUtilizationMetricsEbsReadBytesPerSecondMaximum ExportableInstanceField = "UtilizationMetricsEbsReadBytesPerSecondMaximum" + ExportableInstanceFieldUtilizationMetricsEbsWriteBytesPerSecondMaximum ExportableInstanceField = "UtilizationMetricsEbsWriteBytesPerSecondMaximum" + ExportableInstanceFieldCurrentOnDemandPrice ExportableInstanceField = "CurrentOnDemandPrice" + 
ExportableInstanceFieldCurrentStandardOneYearNoUpfrontReservedPrice ExportableInstanceField = "CurrentStandardOneYearNoUpfrontReservedPrice" + ExportableInstanceFieldCurrentStandardThreeYearNoUpfrontReservedPrice ExportableInstanceField = "CurrentStandardThreeYearNoUpfrontReservedPrice" + ExportableInstanceFieldCurrentVcpus ExportableInstanceField = "CurrentVCpus" + ExportableInstanceFieldCurrentMemory ExportableInstanceField = "CurrentMemory" + ExportableInstanceFieldCurrentStorage ExportableInstanceField = "CurrentStorage" + ExportableInstanceFieldCurrentNetwork ExportableInstanceField = "CurrentNetwork" + ExportableInstanceFieldRecommendationOptionsInstanceType ExportableInstanceField = "RecommendationOptionsInstanceType" + ExportableInstanceFieldRecommendationOptionsProjectedUtilizationMetricsCpuMaximum ExportableInstanceField = "RecommendationOptionsProjectedUtilizationMetricsCpuMaximum" + ExportableInstanceFieldRecommendationOptionsProjectedUtilizationMetricsMemoryMaximum ExportableInstanceField = "RecommendationOptionsProjectedUtilizationMetricsMemoryMaximum" + ExportableInstanceFieldRecommendationOptionsPerformanceRisk ExportableInstanceField = "RecommendationOptionsPerformanceRisk" + ExportableInstanceFieldRecommendationOptionsVcpus ExportableInstanceField = "RecommendationOptionsVcpus" + ExportableInstanceFieldRecommendationOptionsMemory ExportableInstanceField = "RecommendationOptionsMemory" + ExportableInstanceFieldRecommendationOptionsStorage ExportableInstanceField = "RecommendationOptionsStorage" + ExportableInstanceFieldRecommendationOptionsNetwork ExportableInstanceField = "RecommendationOptionsNetwork" + ExportableInstanceFieldRecommendationOptionsOnDemandPrice ExportableInstanceField = "RecommendationOptionsOnDemandPrice" + ExportableInstanceFieldRecommendationOptionsStandardOneYearNoUpfrontReservedPrice ExportableInstanceField = "RecommendationOptionsStandardOneYearNoUpfrontReservedPrice" + ExportableInstanceFieldRecommendationOptionsStandardThreeYearNoUpfrontReservedPrice ExportableInstanceField = "RecommendationOptionsStandardThreeYearNoUpfrontReservedPrice" + ExportableInstanceFieldRecommendationsSourcesRecommendationSourceArn ExportableInstanceField = "RecommendationsSourcesRecommendationSourceArn" + ExportableInstanceFieldRecommendationsSourcesRecommendationSourceType ExportableInstanceField = "RecommendationsSourcesRecommendationSourceType" + ExportableInstanceFieldLastRefreshTimestamp ExportableInstanceField = "LastRefreshTimestamp" ) // Values returns all known values for ExportableInstanceField. Note that this can @@ -191,8 +191,8 @@ type FilterName string // Enum values for FilterName const ( - FilterNameFinding FilterName = "Finding" - FilterNameRecommendation_source_type FilterName = "RecommendationSourceType" + FilterNameFinding FilterName = "Finding" + FilterNameRecommendationSourceType FilterName = "RecommendationSourceType" ) // Values returns all known values for FilterName. Note that this can be expanded @@ -209,10 +209,10 @@ type Finding string // Enum values for Finding const ( - FindingUnder_provisioned Finding = "Underprovisioned" - FindingOver_provisioned Finding = "Overprovisioned" - FindingOptimized Finding = "Optimized" - FindingNot_optimized Finding = "NotOptimized" + FindingUnderProvisioned Finding = "Underprovisioned" + FindingOverProvisioned Finding = "Overprovisioned" + FindingOptimized Finding = "Optimized" + FindingNotOptimized Finding = "NotOptimized" ) // Values returns all known values for Finding. 
Note that this can be expanded in @@ -231,8 +231,8 @@ type JobFilterName string // Enum values for JobFilterName const ( - JobFilterNameResource_type JobFilterName = "ResourceType" - JobFilterNameJob_status JobFilterName = "JobStatus" + JobFilterNameResourceType JobFilterName = "ResourceType" + JobFilterNameJobStatus JobFilterName = "JobStatus" ) // Values returns all known values for JobFilterName. Note that this can be @@ -249,10 +249,10 @@ type JobStatus string // Enum values for JobStatus const ( - JobStatusQueued JobStatus = "Queued" - JobStatusIn_progress JobStatus = "InProgress" - JobStatusComplete JobStatus = "Complete" - JobStatusFailed JobStatus = "Failed" + JobStatusQueued JobStatus = "Queued" + JobStatusInProgress JobStatus = "InProgress" + JobStatusComplete JobStatus = "Complete" + JobStatusFailed JobStatus = "Failed" ) // Values returns all known values for JobStatus. Note that this can be expanded in @@ -271,12 +271,12 @@ type MetricName string // Enum values for MetricName const ( - MetricNameCpu MetricName = "Cpu" - MetricNameMemory MetricName = "Memory" - MetricNameEbs_read_ops_per_second MetricName = "EBS_READ_OPS_PER_SECOND" - MetricNameEbs_write_ops_per_second MetricName = "EBS_WRITE_OPS_PER_SECOND" - MetricNameEbs_read_bytes_per_second MetricName = "EBS_READ_BYTES_PER_SECOND" - MetricNameEbs_write_bytes_per_second MetricName = "EBS_WRITE_BYTES_PER_SECOND" + MetricNameCpu MetricName = "Cpu" + MetricNameMemory MetricName = "Memory" + MetricNameEbsReadOpsPerSecond MetricName = "EBS_READ_OPS_PER_SECOND" + MetricNameEbsWriteOpsPerSecond MetricName = "EBS_WRITE_OPS_PER_SECOND" + MetricNameEbsReadBytesPerSecond MetricName = "EBS_READ_BYTES_PER_SECOND" + MetricNameEbsWriteBytesPerSecond MetricName = "EBS_WRITE_BYTES_PER_SECOND" ) // Values returns all known values for MetricName. Note that this can be expanded @@ -315,8 +315,8 @@ type RecommendationSourceType string // Enum values for RecommendationSourceType const ( - RecommendationSourceTypeEc2_instance RecommendationSourceType = "Ec2Instance" - RecommendationSourceTypeAuto_scaling_group RecommendationSourceType = "AutoScalingGroup" + RecommendationSourceTypeEc2Instance RecommendationSourceType = "Ec2Instance" + RecommendationSourceTypeAutoScalingGroup RecommendationSourceType = "AutoScalingGroup" ) // Values returns all known values for RecommendationSourceType. Note that this can @@ -333,8 +333,8 @@ type ResourceType string // Enum values for ResourceType const ( - ResourceTypeEc2_instance ResourceType = "Ec2Instance" - ResourceTypeAuto_scaling_group ResourceType = "AutoScalingGroup" + ResourceTypeEc2Instance ResourceType = "Ec2Instance" + ResourceTypeAutoScalingGroup ResourceType = "AutoScalingGroup" ) // Values returns all known values for ResourceType. Note that this can be expanded diff --git a/service/computeoptimizer/types/types.go b/service/computeoptimizer/types/types.go index 72d2d69034b..46b521d9fb1 100644 --- a/service/computeoptimizer/types/types.go +++ b/service/computeoptimizer/types/types.go @@ -41,11 +41,11 @@ type AutoScalingGroupRecommendation struct { // The finding classification for the Auto Scaling group. Findings for Auto Scaling // groups include: // - // * NotOptimized —An Auto Scaling group is considered not + // * NotOptimized —An Auto Scaling group is considered not // optimized when AWS Compute Optimizer identifies a recommendation that can // provide better performance for your workload. 
// - // * Optimized —An Auto Scaling + // * Optimized —An Auto Scaling // group is considered optimized when Compute Optimizer determines that the group // is correctly provisioned to run your workload based on the chosen instance type. // For optimized resources, Compute Optimizer might recommend a new generation @@ -152,21 +152,21 @@ type InstanceRecommendation struct { // The finding classification for the instance. Findings for instances include: // + // * + // Underprovisioned —An instance is considered under-provisioned when at least one + // specification of your instance, such as CPU, memory, or network, does not meet + // the performance requirements of your workload. Under-provisioned instances may + // lead to poor application performance. // - // * Underprovisioned —An instance is considered under-provisioned when at least - // one specification of your instance, such as CPU, memory, or network, does not - // meet the performance requirements of your workload. Under-provisioned instances - // may lead to poor application performance. - // - // * Overprovisioned —An instance is + // * Overprovisioned —An instance is // considered over-provisioned when at least one specification of your instance, // such as CPU, memory, or network, can be sized down while still meeting the // performance requirements of your workload, and no specification is // under-provisioned. Over-provisioned instances may lead to unnecessary // infrastructure cost. // - // * Optimized —An instance is considered optimized when - // all specifications of your instance, such as CPU, memory, and network, meet the + // * Optimized —An instance is considered optimized when all + // specifications of your instance, such as CPU, memory, and network, meet the // performance requirements of your workload and is not over provisioned. An // optimized instance runs your workloads with optimal performance and // infrastructure cost. For optimized resources, AWS Compute Optimizer might diff --git a/service/configservice/api_op_BatchGetAggregateResourceConfig.go b/service/configservice/api_op_BatchGetAggregateResourceConfig.go index 91c1310c89a..624f24c1e66 100644 --- a/service/configservice/api_op_BatchGetAggregateResourceConfig.go +++ b/service/configservice/api_op_BatchGetAggregateResourceConfig.go @@ -16,11 +16,11 @@ import ( // not processed in the current request. If there are no unprocessed resources, the // operation returns an empty unprocessedResourceIdentifiers list. // -// * The API -// does not return results for deleted resources. +// * The API does +// not return results for deleted resources. // -// * The API does not return -// tags and relationships. +// * The API does not return tags and +// relationships. func (c *Client) BatchGetAggregateResourceConfig(ctx context.Context, params *BatchGetAggregateResourceConfigInput, optFns ...func(*Options)) (*BatchGetAggregateResourceConfigOutput, error) { if params == nil { params = &BatchGetAggregateResourceConfigInput{} diff --git a/service/configservice/api_op_BatchGetResourceConfig.go b/service/configservice/api_op_BatchGetResourceConfig.go index 1fd478940fd..4fbe918cbec 100644 --- a/service/configservice/api_op_BatchGetResourceConfig.go +++ b/service/configservice/api_op_BatchGetResourceConfig.go @@ -16,12 +16,12 @@ import ( // request. If there are no unprocessed resources, the operation returns an empty // unprocessedResourceKeys list. // -// * The API does not return results for deleted +// * The API does not return results for deleted // resources. 
// -// * The API does not return any tags for the requested resources. -// This information is filtered out of the supplementaryConfiguration section of -// the API response. +// * The API does not return any tags for the requested resources. This +// information is filtered out of the supplementaryConfiguration section of the API +// response. func (c *Client) BatchGetResourceConfig(ctx context.Context, params *BatchGetResourceConfigInput, optFns ...func(*Options)) (*BatchGetResourceConfigOutput, error) { if params == nil { params = &BatchGetResourceConfigInput{} diff --git a/service/configservice/api_op_DeliverConfigSnapshot.go b/service/configservice/api_op_DeliverConfigSnapshot.go index 973fff0f2f9..cbeda9a86ec 100644 --- a/service/configservice/api_op_DeliverConfigSnapshot.go +++ b/service/configservice/api_op_DeliverConfigSnapshot.go @@ -14,14 +14,14 @@ import ( // specified delivery channel. After the delivery has started, AWS Config sends the // following notifications using an Amazon SNS topic that you have specified. // +// * +// Notification of the start of the delivery. // -// * Notification of the start of the delivery. +// * Notification of the completion of +// the delivery, if the delivery was successfully completed. // -// * Notification of the -// completion of the delivery, if the delivery was successfully completed. -// -// * -// Notification of delivery failure, if the delivery failed. +// * Notification of +// delivery failure, if the delivery failed. func (c *Client) DeliverConfigSnapshot(ctx context.Context, params *DeliverConfigSnapshotInput, optFns ...func(*Options)) (*DeliverConfigSnapshotOutput, error) { if params == nil { params = &DeliverConfigSnapshotInput{} diff --git a/service/configservice/api_op_DescribeComplianceByConfigRule.go b/service/configservice/api_op_DescribeComplianceByConfigRule.go index 62c24f6909a..9f74fbf3fed 100644 --- a/service/configservice/api_op_DescribeComplianceByConfigRule.go +++ b/service/configservice/api_op_DescribeComplianceByConfigRule.go @@ -18,19 +18,19 @@ import ( // has no current evaluation results for the rule, it returns INSUFFICIENT_DATA. // This result might indicate one of the following conditions: // -// * AWS Config -// has never invoked an evaluation for the rule. To check whether it has, use the +// * AWS Config has +// never invoked an evaluation for the rule. To check whether it has, use the // DescribeConfigRuleEvaluationStatus action to get the // LastSuccessfulInvocationTime and LastFailedInvocationTime. // -// * The rule's AWS +// * The rule's AWS // Lambda function is failing to send evaluation results to AWS Config. Verify that // the role you assigned to your configuration recorder includes the // config:PutEvaluations permission. If the rule is a custom rule, verify that the // AWS Lambda execution role includes the config:PutEvaluations permission. // -// * -// The rule's AWS Lambda function has returned NOT_APPLICABLE for all evaluation +// * The +// rule's AWS Lambda function has returned NOT_APPLICABLE for all evaluation // results. This can occur if the resources were deleted or removed from the rule's // scope. 
func (c *Client) DescribeComplianceByConfigRule(ctx context.Context, params *DescribeComplianceByConfigRuleInput, optFns ...func(*Options)) (*DescribeComplianceByConfigRuleOutput, error) { diff --git a/service/configservice/api_op_DescribeComplianceByResource.go b/service/configservice/api_op_DescribeComplianceByResource.go index ff710cae099..6af6314ffb3 100644 --- a/service/configservice/api_op_DescribeComplianceByResource.go +++ b/service/configservice/api_op_DescribeComplianceByResource.go @@ -19,19 +19,19 @@ import ( // for the resource, it returns INSUFFICIENT_DATA. This result might indicate one // of the following conditions about the rules that evaluate the resource: // -// * -// AWS Config has never invoked an evaluation for the rule. To check whether it -// has, use the DescribeConfigRuleEvaluationStatus action to get the +// * AWS +// Config has never invoked an evaluation for the rule. To check whether it has, +// use the DescribeConfigRuleEvaluationStatus action to get the // LastSuccessfulInvocationTime and LastFailedInvocationTime. // -// * The rule's AWS +// * The rule's AWS // Lambda function is failing to send evaluation results to AWS Config. Verify that // the role that you assigned to your configuration recorder includes the // config:PutEvaluations permission. If the rule is a custom rule, verify that the // AWS Lambda execution role includes the config:PutEvaluations permission. // -// * -// The rule's AWS Lambda function has returned NOT_APPLICABLE for all evaluation +// * The +// rule's AWS Lambda function has returned NOT_APPLICABLE for all evaluation // results. This can occur if the resources were deleted or removed from the rule's // scope. func (c *Client) DescribeComplianceByResource(ctx context.Context, params *DescribeComplianceByResourceInput, optFns ...func(*Options)) (*DescribeComplianceByResourceOutput, error) { diff --git a/service/configservice/api_op_DescribeConfigurationAggregatorSourcesStatus.go b/service/configservice/api_op_DescribeConfigurationAggregatorSourcesStatus.go index c2ec7de0015..3c50d69d53f 100644 --- a/service/configservice/api_op_DescribeConfigurationAggregatorSourcesStatus.go +++ b/service/configservice/api_op_DescribeConfigurationAggregatorSourcesStatus.go @@ -47,13 +47,13 @@ type DescribeConfigurationAggregatorSourcesStatusInput struct { // Filters the status type. // - // * Valid value FAILED indicates errors while moving + // * Valid value FAILED indicates errors while moving // data. // - // * Valid value SUCCEEDED indicates the data was successfully moved. + // * Valid value SUCCEEDED indicates the data was successfully moved. // - // - // * Valid value OUTDATED indicates the data is not the most recent. + // * + // Valid value OUTDATED indicates the data is not the most recent. UpdateStatus []types.AggregatedSourceStatusType } diff --git a/service/configservice/api_op_GetDiscoveredResourceCounts.go b/service/configservice/api_op_GetDiscoveredResourceCounts.go index fdbafe55533..8071fdff450 100644 --- a/service/configservice/api_op_GetDiscoveredResourceCounts.go +++ b/service/configservice/api_op_GetDiscoveredResourceCounts.go @@ -15,40 +15,39 @@ import ( // number of resources that AWS Config is recording in this region for your AWS // account. 
Example // -// * AWS Config is recording three resource types in the US -// East (Ohio) Region for your account: 25 EC2 instances, 20 IAM users, and 15 S3 +// * AWS Config is recording three resource types in the US East +// (Ohio) Region for your account: 25 EC2 instances, 20 IAM users, and 15 S3 // buckets. // -// * You make a call to the GetDiscoveredResourceCounts action and +// * You make a call to the GetDiscoveredResourceCounts action and // specify that you want all resource types. // -// * AWS Config returns the +// * AWS Config returns the // following: // -// * The resource types (EC2 instances, IAM users, and S3 -// buckets). +// * The resource types (EC2 instances, IAM users, and S3 buckets). // -// * The number of each resource type (25, 20, and 15). +// * +// The number of each resource type (25, 20, and 15). // +// * The total number of all +// resources (60). // -// * The total number of all resources (60). -// -// The response is paginated. By -// default, AWS Config lists 100 ResourceCount objects on each page. You can -// customize this number with the limit parameter. The response includes a -// nextToken string. To get the next page of results, run the request again and -// specify the string for the nextToken parameter. If you make a call to the -// GetDiscoveredResourceCounts action, you might not immediately receive resource -// counts in the following situations: -// -// * You are a new AWS Config customer. +// The response is paginated. By default, AWS Config lists 100 +// ResourceCount objects on each page. You can customize this number with the limit +// parameter. The response includes a nextToken string. To get the next page of +// results, run the request again and specify the string for the nextToken +// parameter. If you make a call to the GetDiscoveredResourceCounts action, you +// might not immediately receive resource counts in the following situations: // +// * +// You are a new AWS Config customer. // // * You just enabled resource recording. // -// It might take a few minutes for AWS -// Config to record and count your resources. Wait a few minutes and then retry the -// GetDiscoveredResourceCounts action. +// It +// might take a few minutes for AWS Config to record and count your resources. Wait +// a few minutes and then retry the GetDiscoveredResourceCounts action. func (c *Client) GetDiscoveredResourceCounts(ctx context.Context, params *GetDiscoveredResourceCountsInput, optFns ...func(*Options)) (*GetDiscoveredResourceCountsOutput, error) { if params == nil { params = &GetDiscoveredResourceCountsInput{} @@ -100,16 +99,16 @@ type GetDiscoveredResourceCountsOutput struct { // your account. If you specify resource types in the request, AWS Config returns // only the total number of resources for those resource types. Example // - // * AWS + // * AWS // Config is recording three resource types in the US East (Ohio) Region for your // account: 25 EC2 instances, 20 IAM users, and 15 S3 buckets, for a total of 60 // resources. // - // * You make a call to the GetDiscoveredResourceCounts action and + // * You make a call to the GetDiscoveredResourceCounts action and // specify the resource type, "AWS::EC2::Instances", in the request. // - // * AWS - // Config returns 25 for totalDiscoveredResources. + // * AWS Config + // returns 25 for totalDiscoveredResources. TotalDiscoveredResources *int64 // Metadata pertaining to the operation's result. 
diff --git a/service/configservice/api_op_StartConfigRulesEvaluation.go b/service/configservice/api_op_StartConfigRulesEvaluation.go index ee8ac93b88a..143cc081623 100644 --- a/service/configservice/api_op_StartConfigRulesEvaluation.go +++ b/service/configservice/api_op_StartConfigRulesEvaluation.go @@ -25,20 +25,20 @@ import ( // API is useful if you want to run on-demand evaluations, such as the following // example: // -// * You have a custom rule that evaluates your IAM resources every -// 24 hours. +// * You have a custom rule that evaluates your IAM resources every 24 +// hours. // -// * You update your Lambda function to add additional conditions to -// your rule. +// * You update your Lambda function to add additional conditions to your +// rule. // -// * Instead of waiting for the next periodic evaluation, you call -// the StartConfigRulesEvaluation API. +// * Instead of waiting for the next periodic evaluation, you call the +// StartConfigRulesEvaluation API. // -// * AWS Config invokes your Lambda -// function and evaluates your IAM resources. +// * AWS Config invokes your Lambda function and +// evaluates your IAM resources. // -// * Your custom rule will still -// run periodic evaluations every 24 hours. +// * Your custom rule will still run periodic +// evaluations every 24 hours. func (c *Client) StartConfigRulesEvaluation(ctx context.Context, params *StartConfigRulesEvaluationInput, optFns ...func(*Options)) (*StartConfigRulesEvaluationOutput, error) { if params == nil { params = &StartConfigRulesEvaluationInput{} diff --git a/service/configservice/types/enums.go b/service/configservice/types/enums.go index 08b9442a07b..62f6c258f05 100644 --- a/service/configservice/types/enums.go +++ b/service/configservice/types/enums.go @@ -62,10 +62,10 @@ type ComplianceType string // Enum values for ComplianceType const ( - ComplianceTypeCompliant ComplianceType = "COMPLIANT" - ComplianceTypeNon_compliant ComplianceType = "NON_COMPLIANT" - ComplianceTypeNot_applicable ComplianceType = "NOT_APPLICABLE" - ComplianceTypeInsufficient_data ComplianceType = "INSUFFICIENT_DATA" + ComplianceTypeCompliant ComplianceType = "COMPLIANT" + ComplianceTypeNonCompliant ComplianceType = "NON_COMPLIANT" + ComplianceTypeNotApplicable ComplianceType = "NOT_APPLICABLE" + ComplianceTypeInsufficientData ComplianceType = "INSUFFICIENT_DATA" ) // Values returns all known values for ComplianceType. Note that this can be @@ -84,8 +84,8 @@ type ConfigRuleComplianceSummaryGroupKey string // Enum values for ConfigRuleComplianceSummaryGroupKey const ( - ConfigRuleComplianceSummaryGroupKeyAccount_id ConfigRuleComplianceSummaryGroupKey = "ACCOUNT_ID" - ConfigRuleComplianceSummaryGroupKeyAws_region ConfigRuleComplianceSummaryGroupKey = "AWS_REGION" + ConfigRuleComplianceSummaryGroupKeyAccountId ConfigRuleComplianceSummaryGroupKey = "ACCOUNT_ID" + ConfigRuleComplianceSummaryGroupKeyAwsRegion ConfigRuleComplianceSummaryGroupKey = "AWS_REGION" ) // Values returns all known values for ConfigRuleComplianceSummaryGroupKey. 
Note @@ -103,10 +103,10 @@ type ConfigRuleState string // Enum values for ConfigRuleState const ( - ConfigRuleStateActive ConfigRuleState = "ACTIVE" - ConfigRuleStateDeleting ConfigRuleState = "DELETING" - ConfigRuleStateDeleting_results ConfigRuleState = "DELETING_RESULTS" - ConfigRuleStateEvaluating ConfigRuleState = "EVALUATING" + ConfigRuleStateActive ConfigRuleState = "ACTIVE" + ConfigRuleStateDeleting ConfigRuleState = "DELETING" + ConfigRuleStateDeletingResults ConfigRuleState = "DELETING_RESULTS" + ConfigRuleStateEvaluating ConfigRuleState = "EVALUATING" ) // Values returns all known values for ConfigRuleState. Note that this can be @@ -149,8 +149,8 @@ type ConformancePackComplianceType string // Enum values for ConformancePackComplianceType const ( - ConformancePackComplianceTypeCompliant ConformancePackComplianceType = "COMPLIANT" - ConformancePackComplianceTypeNon_compliant ConformancePackComplianceType = "NON_COMPLIANT" + ConformancePackComplianceTypeCompliant ConformancePackComplianceType = "COMPLIANT" + ConformancePackComplianceTypeNonCompliant ConformancePackComplianceType = "NON_COMPLIANT" ) // Values returns all known values for ConformancePackComplianceType. Note that @@ -168,11 +168,11 @@ type ConformancePackState string // Enum values for ConformancePackState const ( - ConformancePackStateCreate_in_progress ConformancePackState = "CREATE_IN_PROGRESS" - ConformancePackStateCreate_complete ConformancePackState = "CREATE_COMPLETE" - ConformancePackStateCreate_failed ConformancePackState = "CREATE_FAILED" - ConformancePackStateDelete_in_progress ConformancePackState = "DELETE_IN_PROGRESS" - ConformancePackStateDelete_failed ConformancePackState = "DELETE_FAILED" + ConformancePackStateCreateInProgress ConformancePackState = "CREATE_IN_PROGRESS" + ConformancePackStateCreateComplete ConformancePackState = "CREATE_COMPLETE" + ConformancePackStateCreateFailed ConformancePackState = "CREATE_FAILED" + ConformancePackStateDeleteInProgress ConformancePackState = "DELETE_IN_PROGRESS" + ConformancePackStateDeleteFailed ConformancePackState = "DELETE_FAILED" ) // Values returns all known values for ConformancePackState. Note that this can be @@ -192,9 +192,9 @@ type DeliveryStatus string // Enum values for DeliveryStatus const ( - DeliveryStatusSuccess DeliveryStatus = "Success" - DeliveryStatusFailure DeliveryStatus = "Failure" - DeliveryStatusNot_applicable DeliveryStatus = "Not_Applicable" + DeliveryStatusSuccess DeliveryStatus = "Success" + DeliveryStatusFailure DeliveryStatus = "Failure" + DeliveryStatusNotApplicable DeliveryStatus = "Not_Applicable" ) // Values returns all known values for DeliveryStatus. Note that this can be @@ -212,7 +212,7 @@ type EventSource string // Enum values for EventSource const ( - EventSourceAws_config EventSource = "aws.config" + EventSourceAwsConfig EventSource = "aws.config" ) // Values returns all known values for EventSource. 
Note that this can be expanded @@ -228,11 +228,11 @@ type MaximumExecutionFrequency string // Enum values for MaximumExecutionFrequency const ( - MaximumExecutionFrequencyOne_hour MaximumExecutionFrequency = "One_Hour" - MaximumExecutionFrequencyThree_hours MaximumExecutionFrequency = "Three_Hours" - MaximumExecutionFrequencySix_hours MaximumExecutionFrequency = "Six_Hours" - MaximumExecutionFrequencyTwelve_hours MaximumExecutionFrequency = "Twelve_Hours" - MaximumExecutionFrequencyTwentyfour_hours MaximumExecutionFrequency = "TwentyFour_Hours" + MaximumExecutionFrequencyOneHour MaximumExecutionFrequency = "One_Hour" + MaximumExecutionFrequencyThreeHours MaximumExecutionFrequency = "Three_Hours" + MaximumExecutionFrequencySixHours MaximumExecutionFrequency = "Six_Hours" + MaximumExecutionFrequencyTwelveHours MaximumExecutionFrequency = "Twelve_Hours" + MaximumExecutionFrequencyTwentyfourHours MaximumExecutionFrequency = "TwentyFour_Hours" ) // Values returns all known values for MaximumExecutionFrequency. Note that this @@ -252,15 +252,15 @@ type MemberAccountRuleStatus string // Enum values for MemberAccountRuleStatus const ( - MemberAccountRuleStatusCreate_successful MemberAccountRuleStatus = "CREATE_SUCCESSFUL" - MemberAccountRuleStatusCreate_in_progress MemberAccountRuleStatus = "CREATE_IN_PROGRESS" - MemberAccountRuleStatusCreate_failed MemberAccountRuleStatus = "CREATE_FAILED" - MemberAccountRuleStatusDelete_successful MemberAccountRuleStatus = "DELETE_SUCCESSFUL" - MemberAccountRuleStatusDelete_failed MemberAccountRuleStatus = "DELETE_FAILED" - MemberAccountRuleStatusDelete_in_progress MemberAccountRuleStatus = "DELETE_IN_PROGRESS" - MemberAccountRuleStatusUpdate_successful MemberAccountRuleStatus = "UPDATE_SUCCESSFUL" - MemberAccountRuleStatusUpdate_in_progress MemberAccountRuleStatus = "UPDATE_IN_PROGRESS" - MemberAccountRuleStatusUpdate_failed MemberAccountRuleStatus = "UPDATE_FAILED" + MemberAccountRuleStatusCreateSuccessful MemberAccountRuleStatus = "CREATE_SUCCESSFUL" + MemberAccountRuleStatusCreateInProgress MemberAccountRuleStatus = "CREATE_IN_PROGRESS" + MemberAccountRuleStatusCreateFailed MemberAccountRuleStatus = "CREATE_FAILED" + MemberAccountRuleStatusDeleteSuccessful MemberAccountRuleStatus = "DELETE_SUCCESSFUL" + MemberAccountRuleStatusDeleteFailed MemberAccountRuleStatus = "DELETE_FAILED" + MemberAccountRuleStatusDeleteInProgress MemberAccountRuleStatus = "DELETE_IN_PROGRESS" + MemberAccountRuleStatusUpdateSuccessful MemberAccountRuleStatus = "UPDATE_SUCCESSFUL" + MemberAccountRuleStatusUpdateInProgress MemberAccountRuleStatus = "UPDATE_IN_PROGRESS" + MemberAccountRuleStatusUpdateFailed MemberAccountRuleStatus = "UPDATE_FAILED" ) // Values returns all known values for MemberAccountRuleStatus. 
Note that this can @@ -306,9 +306,9 @@ type OrganizationConfigRuleTriggerType string // Enum values for OrganizationConfigRuleTriggerType const ( - OrganizationConfigRuleTriggerTypeConfiguration_item_change_notification OrganizationConfigRuleTriggerType = "ConfigurationItemChangeNotification" - OrganizationConfigRuleTriggerTypeOversized_configuration_item_change_notifcation OrganizationConfigRuleTriggerType = "OversizedConfigurationItemChangeNotification" - OrganizationConfigRuleTriggerTypeScheduled_notification OrganizationConfigRuleTriggerType = "ScheduledNotification" + OrganizationConfigRuleTriggerTypeConfigurationItemChangeNotification OrganizationConfigRuleTriggerType = "ConfigurationItemChangeNotification" + OrganizationConfigRuleTriggerTypeOversizedConfigurationItemChangeNotifcation OrganizationConfigRuleTriggerType = "OversizedConfigurationItemChangeNotification" + OrganizationConfigRuleTriggerTypeScheduledNotification OrganizationConfigRuleTriggerType = "ScheduledNotification" ) // Values returns all known values for OrganizationConfigRuleTriggerType. Note that @@ -327,15 +327,15 @@ type OrganizationResourceDetailedStatus string // Enum values for OrganizationResourceDetailedStatus const ( - OrganizationResourceDetailedStatusCreate_successful OrganizationResourceDetailedStatus = "CREATE_SUCCESSFUL" - OrganizationResourceDetailedStatusCreate_in_progress OrganizationResourceDetailedStatus = "CREATE_IN_PROGRESS" - OrganizationResourceDetailedStatusCreate_failed OrganizationResourceDetailedStatus = "CREATE_FAILED" - OrganizationResourceDetailedStatusDelete_successful OrganizationResourceDetailedStatus = "DELETE_SUCCESSFUL" - OrganizationResourceDetailedStatusDelete_failed OrganizationResourceDetailedStatus = "DELETE_FAILED" - OrganizationResourceDetailedStatusDelete_in_progress OrganizationResourceDetailedStatus = "DELETE_IN_PROGRESS" - OrganizationResourceDetailedStatusUpdate_successful OrganizationResourceDetailedStatus = "UPDATE_SUCCESSFUL" - OrganizationResourceDetailedStatusUpdate_in_progress OrganizationResourceDetailedStatus = "UPDATE_IN_PROGRESS" - OrganizationResourceDetailedStatusUpdate_failed OrganizationResourceDetailedStatus = "UPDATE_FAILED" + OrganizationResourceDetailedStatusCreateSuccessful OrganizationResourceDetailedStatus = "CREATE_SUCCESSFUL" + OrganizationResourceDetailedStatusCreateInProgress OrganizationResourceDetailedStatus = "CREATE_IN_PROGRESS" + OrganizationResourceDetailedStatusCreateFailed OrganizationResourceDetailedStatus = "CREATE_FAILED" + OrganizationResourceDetailedStatusDeleteSuccessful OrganizationResourceDetailedStatus = "DELETE_SUCCESSFUL" + OrganizationResourceDetailedStatusDeleteFailed OrganizationResourceDetailedStatus = "DELETE_FAILED" + OrganizationResourceDetailedStatusDeleteInProgress OrganizationResourceDetailedStatus = "DELETE_IN_PROGRESS" + OrganizationResourceDetailedStatusUpdateSuccessful OrganizationResourceDetailedStatus = "UPDATE_SUCCESSFUL" + OrganizationResourceDetailedStatusUpdateInProgress OrganizationResourceDetailedStatus = "UPDATE_IN_PROGRESS" + OrganizationResourceDetailedStatusUpdateFailed OrganizationResourceDetailedStatus = "UPDATE_FAILED" ) // Values returns all known values for OrganizationResourceDetailedStatus. 
Note @@ -360,15 +360,15 @@ type OrganizationResourceStatus string // Enum values for OrganizationResourceStatus const ( - OrganizationResourceStatusCreate_successful OrganizationResourceStatus = "CREATE_SUCCESSFUL" - OrganizationResourceStatusCreate_in_progress OrganizationResourceStatus = "CREATE_IN_PROGRESS" - OrganizationResourceStatusCreate_failed OrganizationResourceStatus = "CREATE_FAILED" - OrganizationResourceStatusDelete_successful OrganizationResourceStatus = "DELETE_SUCCESSFUL" - OrganizationResourceStatusDelete_failed OrganizationResourceStatus = "DELETE_FAILED" - OrganizationResourceStatusDelete_in_progress OrganizationResourceStatus = "DELETE_IN_PROGRESS" - OrganizationResourceStatusUpdate_successful OrganizationResourceStatus = "UPDATE_SUCCESSFUL" - OrganizationResourceStatusUpdate_in_progress OrganizationResourceStatus = "UPDATE_IN_PROGRESS" - OrganizationResourceStatusUpdate_failed OrganizationResourceStatus = "UPDATE_FAILED" + OrganizationResourceStatusCreateSuccessful OrganizationResourceStatus = "CREATE_SUCCESSFUL" + OrganizationResourceStatusCreateInProgress OrganizationResourceStatus = "CREATE_IN_PROGRESS" + OrganizationResourceStatusCreateFailed OrganizationResourceStatus = "CREATE_FAILED" + OrganizationResourceStatusDeleteSuccessful OrganizationResourceStatus = "DELETE_SUCCESSFUL" + OrganizationResourceStatusDeleteFailed OrganizationResourceStatus = "DELETE_FAILED" + OrganizationResourceStatusDeleteInProgress OrganizationResourceStatus = "DELETE_IN_PROGRESS" + OrganizationResourceStatusUpdateSuccessful OrganizationResourceStatus = "UPDATE_SUCCESSFUL" + OrganizationResourceStatusUpdateInProgress OrganizationResourceStatus = "UPDATE_IN_PROGRESS" + OrganizationResourceStatusUpdateFailed OrganizationResourceStatus = "UPDATE_FAILED" ) // Values returns all known values for OrganizationResourceStatus. 
Note that this @@ -392,15 +392,15 @@ type OrganizationRuleStatus string // Enum values for OrganizationRuleStatus const ( - OrganizationRuleStatusCreate_successful OrganizationRuleStatus = "CREATE_SUCCESSFUL" - OrganizationRuleStatusCreate_in_progress OrganizationRuleStatus = "CREATE_IN_PROGRESS" - OrganizationRuleStatusCreate_failed OrganizationRuleStatus = "CREATE_FAILED" - OrganizationRuleStatusDelete_successful OrganizationRuleStatus = "DELETE_SUCCESSFUL" - OrganizationRuleStatusDelete_failed OrganizationRuleStatus = "DELETE_FAILED" - OrganizationRuleStatusDelete_in_progress OrganizationRuleStatus = "DELETE_IN_PROGRESS" - OrganizationRuleStatusUpdate_successful OrganizationRuleStatus = "UPDATE_SUCCESSFUL" - OrganizationRuleStatusUpdate_in_progress OrganizationRuleStatus = "UPDATE_IN_PROGRESS" - OrganizationRuleStatusUpdate_failed OrganizationRuleStatus = "UPDATE_FAILED" + OrganizationRuleStatusCreateSuccessful OrganizationRuleStatus = "CREATE_SUCCESSFUL" + OrganizationRuleStatusCreateInProgress OrganizationRuleStatus = "CREATE_IN_PROGRESS" + OrganizationRuleStatusCreateFailed OrganizationRuleStatus = "CREATE_FAILED" + OrganizationRuleStatusDeleteSuccessful OrganizationRuleStatus = "DELETE_SUCCESSFUL" + OrganizationRuleStatusDeleteFailed OrganizationRuleStatus = "DELETE_FAILED" + OrganizationRuleStatusDeleteInProgress OrganizationRuleStatus = "DELETE_IN_PROGRESS" + OrganizationRuleStatusUpdateSuccessful OrganizationRuleStatus = "UPDATE_SUCCESSFUL" + OrganizationRuleStatusUpdateInProgress OrganizationRuleStatus = "UPDATE_IN_PROGRESS" + OrganizationRuleStatusUpdateFailed OrganizationRuleStatus = "UPDATE_FAILED" ) // Values returns all known values for OrganizationRuleStatus. Note that this can @@ -424,8 +424,8 @@ type Owner string // Enum values for Owner const ( - OwnerCustom_lambda Owner = "CUSTOM_LAMBDA" - OwnerAws Owner = "AWS" + OwnerCustomLambda Owner = "CUSTOM_LAMBDA" + OwnerAws Owner = "AWS" ) // Values returns all known values for Owner. Note that this can be expanded in the @@ -462,10 +462,10 @@ type RemediationExecutionState string // Enum values for RemediationExecutionState const ( - RemediationExecutionStateQueued RemediationExecutionState = "QUEUED" - RemediationExecutionStateIn_progress RemediationExecutionState = "IN_PROGRESS" - RemediationExecutionStateSucceeded RemediationExecutionState = "SUCCEEDED" - RemediationExecutionStateFailed RemediationExecutionState = "FAILED" + RemediationExecutionStateQueued RemediationExecutionState = "QUEUED" + RemediationExecutionStateInProgress RemediationExecutionState = "IN_PROGRESS" + RemediationExecutionStateSucceeded RemediationExecutionState = "SUCCEEDED" + RemediationExecutionStateFailed RemediationExecutionState = "FAILED" ) // Values returns all known values for RemediationExecutionState. Note that this @@ -505,7 +505,7 @@ type RemediationTargetType string // Enum values for RemediationTargetType const ( - RemediationTargetTypeSsm_document RemediationTargetType = "SSM_DOCUMENT" + RemediationTargetTypeSsmDocument RemediationTargetType = "SSM_DOCUMENT" ) // Values returns all known values for RemediationTargetType. 
Note that this can be @@ -521,9 +521,9 @@ type ResourceCountGroupKey string // Enum values for ResourceCountGroupKey const ( - ResourceCountGroupKeyResource_type ResourceCountGroupKey = "RESOURCE_TYPE" - ResourceCountGroupKeyAccount_id ResourceCountGroupKey = "ACCOUNT_ID" - ResourceCountGroupKeyAws_region ResourceCountGroupKey = "AWS_REGION" + ResourceCountGroupKeyResourceType ResourceCountGroupKey = "RESOURCE_TYPE" + ResourceCountGroupKeyAccountId ResourceCountGroupKey = "ACCOUNT_ID" + ResourceCountGroupKeyAwsRegion ResourceCountGroupKey = "AWS_REGION" ) // Values returns all known values for ResourceCountGroupKey. Note that this can be @@ -741,7 +741,7 @@ type ResourceValueType string // Enum values for ResourceValueType const ( - ResourceValueTypeResource_id ResourceValueType = "RESOURCE_ID" + ResourceValueTypeResourceId ResourceValueType = "RESOURCE_ID" ) // Values returns all known values for ResourceValueType. Note that this can be diff --git a/service/configservice/types/errors.go b/service/configservice/types/errors.go index 7fa2157cb4b..be6769d2107 100644 --- a/service/configservice/types/errors.go +++ b/service/configservice/types/errors.go @@ -51,26 +51,26 @@ func (e *InsufficientDeliveryPolicyException) ErrorFault() smithy.ErrorFault { // Indicates one of the following errors: // -// * For PutConfigRule, the rule cannot -// be created because the IAM role assigned to AWS Config lacks permissions to -// perform the config:Put* action. +// * For PutConfigRule, the rule cannot be +// created because the IAM role assigned to AWS Config lacks permissions to perform +// the config:Put* action. // -// * For PutConfigRule, the AWS Lambda -// function cannot be invoked. Check the function ARN, and check the function's -// permissions. +// * For PutConfigRule, the AWS Lambda function cannot be +// invoked. Check the function ARN, and check the function's permissions. // -// * For PutOrganizationConfigRule, organization config rule -// cannot be created because you do not have permissions to call IAM GetRole action -// or create a service linked role. +// * For +// PutOrganizationConfigRule, organization config rule cannot be created because +// you do not have permissions to call IAM GetRole action or create a service +// linked role. // -// * For PutConformancePack and -// PutOrganizationConformancePack, a conformance pack cannot be created because you -// do not have permissions: +// * For PutConformancePack and PutOrganizationConformancePack, a +// conformance pack cannot be created because you do not have permissions: // -// * To call IAM GetRole action or create a -// service linked role. +// * To +// call IAM GetRole action or create a service linked role. // -// * To read Amazon S3 bucket. +// * To read Amazon S3 +// bucket. type InsufficientPermissionsException struct { Message *string } @@ -951,32 +951,31 @@ func (e *RemediationInProgressException) ErrorFault() smithy.ErrorFault { return // You see this exception in the following cases: // -// * For DeleteConfigRule, AWS +// * For DeleteConfigRule, AWS // Config is deleting this rule. Try your request again later. // -// * For +// * For // DeleteConfigRule, the rule is deleting your evaluation results. Try your request // again later. // -// * For DeleteConfigRule, a remediation action is associated -// with the rule and AWS Config cannot delete this rule. Delete the remediation -// action associated with the rule before deleting the rule and try your request -// again later. 
+// * For DeleteConfigRule, a remediation action is associated with +// the rule and AWS Config cannot delete this rule. Delete the remediation action +// associated with the rule before deleting the rule and try your request again +// later. // -// * For PutConfigOrganizationRule, organization config rule -// deletion is in progress. Try your request again later. +// * For PutConfigOrganizationRule, organization config rule deletion is in +// progress. Try your request again later. // -// * For -// DeleteOrganizationConfigRule, organization config rule creation is in progress. -// Try your request again later. +// * For DeleteOrganizationConfigRule, +// organization config rule creation is in progress. Try your request again +// later. // -// * For PutConformancePack and -// PutOrganizationConformancePack, a conformance pack creation, update, and -// deletion is in progress. Try your request again later. +// * For PutConformancePack and PutOrganizationConformancePack, a +// conformance pack creation, update, and deletion is in progress. Try your request +// again later. // -// * For -// DeleteConformancePack, a conformance pack creation, update, and deletion is in -// progress. Try your request again later. +// * For DeleteConformancePack, a conformance pack creation, update, +// and deletion is in progress. Try your request again later. type ResourceInUseException struct { Message *string } diff --git a/service/configservice/types/types.go b/service/configservice/types/types.go index 11970da5ef4..cef03c8c89d 100644 --- a/service/configservice/types/types.go +++ b/service/configservice/types/types.go @@ -68,14 +68,13 @@ type AggregatedSourceStatus struct { // Filters the last updated status type. // - // * Valid value FAILED indicates errors + // * Valid value FAILED indicates errors // while moving data. // - // * Valid value SUCCEEDED indicates the data was - // successfully moved. + // * Valid value SUCCEEDED indicates the data was successfully + // moved. // - // * Valid value OUTDATED indicates the data is not the - // most recent. + // * Valid value OUTDATED indicates the data is not the most recent. LastUpdateStatus AggregatedSourceStatusType // The time of the last update. @@ -187,19 +186,19 @@ type BaseConfigurationItem struct { // The configuration item status. The valid values are: // - // * OK – The resource + // * OK – The resource // configuration has been updated // - // * ResourceDiscovered – The resource was - // newly discovered + // * ResourceDiscovered – The resource was newly + // discovered // - // * ResourceNotRecorded – The resource was discovered but - // its configuration was not recorded since the recorder excludes the recording of + // * ResourceNotRecorded – The resource was discovered but its + // configuration was not recorded since the recorder excludes the recording of // resources of this type // - // * ResourceDeleted – The resource was deleted + // * ResourceDeleted – The resource was deleted // - // * + // * // ResourceDeletedNotRecorded – The resource was deleted but its configuration was // not recorded since the recorder excludes the recording of resources of this // type @@ -401,11 +400,11 @@ type ConfigRule struct { // The maximum frequency with which AWS Config runs evaluations for a rule. You can // specify a value for MaximumExecutionFrequency when: // - // * You are using an AWS + // * You are using an AWS // managed rule that is triggered at a periodic frequency. 
// - // * Your custom rule - // is triggered when AWS Config delivers the configuration snapshot. For more + // * Your custom rule is + // triggered when AWS Config delivers the configuration snapshot. For more // information, see ConfigSnapshotDeliveryProperties. // // By default, rules with a @@ -472,11 +471,11 @@ type ConfigRuleEvaluationStatus struct { // Indicates whether AWS Config has evaluated your resources against the rule at // least once. // - // * true - AWS Config has evaluated your AWS resources against - // the rule at least once. + // * true - AWS Config has evaluated your AWS resources against the + // rule at least once. // - // * false - AWS Config has not once finished - // evaluating your AWS resources against the rule. + // * false - AWS Config has not once finished evaluating your + // AWS resources against the rule. FirstEvaluationStarted *bool // The time that you last turned off the AWS Config rule. @@ -511,39 +510,38 @@ type ConfigRuleEvaluationStatus struct { // configuration snapshot is set by one of two values, depending on which is less // frequent: // -// * The value for the deliveryFrequency parameter within the -// delivery channel configuration, which sets how often AWS Config delivers -// configuration snapshots. This value also sets how often AWS Config invokes -// evaluations for AWS Config rules. +// * The value for the deliveryFrequency parameter within the delivery +// channel configuration, which sets how often AWS Config delivers configuration +// snapshots. This value also sets how often AWS Config invokes evaluations for AWS +// Config rules. // -// * The value for the -// MaximumExecutionFrequency parameter, which sets the maximum frequency with which -// AWS Config invokes evaluations for the rule. For more information, see -// ConfigRule. +// * The value for the MaximumExecutionFrequency parameter, which +// sets the maximum frequency with which AWS Config invokes evaluations for the +// rule. For more information, see ConfigRule. // -// If the deliveryFrequency value is less frequent than the -// MaximumExecutionFrequency value for a rule, AWS Config invokes the rule only as -// often as the deliveryFrequency value. +// If the deliveryFrequency value is +// less frequent than the MaximumExecutionFrequency value for a rule, AWS Config +// invokes the rule only as often as the deliveryFrequency value. // -// * For example, you want your rule to -// run evaluations when AWS Config delivers the configuration snapshot. +// * For example, +// you want your rule to run evaluations when AWS Config delivers the configuration +// snapshot. // -// * You -// specify the MaximumExecutionFrequency value for Six_Hours. +// * You specify the MaximumExecutionFrequency value for Six_Hours. // -// * You then -// specify the delivery channel deliveryFrequency value for TwentyFour_Hours. +// * +// You then specify the delivery channel deliveryFrequency value for +// TwentyFour_Hours. // +// * Because the value for deliveryFrequency is less frequent +// than MaximumExecutionFrequency, AWS Config invokes evaluations for the rule +// every 24 hours. // -// * Because the value for deliveryFrequency is less frequent than -// MaximumExecutionFrequency, AWS Config invokes evaluations for the rule every 24 -// hours. -// -// You should set the MaximumExecutionFrequency value to be at least as -// frequent as the deliveryFrequency value. You can view the deliveryFrequency -// value by using the DescribeDeliveryChannnels action. 
To update the -// deliveryFrequency with which AWS Config delivers your configuration snapshots, -// use the PutDeliveryChannel action. +// You should set the MaximumExecutionFrequency value to be at +// least as frequent as the deliveryFrequency value. You can view the +// deliveryFrequency value by using the DescribeDeliveryChannnels action. To update +// the deliveryFrequency with which AWS Config delivers your configuration +// snapshots, use the PutDeliveryChannel action. type ConfigSnapshotDeliveryProperties struct { // The frequency with which AWS Config delivers configuration snapshots. @@ -625,19 +623,19 @@ type ConfigurationItem struct { // The configuration item status. The valid values are: // - // * OK – The resource + // * OK – The resource // configuration has been updated // - // * ResourceDiscovered – The resource was - // newly discovered + // * ResourceDiscovered – The resource was newly + // discovered // - // * ResourceNotRecorded – The resource was discovered but - // its configuration was not recorded since the recorder excludes the recording of + // * ResourceNotRecorded – The resource was discovered but its + // configuration was not recorded since the recorder excludes the recording of // resources of this type // - // * ResourceDeleted – The resource was deleted + // * ResourceDeleted – The resource was deleted // - // * + // * // ResourceDeletedNotRecorded – The resource was deleted but its configuration was // not recorded since the recorder excludes the recording of resources of this // type @@ -893,20 +891,20 @@ type ConformancePackStatusDetail struct { // Indicates deployment status of conformance pack. AWS Config sets the state of // the conformance pack to: // - // * CREATE_IN_PROGRESS when a conformance pack - // creation is in progress for an account. + // * CREATE_IN_PROGRESS when a conformance pack creation + // is in progress for an account. // - // * CREATE_COMPLETE when a - // conformance pack has been successfully created in your account. + // * CREATE_COMPLETE when a conformance pack has + // been successfully created in your account. // - // * - // CREATE_FAILED when a conformance pack creation failed in your account. + // * CREATE_FAILED when a conformance + // pack creation failed in your account. // - // * - // DELETE_IN_PROGRESS when a conformance pack deletion is in progress. + // * DELETE_IN_PROGRESS when a conformance + // pack deletion is in progress. // - // * - // DELETE_FAILED when a conformance pack deletion failed in your account. + // * DELETE_FAILED when a conformance pack deletion + // failed in your account. // // This member is required. ConformancePackState ConformancePackState @@ -1162,32 +1160,32 @@ type MemberAccountStatus struct { // config-multiaccountsetup.amazonaws.com. AWS Config sets the state of the rule // to: // - // * CREATE_SUCCESSFUL when config rule has been created in the member + // * CREATE_SUCCESSFUL when config rule has been created in the member // account. // - // * CREATE_IN_PROGRESS when config rule is being created in the - // member account. + // * CREATE_IN_PROGRESS when config rule is being created in the member + // account. // - // * CREATE_FAILED when config rule creation has failed in the - // member account. + // * CREATE_FAILED when config rule creation has failed in the member + // account. // - // * DELETE_FAILED when config rule deletion has failed in the - // member account. + // * DELETE_FAILED when config rule deletion has failed in the member + // account. 
// - // * DELETE_IN_PROGRESS when config rule is being deleted in - // the member account. + // * DELETE_IN_PROGRESS when config rule is being deleted in the member + // account. // - // * DELETE_SUCCESSFUL when config rule has been deleted - // in the member account. + // * DELETE_SUCCESSFUL when config rule has been deleted in the member + // account. // - // * UPDATE_SUCCESSFUL when config rule has been - // updated in the member account. + // * UPDATE_SUCCESSFUL when config rule has been updated in the member + // account. // - // * UPDATE_IN_PROGRESS when config rule is - // being updated in the member account. + // * UPDATE_IN_PROGRESS when config rule is being updated in the member + // account. // - // * UPDATE_FAILED when config rule - // deletion has failed in the member account. + // * UPDATE_FAILED when config rule deletion has failed in the member + // account. // // This member is required. MemberAccountRuleStatus MemberAccountRuleStatus @@ -1267,35 +1265,35 @@ type OrganizationConfigRuleStatus struct { // config-multiaccountsetup.amazonaws.com. AWS Config sets the state of the rule // to: // - // * CREATE_SUCCESSFUL when an organization config rule has been - // successfully created in all the member accounts. - // - // * CREATE_IN_PROGRESS when - // an organization config rule creation is in progress. + // * CREATE_SUCCESSFUL when an organization config rule has been successfully + // created in all the member accounts. // - // * CREATE_FAILED when - // an organization config rule creation failed in one or more member accounts - // within that organization. + // * CREATE_IN_PROGRESS when an organization + // config rule creation is in progress. // - // * DELETE_FAILED when an organization config rule - // deletion failed in one or more member accounts within that organization. + // * CREATE_FAILED when an organization + // config rule creation failed in one or more member accounts within that + // organization. // - // * - // DELETE_IN_PROGRESS when an organization config rule deletion is in progress. + // * DELETE_FAILED when an organization config rule deletion failed + // in one or more member accounts within that organization. // + // * DELETE_IN_PROGRESS + // when an organization config rule deletion is in progress. // - // * DELETE_SUCCESSFUL when an organization config rule has been successfully - // deleted from all the member accounts. + // * DELETE_SUCCESSFUL + // when an organization config rule has been successfully deleted from all the + // member accounts. // - // * UPDATE_SUCCESSFUL when an - // organization config rule has been successfully updated in all the member - // accounts. + // * UPDATE_SUCCESSFUL when an organization config rule has been + // successfully updated in all the member accounts. // - // * UPDATE_IN_PROGRESS when an organization config rule update is - // in progress. + // * UPDATE_IN_PROGRESS when an + // organization config rule update is in progress. // - // * UPDATE_FAILED when an organization config rule update failed - // in one or more member accounts within that organization. + // * UPDATE_FAILED when an + // organization config rule update failed in one or more member accounts within + // that organization. // // This member is required. OrganizationRuleStatus OrganizationRuleStatus @@ -1369,33 +1367,33 @@ type OrganizationConformancePackDetailedStatus struct { // access for config-multiaccountsetup.amazonaws.com. 
AWS Config sets the state of // the conformance pack to: // - // * CREATE_SUCCESSFUL when conformance pack has been + // * CREATE_SUCCESSFUL when conformance pack has been // created in the member account. // - // * CREATE_IN_PROGRESS when conformance pack - // is being created in the member account. + // * CREATE_IN_PROGRESS when conformance pack is + // being created in the member account. // - // * CREATE_FAILED when conformance - // pack creation has failed in the member account. + // * CREATE_FAILED when conformance pack + // creation has failed in the member account. // - // * DELETE_FAILED when - // conformance pack deletion has failed in the member account. + // * DELETE_FAILED when conformance + // pack deletion has failed in the member account. // - // * - // DELETE_IN_PROGRESS when conformance pack is being deleted in the member - // account. + // * DELETE_IN_PROGRESS when + // conformance pack is being deleted in the member account. // - // * DELETE_SUCCESSFUL when conformance pack has been deleted in the - // member account. + // * DELETE_SUCCESSFUL + // when conformance pack has been deleted in the member account. // - // * UPDATE_SUCCESSFUL when conformance pack has been updated - // in the member account. + // * + // UPDATE_SUCCESSFUL when conformance pack has been updated in the member + // account. // - // * UPDATE_IN_PROGRESS when conformance pack is being - // updated in the member account. + // * UPDATE_IN_PROGRESS when conformance pack is being updated in the + // member account. // - // * UPDATE_FAILED when conformance pack - // deletion has failed in the member account. + // * UPDATE_FAILED when conformance pack deletion has failed in + // the member account. // // This member is required. Status OrganizationResourceDetailedStatus @@ -1431,36 +1429,36 @@ type OrganizationConformancePackStatus struct { // access for config-multiaccountsetup.amazonaws.com. AWS Config sets the state of // the conformance pack to: // - // * CREATE_SUCCESSFUL when an organization - // conformance pack has been successfully created in all the member accounts. + // * CREATE_SUCCESSFUL when an organization conformance + // pack has been successfully created in all the member accounts. // - // - // * CREATE_IN_PROGRESS when an organization conformance pack creation is in + // * + // CREATE_IN_PROGRESS when an organization conformance pack creation is in // progress. // - // * CREATE_FAILED when an organization conformance pack creation - // failed in one or more member accounts within that organization. + // * CREATE_FAILED when an organization conformance pack creation failed + // in one or more member accounts within that organization. // - // * - // DELETE_FAILED when an organization conformance pack deletion failed in one or - // more member accounts within that organization. + // * DELETE_FAILED when + // an organization conformance pack deletion failed in one or more member accounts + // within that organization. // - // * DELETE_IN_PROGRESS when an - // organization conformance pack deletion is in progress. + // * DELETE_IN_PROGRESS when an organization conformance + // pack deletion is in progress. // - // * DELETE_SUCCESSFUL - // when an organization conformance pack has been successfully deleted from all the - // member accounts. + // * DELETE_SUCCESSFUL when an organization + // conformance pack has been successfully deleted from all the member accounts. // - // * UPDATE_SUCCESSFUL when an organization conformance pack - // has been successfully updated in all the member accounts. 
+ // * + // UPDATE_SUCCESSFUL when an organization conformance pack has been successfully + // updated in all the member accounts. // - // * - // UPDATE_IN_PROGRESS when an organization conformance pack update is in - // progress. + // * UPDATE_IN_PROGRESS when an organization + // conformance pack update is in progress. // - // * UPDATE_FAILED when an organization conformance pack update - // failed in one or more member accounts within that organization. + // * UPDATE_FAILED when an organization + // conformance pack update failed in one or more member accounts within that + // organization. // // This member is required. Status OrganizationResourceStatus @@ -1492,17 +1490,17 @@ type OrganizationCustomRuleMetadata struct { // The type of notification that triggers AWS Config to run an evaluation for a // rule. You can specify the following notification types: // - // * + // * // ConfigurationItemChangeNotification - Triggers an evaluation when AWS Config // delivers a configuration item as a result of a resource change. // - // * + // * // OversizedConfigurationItemChangeNotification - Triggers an evaluation when AWS // Config delivers an oversized configuration item. AWS Config may generate this // notification type when a resource changes and the notification exceeds the // maximum size allowed by Amazon SNS. // - // * ScheduledNotification - Triggers a + // * ScheduledNotification - Triggers a // periodic evaluation at the frequency specified for MaximumExecutionFrequency. // // This member is required. @@ -1595,33 +1593,33 @@ type OrganizationResourceDetailedStatusFilters struct { // access for config-multiaccountsetup.amazonaws.com. AWS Config sets the state of // the conformance pack to: // - // * CREATE_SUCCESSFUL when conformance pack has been + // * CREATE_SUCCESSFUL when conformance pack has been // created in the member account. // - // * CREATE_IN_PROGRESS when conformance pack - // is being created in the member account. + // * CREATE_IN_PROGRESS when conformance pack is + // being created in the member account. // - // * CREATE_FAILED when conformance - // pack creation has failed in the member account. + // * CREATE_FAILED when conformance pack + // creation has failed in the member account. // - // * DELETE_FAILED when - // conformance pack deletion has failed in the member account. + // * DELETE_FAILED when conformance + // pack deletion has failed in the member account. // - // * - // DELETE_IN_PROGRESS when conformance pack is being deleted in the member - // account. + // * DELETE_IN_PROGRESS when + // conformance pack is being deleted in the member account. // - // * DELETE_SUCCESSFUL when conformance pack has been deleted in the - // member account. + // * DELETE_SUCCESSFUL + // when conformance pack has been deleted in the member account. // - // * UPDATE_SUCCESSFUL when conformance pack has been updated - // in the member account. + // * + // UPDATE_SUCCESSFUL when conformance pack has been updated in the member + // account. // - // * UPDATE_IN_PROGRESS when conformance pack is being - // updated in the member account. + // * UPDATE_IN_PROGRESS when conformance pack is being updated in the + // member account. // - // * UPDATE_FAILED when conformance pack - // deletion has failed in the member account. + // * UPDATE_FAILED when conformance pack deletion has failed in + // the member account. 
Status OrganizationResourceDetailedStatus } @@ -2031,25 +2029,25 @@ type SourceDetail struct { // The type of notification that triggers AWS Config to run an evaluation for a // rule. You can specify the following notification types: // - // * + // * // ConfigurationItemChangeNotification - Triggers an evaluation when AWS Config // delivers a configuration item as a result of a resource change. // - // * + // * // OversizedConfigurationItemChangeNotification - Triggers an evaluation when AWS // Config delivers an oversized configuration item. AWS Config may generate this // notification type when a resource changes and the notification exceeds the // maximum size allowed by Amazon SNS. // - // * ScheduledNotification - Triggers a + // * ScheduledNotification - Triggers a // periodic evaluation at the frequency specified for MaximumExecutionFrequency. // + // * + // ConfigurationSnapshotDeliveryCompleted - Triggers a periodic evaluation when AWS + // Config delivers a configuration snapshot. // - // * ConfigurationSnapshotDeliveryCompleted - Triggers a periodic evaluation when - // AWS Config delivers a configuration snapshot. - // - // If you want your custom rule to - // be triggered by configuration changes, specify two SourceDetail objects, one for + // If you want your custom rule to be + // triggered by configuration changes, specify two SourceDetail objects, one for // ConfigurationItemChangeNotification and one for // OversizedConfigurationItemChangeNotification. MessageType MessageType @@ -2097,32 +2095,32 @@ type StatusDetailFilters struct { // config-multiaccountsetup.amazonaws.com. AWS Config sets the state of the rule // to: // - // * CREATE_SUCCESSFUL when config rule has been created in the member + // * CREATE_SUCCESSFUL when config rule has been created in the member // account. // - // * CREATE_IN_PROGRESS when config rule is being created in the - // member account. + // * CREATE_IN_PROGRESS when config rule is being created in the member + // account. // - // * CREATE_FAILED when config rule creation has failed in the - // member account. + // * CREATE_FAILED when config rule creation has failed in the member + // account. // - // * DELETE_FAILED when config rule deletion has failed in the - // member account. + // * DELETE_FAILED when config rule deletion has failed in the member + // account. // - // * DELETE_IN_PROGRESS when config rule is being deleted in - // the member account. + // * DELETE_IN_PROGRESS when config rule is being deleted in the member + // account. // - // * DELETE_SUCCESSFUL when config rule has been deleted - // in the member account. + // * DELETE_SUCCESSFUL when config rule has been deleted in the member + // account. // - // * UPDATE_SUCCESSFUL when config rule has been - // updated in the member account. + // * UPDATE_SUCCESSFUL when config rule has been updated in the member + // account. // - // * UPDATE_IN_PROGRESS when config rule is - // being updated in the member account. + // * UPDATE_IN_PROGRESS when config rule is being updated in the member + // account. // - // * UPDATE_FAILED when config rule - // deletion has failed in the member account. + // * UPDATE_FAILED when config rule deletion has failed in the member + // account. 
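// Caller-side sketch, not part of the generated code in this patch: the
// SourceDetail documentation above says a custom rule triggered by
// configuration changes needs two SourceDetail objects, one for
// ConfigurationItemChangeNotification and one for
// OversizedConfigurationItemChangeNotification. The MessageType constant names
// are not shown in this hunk, so the string-backed enum values are written out
// literally; the configservice module path and the []*SourceDetail slice shape
// are assumptions here, not confirmed by this hunk.
package example

import "github.com/aws/aws-sdk-go-v2/service/configservice/types"

// changeTriggeredSourceDetails returns the pair of SourceDetail values that
// the documentation above calls for when a custom rule should run on
// configuration changes.
func changeTriggeredSourceDetails() []*types.SourceDetail {
	return []*types.SourceDetail{
		{MessageType: types.MessageType("ConfigurationItemChangeNotification")},
		{MessageType: types.MessageType("OversizedConfigurationItemChangeNotification")},
	}
}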
MemberAccountRuleStatus MemberAccountRuleStatus } diff --git a/service/connect/api_op_StartChatContact.go b/service/connect/api_op_StartChatContact.go index 221224fb975..c3f4166645e 100644 --- a/service/connect/api_op_StartChatContact.go +++ b/service/connect/api_op_StartChatContact.go @@ -24,10 +24,10 @@ import ( // with WEBSOCKET and CONNECTION_CREDENTIALS. A 429 error occurs in two // situations: // -// * API rate limit is exceeded. API TPS throttling returns a +// * API rate limit is exceeded. API TPS throttling returns a // TooManyRequests exception from the API Gateway. // -// * The quota for concurrent +// * The quota for concurrent // active chats // (https://docs.aws.amazon.com/connect/latest/adminguide/amazon-connect-service-limits.html) // is exceeded. Active chat throttling returns a LimitExceededException. diff --git a/service/connect/types/enums.go b/service/connect/types/enums.go index 10c61f1494a..ab93cb8fa43 100644 --- a/service/connect/types/enums.go +++ b/service/connect/types/enums.go @@ -40,15 +40,15 @@ type ContactFlowType string // Enum values for ContactFlowType const ( - ContactFlowTypeContact_flow ContactFlowType = "CONTACT_FLOW" - ContactFlowTypeCustomer_queue ContactFlowType = "CUSTOMER_QUEUE" - ContactFlowTypeCustomer_hold ContactFlowType = "CUSTOMER_HOLD" - ContactFlowTypeCustomer_whisper ContactFlowType = "CUSTOMER_WHISPER" - ContactFlowTypeAgent_hold ContactFlowType = "AGENT_HOLD" - ContactFlowTypeAgent_whisper ContactFlowType = "AGENT_WHISPER" - ContactFlowTypeOutbound_whisper ContactFlowType = "OUTBOUND_WHISPER" - ContactFlowTypeAgent_transfer ContactFlowType = "AGENT_TRANSFER" - ContactFlowTypeQueue_transfer ContactFlowType = "QUEUE_TRANSFER" + ContactFlowTypeContactFlow ContactFlowType = "CONTACT_FLOW" + ContactFlowTypeCustomerQueue ContactFlowType = "CUSTOMER_QUEUE" + ContactFlowTypeCustomerHold ContactFlowType = "CUSTOMER_HOLD" + ContactFlowTypeCustomerWhisper ContactFlowType = "CUSTOMER_WHISPER" + ContactFlowTypeAgentHold ContactFlowType = "AGENT_HOLD" + ContactFlowTypeAgentWhisper ContactFlowType = "AGENT_WHISPER" + ContactFlowTypeOutboundWhisper ContactFlowType = "OUTBOUND_WHISPER" + ContactFlowTypeAgentTransfer ContactFlowType = "AGENT_TRANSFER" + ContactFlowTypeQueueTransfer ContactFlowType = "QUEUE_TRANSFER" ) // Values returns all known values for ContactFlowType. 
Note that this can be @@ -72,19 +72,19 @@ type CurrentMetricName string // Enum values for CurrentMetricName const ( - CurrentMetricNameAgents_online CurrentMetricName = "AGENTS_ONLINE" - CurrentMetricNameAgents_available CurrentMetricName = "AGENTS_AVAILABLE" - CurrentMetricNameAgents_on_call CurrentMetricName = "AGENTS_ON_CALL" - CurrentMetricNameAgents_non_productive CurrentMetricName = "AGENTS_NON_PRODUCTIVE" - CurrentMetricNameAgents_after_contact_work CurrentMetricName = "AGENTS_AFTER_CONTACT_WORK" - CurrentMetricNameAgents_error CurrentMetricName = "AGENTS_ERROR" - CurrentMetricNameAgents_staffed CurrentMetricName = "AGENTS_STAFFED" - CurrentMetricNameContacts_in_queue CurrentMetricName = "CONTACTS_IN_QUEUE" - CurrentMetricNameOldest_contact_age CurrentMetricName = "OLDEST_CONTACT_AGE" - CurrentMetricNameContacts_scheduled CurrentMetricName = "CONTACTS_SCHEDULED" - CurrentMetricNameAgents_on_contact CurrentMetricName = "AGENTS_ON_CONTACT" - CurrentMetricNameSlots_active CurrentMetricName = "SLOTS_ACTIVE" - CurrentMetricNameSlots_available CurrentMetricName = "SLOTS_AVAILABLE" + CurrentMetricNameAgentsOnline CurrentMetricName = "AGENTS_ONLINE" + CurrentMetricNameAgentsAvailable CurrentMetricName = "AGENTS_AVAILABLE" + CurrentMetricNameAgentsOnCall CurrentMetricName = "AGENTS_ON_CALL" + CurrentMetricNameAgentsNonProductive CurrentMetricName = "AGENTS_NON_PRODUCTIVE" + CurrentMetricNameAgentsAfterContactWork CurrentMetricName = "AGENTS_AFTER_CONTACT_WORK" + CurrentMetricNameAgentsError CurrentMetricName = "AGENTS_ERROR" + CurrentMetricNameAgentsStaffed CurrentMetricName = "AGENTS_STAFFED" + CurrentMetricNameContactsInQueue CurrentMetricName = "CONTACTS_IN_QUEUE" + CurrentMetricNameOldestContactAge CurrentMetricName = "OLDEST_CONTACT_AGE" + CurrentMetricNameContactsScheduled CurrentMetricName = "CONTACTS_SCHEDULED" + CurrentMetricNameAgentsOnContact CurrentMetricName = "AGENTS_ON_CONTACT" + CurrentMetricNameSlotsActive CurrentMetricName = "SLOTS_ACTIVE" + CurrentMetricNameSlotsAvailable CurrentMetricName = "SLOTS_AVAILABLE" ) // Values returns all known values for CurrentMetricName. 
Note that this can be @@ -130,31 +130,31 @@ type HistoricalMetricName string // Enum values for HistoricalMetricName const ( - HistoricalMetricNameContacts_queued HistoricalMetricName = "CONTACTS_QUEUED" - HistoricalMetricNameContacts_handled HistoricalMetricName = "CONTACTS_HANDLED" - HistoricalMetricNameContacts_abandoned HistoricalMetricName = "CONTACTS_ABANDONED" - HistoricalMetricNameContacts_consulted HistoricalMetricName = "CONTACTS_CONSULTED" - HistoricalMetricNameContacts_agent_hung_up_first HistoricalMetricName = "CONTACTS_AGENT_HUNG_UP_FIRST" - HistoricalMetricNameContacts_handled_incoming HistoricalMetricName = "CONTACTS_HANDLED_INCOMING" - HistoricalMetricNameContacts_handled_outbound HistoricalMetricName = "CONTACTS_HANDLED_OUTBOUND" - HistoricalMetricNameContacts_hold_abandons HistoricalMetricName = "CONTACTS_HOLD_ABANDONS" - HistoricalMetricNameContacts_transferred_in HistoricalMetricName = "CONTACTS_TRANSFERRED_IN" - HistoricalMetricNameContacts_transferred_out HistoricalMetricName = "CONTACTS_TRANSFERRED_OUT" - HistoricalMetricNameContacts_transferred_in_from_queue HistoricalMetricName = "CONTACTS_TRANSFERRED_IN_FROM_QUEUE" - HistoricalMetricNameContacts_transferred_out_from_queue HistoricalMetricName = "CONTACTS_TRANSFERRED_OUT_FROM_QUEUE" - HistoricalMetricNameContacts_missed HistoricalMetricName = "CONTACTS_MISSED" - HistoricalMetricNameCallback_contacts_handled HistoricalMetricName = "CALLBACK_CONTACTS_HANDLED" - HistoricalMetricNameApi_contacts_handled HistoricalMetricName = "API_CONTACTS_HANDLED" - HistoricalMetricNameOccupancy HistoricalMetricName = "OCCUPANCY" - HistoricalMetricNameHandle_time HistoricalMetricName = "HANDLE_TIME" - HistoricalMetricNameAfter_contact_work_time HistoricalMetricName = "AFTER_CONTACT_WORK_TIME" - HistoricalMetricNameQueued_time HistoricalMetricName = "QUEUED_TIME" - HistoricalMetricNameAbandon_time HistoricalMetricName = "ABANDON_TIME" - HistoricalMetricNameQueue_answer_time HistoricalMetricName = "QUEUE_ANSWER_TIME" - HistoricalMetricNameHold_time HistoricalMetricName = "HOLD_TIME" - HistoricalMetricNameInteraction_time HistoricalMetricName = "INTERACTION_TIME" - HistoricalMetricNameInteraction_and_hold_time HistoricalMetricName = "INTERACTION_AND_HOLD_TIME" - HistoricalMetricNameService_level HistoricalMetricName = "SERVICE_LEVEL" + HistoricalMetricNameContactsQueued HistoricalMetricName = "CONTACTS_QUEUED" + HistoricalMetricNameContactsHandled HistoricalMetricName = "CONTACTS_HANDLED" + HistoricalMetricNameContactsAbandoned HistoricalMetricName = "CONTACTS_ABANDONED" + HistoricalMetricNameContactsConsulted HistoricalMetricName = "CONTACTS_CONSULTED" + HistoricalMetricNameContactsAgentHungUpFirst HistoricalMetricName = "CONTACTS_AGENT_HUNG_UP_FIRST" + HistoricalMetricNameContactsHandledIncoming HistoricalMetricName = "CONTACTS_HANDLED_INCOMING" + HistoricalMetricNameContactsHandledOutbound HistoricalMetricName = "CONTACTS_HANDLED_OUTBOUND" + HistoricalMetricNameContactsHoldAbandons HistoricalMetricName = "CONTACTS_HOLD_ABANDONS" + HistoricalMetricNameContactsTransferredIn HistoricalMetricName = "CONTACTS_TRANSFERRED_IN" + HistoricalMetricNameContactsTransferredOut HistoricalMetricName = "CONTACTS_TRANSFERRED_OUT" + HistoricalMetricNameContactsTransferredInFromQueue HistoricalMetricName = "CONTACTS_TRANSFERRED_IN_FROM_QUEUE" + HistoricalMetricNameContactsTransferredOutFromQueue HistoricalMetricName = "CONTACTS_TRANSFERRED_OUT_FROM_QUEUE" + HistoricalMetricNameContactsMissed HistoricalMetricName = "CONTACTS_MISSED" + 
HistoricalMetricNameCallbackContactsHandled HistoricalMetricName = "CALLBACK_CONTACTS_HANDLED" + HistoricalMetricNameApiContactsHandled HistoricalMetricName = "API_CONTACTS_HANDLED" + HistoricalMetricNameOccupancy HistoricalMetricName = "OCCUPANCY" + HistoricalMetricNameHandleTime HistoricalMetricName = "HANDLE_TIME" + HistoricalMetricNameAfterContactWorkTime HistoricalMetricName = "AFTER_CONTACT_WORK_TIME" + HistoricalMetricNameQueuedTime HistoricalMetricName = "QUEUED_TIME" + HistoricalMetricNameAbandonTime HistoricalMetricName = "ABANDON_TIME" + HistoricalMetricNameQueueAnswerTime HistoricalMetricName = "QUEUE_ANSWER_TIME" + HistoricalMetricNameHoldTime HistoricalMetricName = "HOLD_TIME" + HistoricalMetricNameInteractionTime HistoricalMetricName = "INTERACTION_TIME" + HistoricalMetricNameInteractionAndHoldTime HistoricalMetricName = "INTERACTION_AND_HOLD_TIME" + HistoricalMetricNameServiceLevel HistoricalMetricName = "SERVICE_LEVEL" ) // Values returns all known values for HistoricalMetricName. Note that this can be @@ -682,8 +682,8 @@ type PhoneNumberType string // Enum values for PhoneNumberType const ( - PhoneNumberTypeToll_free PhoneNumberType = "TOLL_FREE" - PhoneNumberTypeDid PhoneNumberType = "DID" + PhoneNumberTypeTollFree PhoneNumberType = "TOLL_FREE" + PhoneNumberTypeDid PhoneNumberType = "DID" ) // Values returns all known values for PhoneNumberType. Note that this can be @@ -700,8 +700,8 @@ type PhoneType string // Enum values for PhoneType const ( - PhoneTypeSoft_phone PhoneType = "SOFT_PHONE" - PhoneTypeDesk_phone PhoneType = "DESK_PHONE" + PhoneTypeSoftPhone PhoneType = "SOFT_PHONE" + PhoneTypeDeskPhone PhoneType = "DESK_PHONE" ) // Values returns all known values for PhoneType. Note that this can be expanded in @@ -776,9 +776,9 @@ type VoiceRecordingTrack string // Enum values for VoiceRecordingTrack const ( - VoiceRecordingTrackFrom_agent VoiceRecordingTrack = "FROM_AGENT" - VoiceRecordingTrackTo_agent VoiceRecordingTrack = "TO_AGENT" - VoiceRecordingTrackAll VoiceRecordingTrack = "ALL" + VoiceRecordingTrackFromAgent VoiceRecordingTrack = "FROM_AGENT" + VoiceRecordingTrackToAgent VoiceRecordingTrack = "TO_AGENT" + VoiceRecordingTrackAll VoiceRecordingTrack = "ALL" ) // Values returns all known values for VoiceRecordingTrack. Note that this can be diff --git a/service/connectparticipant/api_op_SendEvent.go b/service/connectparticipant/api_op_SendEvent.go index b2e6832833b..abebdf1018a 100644 --- a/service/connectparticipant/api_op_SendEvent.go +++ b/service/connectparticipant/api_op_SendEvent.go @@ -37,10 +37,10 @@ type SendEventInput struct { // The content type of the request. Supported types are: // - // * + // * // application/vnd.amazonaws.connect.event.typing // - // * + // * // application/vnd.amazonaws.connect.event.connection.acknowledged // // This member is required. diff --git a/service/connectparticipant/types/enums.go b/service/connectparticipant/types/enums.go index e6edb0bf4ea..429e84e173c 100644 --- a/service/connectparticipant/types/enums.go +++ b/service/connectparticipant/types/enums.go @@ -6,9 +6,9 @@ type ChatItemType string // Enum values for ChatItemType const ( - ChatItemTypeMessage ChatItemType = "MESSAGE" - ChatItemTypeEvent ChatItemType = "EVENT" - ChatItemTypeConnection_ack ChatItemType = "CONNECTION_ACK" + ChatItemTypeMessage ChatItemType = "MESSAGE" + ChatItemTypeEvent ChatItemType = "EVENT" + ChatItemTypeConnectionAck ChatItemType = "CONNECTION_ACK" ) // Values returns all known values for ChatItemType. 
Note that this can be expanded @@ -26,8 +26,8 @@ type ConnectionType string // Enum values for ConnectionType const ( - ConnectionTypeWebsocket ConnectionType = "WEBSOCKET" - ConnectionTypeConnection_credentials ConnectionType = "CONNECTION_CREDENTIALS" + ConnectionTypeWebsocket ConnectionType = "WEBSOCKET" + ConnectionTypeConnectionCredentials ConnectionType = "CONNECTION_CREDENTIALS" ) // Values returns all known values for ConnectionType. Note that this can be diff --git a/service/costandusagereportservice/doc.go b/service/costandusagereportservice/doc.go index 6ce1961d3ff..f3d64d14cab 100644 --- a/service/costandusagereportservice/doc.go +++ b/service/costandusagereportservice/doc.go @@ -12,5 +12,5 @@ // Usage API. Service Endpoint The AWS Cost and Usage Report API provides the // following endpoint: // -// * cur.us-east-1.amazonaws.com +// * cur.us-east-1.amazonaws.com package costandusagereportservice diff --git a/service/costandusagereportservice/types/enums.go b/service/costandusagereportservice/types/enums.go index d49041bd311..08d9a97da65 100644 --- a/service/costandusagereportservice/types/enums.go +++ b/service/costandusagereportservice/types/enums.go @@ -26,29 +26,29 @@ type AWSRegion string // Enum values for AWSRegion const ( - AWSRegionCape_town AWSRegion = "af-south-1" - AWSRegionHong_kong AWSRegion = "ap-east-1" - AWSRegionMumbai AWSRegion = "ap-south-1" - AWSRegionSingapore AWSRegion = "ap-southeast-1" - AWSRegionSydney AWSRegion = "ap-southeast-2" - AWSRegionTokyo AWSRegion = "ap-northeast-1" - AWSRegionSeoul AWSRegion = "ap-northeast-2" - AWSRegionOsaka AWSRegion = "ap-northeast-3" - AWSRegionCanada_central AWSRegion = "ca-central-1" - AWSRegionFrankfurt AWSRegion = "eu-central-1" - AWSRegionIreland AWSRegion = "eu-west-1" - AWSRegionLondon AWSRegion = "eu-west-2" - AWSRegionParis AWSRegion = "eu-west-3" - AWSRegionStockholm AWSRegion = "eu-north-1" - AWSRegionMilano AWSRegion = "eu-south-1" - AWSRegionBahrain AWSRegion = "me-south-1" - AWSRegionSao_paulo AWSRegion = "sa-east-1" - AWSRegionUs_standard AWSRegion = "us-east-1" - AWSRegionOhio AWSRegion = "us-east-2" - AWSRegionNorthern_california AWSRegion = "us-west-1" - AWSRegionOregon AWSRegion = "us-west-2" - AWSRegionBeijing AWSRegion = "cn-north-1" - AWSRegionNingxia AWSRegion = "cn-northwest-1" + AWSRegionCapeTown AWSRegion = "af-south-1" + AWSRegionHongKong AWSRegion = "ap-east-1" + AWSRegionMumbai AWSRegion = "ap-south-1" + AWSRegionSingapore AWSRegion = "ap-southeast-1" + AWSRegionSydney AWSRegion = "ap-southeast-2" + AWSRegionTokyo AWSRegion = "ap-northeast-1" + AWSRegionSeoul AWSRegion = "ap-northeast-2" + AWSRegionOsaka AWSRegion = "ap-northeast-3" + AWSRegionCanadaCentral AWSRegion = "ca-central-1" + AWSRegionFrankfurt AWSRegion = "eu-central-1" + AWSRegionIreland AWSRegion = "eu-west-1" + AWSRegionLondon AWSRegion = "eu-west-2" + AWSRegionParis AWSRegion = "eu-west-3" + AWSRegionStockholm AWSRegion = "eu-north-1" + AWSRegionMilano AWSRegion = "eu-south-1" + AWSRegionBahrain AWSRegion = "me-south-1" + AWSRegionSaoPaulo AWSRegion = "sa-east-1" + AWSRegionUsStandard AWSRegion = "us-east-1" + AWSRegionOhio AWSRegion = "us-east-2" + AWSRegionNorthernCalifornia AWSRegion = "us-west-1" + AWSRegionOregon AWSRegion = "us-west-2" + AWSRegionBeijing AWSRegion = "cn-north-1" + AWSRegionNingxia AWSRegion = "cn-northwest-1" ) // Values returns all known values for AWSRegion. 
Note that this can be expanded in @@ -124,8 +124,8 @@ type ReportVersioning string // Enum values for ReportVersioning const ( - ReportVersioningCreate_new_report ReportVersioning = "CREATE_NEW_REPORT" - ReportVersioningOverwrite_report ReportVersioning = "OVERWRITE_REPORT" + ReportVersioningCreateNewReport ReportVersioning = "CREATE_NEW_REPORT" + ReportVersioningOverwriteReport ReportVersioning = "OVERWRITE_REPORT" ) // Values returns all known values for ReportVersioning. Note that this can be diff --git a/service/costexplorer/api_op_GetCostForecast.go b/service/costexplorer/api_op_GetCostForecast.go index 249c74c5cad..e57e029eddd 100644 --- a/service/costexplorer/api_op_GetCostForecast.go +++ b/service/costexplorer/api_op_GetCostForecast.go @@ -43,17 +43,16 @@ type GetCostForecastInput struct { // (http://aws.amazon.com/premiumsupport/knowledge-center/blended-rates-intro/). // Valid values for a GetCostForecast call are the following: // - // * - // AMORTIZED_COST + // * AMORTIZED_COST // - // * BLENDED_COST + // * + // BLENDED_COST // - // * NET_AMORTIZED_COST + // * NET_AMORTIZED_COST // - // * - // NET_UNBLENDED_COST + // * NET_UNBLENDED_COST // - // * UNBLENDED_COST + // * UNBLENDED_COST // // This member is required. Metric types.Metric diff --git a/service/costexplorer/api_op_GetDimensionValues.go b/service/costexplorer/api_op_GetDimensionValues.go index 00c0d8e100c..d11e858c428 100644 --- a/service/costexplorer/api_op_GetDimensionValues.go +++ b/service/costexplorer/api_op_GetDimensionValues.go @@ -52,113 +52,112 @@ type GetDimensionValuesInput struct { // you set the context to COST_AND_USAGE, you can use the following dimensions for // searching: // - // * AZ - The Availability Zone. An example is us-east-1a. + // * AZ - The Availability Zone. An example is us-east-1a. // - // * + // * // DATABASE_ENGINE - The Amazon Relational Database Service database. Examples are // Aurora or MySQL. // - // * INSTANCE_TYPE - The type of Amazon EC2 instance. An - // example is m4.xlarge. + // * INSTANCE_TYPE - The type of Amazon EC2 instance. An example + // is m4.xlarge. // - // * LEGAL_ENTITY_NAME - The name of the organization - // that sells you AWS services, such as Amazon Web Services. + // * LEGAL_ENTITY_NAME - The name of the organization that sells you + // AWS services, such as Amazon Web Services. // - // * LINKED_ACCOUNT - // - The description in the attribute map that includes the full name of the member - // account. The value field contains the AWS ID of the member account. + // * LINKED_ACCOUNT - The description + // in the attribute map that includes the full name of the member account. The + // value field contains the AWS ID of the member account. // - // * - // OPERATING_SYSTEM - The operating system. Examples are Windows or Linux. - // - // * - // OPERATION - The action performed. Examples include RunInstance and - // CreateBucket. + // * OPERATING_SYSTEM - The + // operating system. Examples are Windows or Linux. // - // * PLATFORM - The Amazon EC2 operating system. Examples are - // Windows or Linux. + // * OPERATION - The action + // performed. Examples include RunInstance and CreateBucket. // - // * PURCHASE_TYPE - The reservation type of the purchase to - // which this usage is related. Examples include On-Demand Instances and Standard - // Reserved Instances. + // * PLATFORM - The + // Amazon EC2 operating system. Examples are Windows or Linux. // - // * SERVICE - The AWS service such as Amazon DynamoDB. 
+ // * PURCHASE_TYPE - + // The reservation type of the purchase to which this usage is related. Examples + // include On-Demand Instances and Standard Reserved Instances. // + // * SERVICE - The + // AWS service such as Amazon DynamoDB. // - // * USAGE_TYPE - The type of usage. An example is DataTransfer-In-Bytes. The - // response for the GetDimensionValues operation includes a unit attribute. - // Examples include GB and Hrs. + // * USAGE_TYPE - The type of usage. An + // example is DataTransfer-In-Bytes. The response for the GetDimensionValues + // operation includes a unit attribute. Examples include GB and Hrs. // - // * USAGE_TYPE_GROUP - The grouping of common - // usage types. An example is Amazon EC2: CloudWatch – Alarms. The response for - // this operation includes a unit attribute. + // * + // USAGE_TYPE_GROUP - The grouping of common usage types. An example is Amazon EC2: + // CloudWatch – Alarms. The response for this operation includes a unit + // attribute. // - // * REGION - The AWS Region. + // * REGION - The AWS Region. // - // * - // RECORD_TYPE - The different types of charges such as RI fees, usage costs, tax - // refunds, and credits. + // * RECORD_TYPE - The different types of + // charges such as RI fees, usage costs, tax refunds, and credits. // - // * RESOURCE_ID - The unique identifier of the - // resource. ResourceId is an opt-in feature only available for last 14 days for - // EC2-Compute Service. + // * RESOURCE_ID - + // The unique identifier of the resource. ResourceId is an opt-in feature only + // available for last 14 days for EC2-Compute Service. // - // If you set the context to RESERVATIONS, you can use the - // following dimensions for searching: + // If you set the context to + // RESERVATIONS, you can use the following dimensions for searching: // - // * AZ - The Availability Zone. An - // example is us-east-1a. + // * AZ - The + // Availability Zone. An example is us-east-1a. // - // * CACHE_ENGINE - The Amazon ElastiCache operating - // system. Examples are Windows or Linux. + // * CACHE_ENGINE - The Amazon + // ElastiCache operating system. Examples are Windows or Linux. // - // * DEPLOYMENT_OPTION - The scope of - // Amazon Relational Database Service deployments. Valid values are SingleAZ and - // MultiAZ. + // * + // DEPLOYMENT_OPTION - The scope of Amazon Relational Database Service deployments. + // Valid values are SingleAZ and MultiAZ. // - // * INSTANCE_TYPE - The type of Amazon EC2 instance. An example is - // m4.xlarge. + // * INSTANCE_TYPE - The type of Amazon EC2 + // instance. An example is m4.xlarge. // - // * LINKED_ACCOUNT - The description in the attribute map that - // includes the full name of the member account. The value field contains the AWS - // ID of the member account. + // * LINKED_ACCOUNT - The description in the + // attribute map that includes the full name of the member account. The value field + // contains the AWS ID of the member account. // - // * PLATFORM - The Amazon EC2 operating system. - // Examples are Windows or Linux. + // * PLATFORM - The Amazon EC2 + // operating system. Examples are Windows or Linux. // - // * REGION - The AWS Region. + // * REGION - The AWS Region. // - // * SCOPE - // (Utilization only) - The scope of a Reserved Instance (RI). Values are regional - // or a single Availability Zone. + // * + // SCOPE (Utilization only) - The scope of a Reserved Instance (RI). Values are + // regional or a single Availability Zone. 
// - // * TAG (Coverage only) - The tags that are - // associated with a Reserved Instance (RI). + // * TAG (Coverage only) - The tags that + // are associated with a Reserved Instance (RI). // - // * TENANCY - The tenancy of a + // * TENANCY - The tenancy of a // resource. Examples are shared or dedicated. // // If you set the context to // SAVINGS_PLANS, you can use the following dimensions for searching: // - // * + // * // SAVINGS_PLANS_TYPE - Type of Savings Plans (EC2 Instance or Compute) // - // * + // * // PAYMENT_OPTION - Payment option for the given Savings Plans (for example, All // Upfront) // - // * REGION - The AWS Region. + // * REGION - The AWS Region. // - // * INSTANCE_TYPE_FAMILY - The - // family of instances (For example, m5) + // * INSTANCE_TYPE_FAMILY - The family of + // instances (For example, m5) // - // * LINKED_ACCOUNT - The description in - // the attribute map that includes the full name of the member account. The value - // field contains the AWS ID of the member account. + // * LINKED_ACCOUNT - The description in the attribute + // map that includes the full name of the member account. The value field contains + // the AWS ID of the member account. // - // * SAVINGS_PLAN_ARN - The - // unique identifier for your Savings Plan + // * SAVINGS_PLAN_ARN - The unique identifier + // for your Savings Plan Context types.Context // The token to retrieve the next set of results. AWS provides the token when the @@ -175,111 +174,110 @@ type GetDimensionValuesOutput struct { // only for a specific context. If you set the context to COST_AND_USAGE, you can // use the following dimensions for searching: // - // * AZ - The Availability Zone. - // An example is us-east-1a. + // * AZ - The Availability Zone. An + // example is us-east-1a. // - // * DATABASE_ENGINE - The Amazon Relational - // Database Service database. Examples are Aurora or MySQL. + // * DATABASE_ENGINE - The Amazon Relational Database + // Service database. Examples are Aurora or MySQL. // - // * INSTANCE_TYPE - - // The type of Amazon EC2 instance. An example is m4.xlarge. + // * INSTANCE_TYPE - The type of + // Amazon EC2 instance. An example is m4.xlarge. // - // * - // LEGAL_ENTITY_NAME - The name of the organization that sells you AWS services, - // such as Amazon Web Services. + // * LEGAL_ENTITY_NAME - The name of + // the organization that sells you AWS services, such as Amazon Web Services. // - // * LINKED_ACCOUNT - The description in the - // attribute map that includes the full name of the member account. The value field - // contains the AWS ID of the member account. + // * + // LINKED_ACCOUNT - The description in the attribute map that includes the full + // name of the member account. The value field contains the AWS ID of the member + // account. // - // * OPERATING_SYSTEM - The - // operating system. Examples are Windows or Linux. + // * OPERATING_SYSTEM - The operating system. Examples are Windows or + // Linux. // - // * OPERATION - The action - // performed. Examples include RunInstance and CreateBucket. + // * OPERATION - The action performed. Examples include RunInstance and + // CreateBucket. // - // * PLATFORM - The - // Amazon EC2 operating system. Examples are Windows or Linux. + // * PLATFORM - The Amazon EC2 operating system. Examples are + // Windows or Linux. // - // * PURCHASE_TYPE - // - The reservation type of the purchase to which this usage is related. Examples - // include On-Demand Instances and Standard Reserved Instances. 
+ // * PURCHASE_TYPE - The reservation type of the purchase to + // which this usage is related. Examples include On-Demand Instances and Standard + // Reserved Instances. // - // * SERVICE - - // The AWS service such as Amazon DynamoDB. + // * SERVICE - The AWS service such as Amazon DynamoDB. // - // * USAGE_TYPE - The type of usage. - // An example is DataTransfer-In-Bytes. The response for the GetDimensionValues - // operation includes a unit attribute. Examples include GB and Hrs. + // * + // USAGE_TYPE - The type of usage. An example is DataTransfer-In-Bytes. The + // response for the GetDimensionValues operation includes a unit attribute. + // Examples include GB and Hrs. // - // * - // USAGE_TYPE_GROUP - The grouping of common usage types. An example is Amazon EC2: - // CloudWatch – Alarms. The response for this operation includes a unit - // attribute. + // * USAGE_TYPE_GROUP - The grouping of common usage + // types. An example is Amazon EC2: CloudWatch – Alarms. The response for this + // operation includes a unit attribute. // - // * RECORD_TYPE - The different types of charges such as RI fees, - // usage costs, tax refunds, and credits. + // * RECORD_TYPE - The different types of + // charges such as RI fees, usage costs, tax refunds, and credits. // - // * RESOURCE_ID - The unique - // identifier of the resource. ResourceId is an opt-in feature only available for - // last 14 days for EC2-Compute Service. + // * RESOURCE_ID - + // The unique identifier of the resource. ResourceId is an opt-in feature only + // available for last 14 days for EC2-Compute Service. // - // If you set the context to RESERVATIONS, - // you can use the following dimensions for searching: + // If you set the context to + // RESERVATIONS, you can use the following dimensions for searching: // - // * AZ - The Availability - // Zone. An example is us-east-1a. + // * AZ - The + // Availability Zone. An example is us-east-1a. // - // * CACHE_ENGINE - The Amazon ElastiCache - // operating system. Examples are Windows or Linux. + // * CACHE_ENGINE - The Amazon + // ElastiCache operating system. Examples are Windows or Linux. // - // * DEPLOYMENT_OPTION - The - // scope of Amazon Relational Database Service deployments. Valid values are - // SingleAZ and MultiAZ. + // * + // DEPLOYMENT_OPTION - The scope of Amazon Relational Database Service deployments. + // Valid values are SingleAZ and MultiAZ. // - // * INSTANCE_TYPE - The type of Amazon EC2 instance. An - // example is m4.xlarge. + // * INSTANCE_TYPE - The type of Amazon EC2 + // instance. An example is m4.xlarge. // - // * LINKED_ACCOUNT - The description in the attribute - // map that includes the full name of the member account. The value field contains - // the AWS ID of the member account. + // * LINKED_ACCOUNT - The description in the + // attribute map that includes the full name of the member account. The value field + // contains the AWS ID of the member account. // - // * PLATFORM - The Amazon EC2 operating - // system. Examples are Windows or Linux. + // * PLATFORM - The Amazon EC2 + // operating system. Examples are Windows or Linux. // - // * REGION - The AWS Region. + // * REGION - The AWS Region. // - // * + // * // SCOPE (Utilization only) - The scope of a Reserved Instance (RI). Values are // regional or a single Availability Zone. // - // * TAG (Coverage only) - The tags - // that are associated with a Reserved Instance (RI). + // * TAG (Coverage only) - The tags that + // are associated with a Reserved Instance (RI). 
// - // * TENANCY - The tenancy - // of a resource. Examples are shared or dedicated. + // * TENANCY - The tenancy of a + // resource. Examples are shared or dedicated. // // If you set the context to // SAVINGS_PLANS, you can use the following dimensions for searching: // - // * + // * // SAVINGS_PLANS_TYPE - Type of Savings Plans (EC2 Instance or Compute) // - // * + // * // PAYMENT_OPTION - Payment option for the given Savings Plans (for example, All // Upfront) // - // * REGION - The AWS Region. + // * REGION - The AWS Region. // - // * INSTANCE_TYPE_FAMILY - The - // family of instances (For example, m5) + // * INSTANCE_TYPE_FAMILY - The family of + // instances (For example, m5) // - // * LINKED_ACCOUNT - The description in - // the attribute map that includes the full name of the member account. The value - // field contains the AWS ID of the member account. + // * LINKED_ACCOUNT - The description in the attribute + // map that includes the full name of the member account. The value field contains + // the AWS ID of the member account. // - // * SAVINGS_PLAN_ARN - The - // unique identifier for your Savings Plan + // * SAVINGS_PLAN_ARN - The unique identifier + // for your Savings Plan // // This member is required. DimensionValues []*types.DimensionValuesWithAttributes diff --git a/service/costexplorer/api_op_GetReservationCoverage.go b/service/costexplorer/api_op_GetReservationCoverage.go index f77a765b0a2..4ac779d6e73 100644 --- a/service/costexplorer/api_op_GetReservationCoverage.go +++ b/service/costexplorer/api_op_GetReservationCoverage.go @@ -19,35 +19,34 @@ import ( // any time period, you can filter data about reservation usage by the following // dimensions: // -// * AZ +// * AZ // -// * CACHE_ENGINE +// * CACHE_ENGINE // -// * DATABASE_ENGINE +// * DATABASE_ENGINE // -// * -// DEPLOYMENT_OPTION +// * DEPLOYMENT_OPTION // -// * INSTANCE_TYPE +// * +// INSTANCE_TYPE // -// * LINKED_ACCOUNT +// * LINKED_ACCOUNT // -// * -// OPERATING_SYSTEM +// * OPERATING_SYSTEM // -// * PLATFORM +// * PLATFORM // -// * REGION +// * REGION // -// * SERVICE +// * +// SERVICE // -// * TAG +// * TAG // -// * -// TENANCY +// * TENANCY // -// To determine valid values for a dimension, use the GetDimensionValues -// operation. +// To determine valid values for a dimension, use the +// GetDimensionValues operation. func (c *Client) GetReservationCoverage(ctx context.Context, params *GetReservationCoverageInput, optFns ...func(*Options)) (*GetReservationCoverageOutput, error) { if params == nil { params = &GetReservationCoverageInput{} @@ -80,32 +79,31 @@ type GetReservationCoverageInput struct { // Filters utilization data by dimensions. 
You can filter by the following // dimensions: // - // * AZ + // * AZ // - // * CACHE_ENGINE + // * CACHE_ENGINE // - // * DATABASE_ENGINE + // * DATABASE_ENGINE // - // * - // DEPLOYMENT_OPTION + // * DEPLOYMENT_OPTION // - // * INSTANCE_TYPE + // * + // INSTANCE_TYPE // - // * LINKED_ACCOUNT + // * LINKED_ACCOUNT // - // * - // OPERATING_SYSTEM + // * OPERATING_SYSTEM // - // * PLATFORM + // * PLATFORM // - // * REGION + // * REGION // - // * SERVICE + // * + // SERVICE // - // * TAG + // * TAG // - // * - // TENANCY + // * TENANCY // // GetReservationCoverage uses the same Expression // (https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_Expression.html) @@ -124,28 +122,27 @@ type GetReservationCoverageInput struct { // You can group the data by the following attributes: // - // * AZ + // * AZ // - // * - // CACHE_ENGINE + // * CACHE_ENGINE // - // * DATABASE_ENGINE + // * + // DATABASE_ENGINE // - // * DEPLOYMENT_OPTION + // * DEPLOYMENT_OPTION // - // * - // INSTANCE_TYPE - // - // * LINKED_ACCOUNT + // * INSTANCE_TYPE // - // * OPERATING_SYSTEM + // * LINKED_ACCOUNT // - // * PLATFORM + // * + // OPERATING_SYSTEM // + // * PLATFORM // // * REGION // - // * TENANCY + // * TENANCY GroupBy []*types.GroupDefinition // The measurement that you want your reservation coverage reported in. Valid diff --git a/service/costexplorer/api_op_GetReservationUtilization.go b/service/costexplorer/api_op_GetReservationUtilization.go index 4119958349b..4952e34b7b3 100644 --- a/service/costexplorer/api_op_GetReservationUtilization.go +++ b/service/costexplorer/api_op_GetReservationUtilization.go @@ -43,32 +43,31 @@ type GetReservationUtilizationInput struct { // Filters utilization data by dimensions. You can filter by the following // dimensions: // - // * AZ + // * AZ // - // * CACHE_ENGINE + // * CACHE_ENGINE // - // * DEPLOYMENT_OPTION + // * DEPLOYMENT_OPTION // - // * - // INSTANCE_TYPE + // * INSTANCE_TYPE // - // * LINKED_ACCOUNT + // * + // LINKED_ACCOUNT // - // * OPERATING_SYSTEM - // - // * PLATFORM + // * OPERATING_SYSTEM // + // * PLATFORM // // * REGION // - // * SERVICE + // * SERVICE // - // * SCOPE + // * SCOPE // - // * TENANCY + // * + // TENANCY // - // GetReservationUtilization - // uses the same Expression + // GetReservationUtilization uses the same Expression // (https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_Expression.html) // object as the other operations, but only AND is supported among each dimension, // and nesting is supported up to only one level deep. If there are multiple values diff --git a/service/costexplorer/api_op_GetRightsizingRecommendation.go b/service/costexplorer/api_op_GetRightsizingRecommendation.go index ebf5b878f5a..5fbc3c2ffbd 100644 --- a/service/costexplorer/api_op_GetRightsizingRecommendation.go +++ b/service/costexplorer/api_op_GetRightsizingRecommendation.go @@ -50,35 +50,34 @@ type GetRightsizingRecommendationInput struct { // Use Expression to filter by cost or by usage. There are two patterns: // - // * - // Simple dimension values - You can set the dimension name and values for the - // filters that you plan to use. For example, you can filter for REGION==us-east-1 - // OR REGION==us-west-1. For GetRightsizingRecommendation, the Region is a full - // name (for example, REGION==US East (N. Virginia). 
The Expression example looks - // like: { "Dimensions": { "Key": "REGION", "Values": [ "us-east-1", “us-west-1” ] - // } } The list of dimension values are OR'd together to retrieve cost or usage - // data. You can create Expression and DimensionValues objects using either with* - // methods or set* methods in multiple lines. + // * Simple + // dimension values - You can set the dimension name and values for the filters + // that you plan to use. For example, you can filter for REGION==us-east-1 OR + // REGION==us-west-1. For GetRightsizingRecommendation, the Region is a full name + // (for example, REGION==US East (N. Virginia). The Expression example looks like: + // { "Dimensions": { "Key": "REGION", "Values": [ "us-east-1", “us-west-1” ] } } + // The list of dimension values are OR'd together to retrieve cost or usage data. + // You can create Expression and DimensionValues objects using either with* methods + // or set* methods in multiple lines. // - // * Compound dimension values with - // logical operations - You can use multiple Expression types and the logical - // operators AND/OR/NOT to create a list of one or more Expression objects. This - // allows you to filter on more advanced options. For example, you can filter on - // ((REGION == us-east-1 OR REGION == us-west-1) OR (TAG.Type == Type1)) AND - // (USAGE_TYPE != DataTransfer). The Expression for that looks like this: { "And": - // [ {"Or": [ {"Dimensions": { "Key": "REGION", "Values": [ "us-east-1", - // "us-west-1" ] }}, {"Tags": { "Key": "TagName", "Values": ["Value1"] } } ]}, - // {"Not": {"Dimensions": { "Key": "USAGE_TYPE", "Values": ["DataTransfer"] }}} ] } - // Because each Expression can have only one operator, the service returns an error - // if more than one is specified. The following example shows an Expression object - // that creates an error. { "And": [ ... ], "DimensionValues": { "Dimension": - // "USAGE_TYPE", "Values": [ "DataTransfer" ] } } + // * Compound dimension values with logical + // operations - You can use multiple Expression types and the logical operators + // AND/OR/NOT to create a list of one or more Expression objects. This allows you + // to filter on more advanced options. For example, you can filter on ((REGION == + // us-east-1 OR REGION == us-west-1) OR (TAG.Type == Type1)) AND (USAGE_TYPE != + // DataTransfer). The Expression for that looks like this: { "And": [ {"Or": [ + // {"Dimensions": { "Key": "REGION", "Values": [ "us-east-1", "us-west-1" ] }}, + // {"Tags": { "Key": "TagName", "Values": ["Value1"] } } ]}, {"Not": {"Dimensions": + // { "Key": "USAGE_TYPE", "Values": ["DataTransfer"] }}} ] } Because each + // Expression can have only one operator, the service returns an error if more than + // one is specified. The following example shows an Expression object that creates + // an error. { "And": [ ... ], "DimensionValues": { "Dimension": "USAGE_TYPE", + // "Values": [ "DataTransfer" ] } } // - // For GetRightsizingRecommendation - // action, a combination of OR and NOT is not supported. OR is not supported - // between different dimensions, or dimensions and tags. NOT operators aren't - // supported. Dimensions are also limited to LINKED_ACCOUNT, REGION, or - // RIGHTSIZING_TYPE. + // For GetRightsizingRecommendation action, a + // combination of OR and NOT is not supported. OR is not supported between + // different dimensions, or dimensions and tags. NOT operators aren't supported. + // Dimensions are also limited to LINKED_ACCOUNT, REGION, or RIGHTSIZING_TYPE. 
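// Caller-side sketch, not part of the generated code in this patch: it shows
// how the simple-dimension JSON example above, {"Dimensions": {"Key": "REGION",
// "Values": ["us-east-1", "us-west-1"]}}, might be written against the
// costexplorer types. The DimensionValues field shapes (Key as a Dimension
// enum, Values as []*string) are assumed from the surrounding generated code
// of this SDK preview, not confirmed by this hunk.
package example

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/costexplorer/types"
)

// regionFilter builds the OR'd REGION filter from the documentation above.
// For GetRightsizingRecommendation the dimension key must be LINKED_ACCOUNT,
// REGION, or RIGHTSIZING_TYPE, and OR/NOT combinations are not supported.
func regionFilter() *types.Expression {
	return &types.Expression{
		Dimensions: &types.DimensionValues{
			Key:    types.DimensionRegion,
			Values: []*string{aws.String("us-east-1"), aws.String("us-west-1")},
		},
	}
}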
Filter *types.Expression // The pagination token that indicates the next set of results that you want to diff --git a/service/costexplorer/api_op_GetSavingsPlansCoverage.go b/service/costexplorer/api_op_GetSavingsPlansCoverage.go index c4d10127542..4d69521cf8d 100644 --- a/service/costexplorer/api_op_GetSavingsPlansCoverage.go +++ b/service/costexplorer/api_op_GetSavingsPlansCoverage.go @@ -17,17 +17,17 @@ import ( // dimensions, Cost Categories, and nested expressions. For any time period, you // can filter data for Savings Plans usage with the following dimensions: // -// * +// * // LINKED_ACCOUNT // -// * REGION +// * REGION // -// * SERVICE +// * SERVICE // -// * INSTANCE_FAMILY +// * INSTANCE_FAMILY // -// To determine -// valid values for a dimension, use the GetDimensionValues operation. +// To determine valid +// values for a dimension, use the GetDimensionValues operation. func (c *Client) GetSavingsPlansCoverage(ctx context.Context, params *GetSavingsPlansCoverageInput, optFns ...func(*Options)) (*GetSavingsPlansCoverageOutput, error) { if params == nil { params = &GetSavingsPlansCoverageInput{} @@ -55,17 +55,17 @@ type GetSavingsPlansCoverageInput struct { // Filters Savings Plans coverage data by dimensions. You can filter data for // Savings Plans usage with the following dimensions: // - // * LINKED_ACCOUNT + // * LINKED_ACCOUNT // - // * + // * // REGION // - // * SERVICE + // * SERVICE // - // * INSTANCE_FAMILY + // * INSTANCE_FAMILY // - // GetSavingsPlansCoverage uses the - // same Expression + // GetSavingsPlansCoverage uses the same + // Expression // (https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_Expression.html) // object as the other operations, but only AND is supported among each dimension. // If there are multiple values for a dimension, they are OR'd together. Cost diff --git a/service/costexplorer/api_op_GetSavingsPlansUtilization.go b/service/costexplorer/api_op_GetSavingsPlansUtilization.go index 2dc954f07ee..418d148f022 100644 --- a/service/costexplorer/api_op_GetSavingsPlansUtilization.go +++ b/service/costexplorer/api_op_GetSavingsPlansUtilization.go @@ -43,22 +43,22 @@ type GetSavingsPlansUtilizationInput struct { // Filters Savings Plans utilization coverage data for active Savings Plans // dimensions. You can filter data with the following dimensions: // - // * + // * // LINKED_ACCOUNT // - // * SAVINGS_PLAN_ARN + // * SAVINGS_PLAN_ARN // - // * SAVINGS_PLANS_TYPE + // * SAVINGS_PLANS_TYPE // - // * REGION + // * REGION // + // * + // PAYMENT_OPTION // - // * PAYMENT_OPTION + // * INSTANCE_TYPE_FAMILY // - // * INSTANCE_TYPE_FAMILY - // - // GetSavingsPlansUtilization uses - // the same Expression + // GetSavingsPlansUtilization uses the same + // Expression // (https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_Expression.html) // object as the other operations, but only AND is supported among each dimension. Filter *types.Expression diff --git a/service/costexplorer/api_op_GetSavingsPlansUtilizationDetails.go b/service/costexplorer/api_op_GetSavingsPlansUtilizationDetails.go index 19d933b8fa6..02d03186781 100644 --- a/service/costexplorer/api_op_GetSavingsPlansUtilizationDetails.go +++ b/service/costexplorer/api_op_GetSavingsPlansUtilizationDetails.go @@ -45,20 +45,19 @@ type GetSavingsPlansUtilizationDetailsInput struct { // Filters Savings Plans utilization coverage data for active Savings Plans // dimensions. 
You can filter data with the following dimensions: // - // * + // * // LINKED_ACCOUNT // - // * SAVINGS_PLAN_ARN + // * SAVINGS_PLAN_ARN // - // * REGION + // * REGION // - // * PAYMENT_OPTION + // * PAYMENT_OPTION // + // * + // INSTANCE_TYPE_FAMILY // - // * INSTANCE_TYPE_FAMILY - // - // GetSavingsPlansUtilizationDetails uses the same - // Expression + // GetSavingsPlansUtilizationDetails uses the same Expression // (https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_Expression.html) // object as the other operations, but only AND is supported among each dimension. Filter *types.Expression diff --git a/service/costexplorer/api_op_GetUsageForecast.go b/service/costexplorer/api_op_GetUsageForecast.go index 0f9fd0eaa1f..3d9aebf1a51 100644 --- a/service/costexplorer/api_op_GetUsageForecast.go +++ b/service/costexplorer/api_op_GetUsageForecast.go @@ -40,9 +40,9 @@ type GetUsageForecastInput struct { // Which metric Cost Explorer uses to create your forecast. Valid values for a // GetUsageForecast call are the following: // - // * USAGE_QUANTITY + // * USAGE_QUANTITY // - // * + // * // NORMALIZED_USAGE_AMOUNT // // This member is required. diff --git a/service/costexplorer/doc.go b/service/costexplorer/doc.go index 0f4fcdc14c4..1eccef2eac2 100644 --- a/service/costexplorer/doc.go +++ b/service/costexplorer/doc.go @@ -10,7 +10,7 @@ // environment. Service Endpoint The Cost Explorer API provides the following // endpoint: // -// * https://ce.us-east-1.amazonaws.com +// * https://ce.us-east-1.amazonaws.com // // For information about costs // associated with the Cost Explorer API, see AWS Cost Management Pricing diff --git a/service/costexplorer/types/enums.go b/service/costexplorer/types/enums.go index e1136017bf1..32dcbb4b8dd 100644 --- a/service/costexplorer/types/enums.go +++ b/service/costexplorer/types/enums.go @@ -24,9 +24,9 @@ type AnomalyFeedbackType string // Enum values for AnomalyFeedbackType const ( - AnomalyFeedbackTypeYes AnomalyFeedbackType = "YES" - AnomalyFeedbackTypeNo AnomalyFeedbackType = "NO" - AnomalyFeedbackTypePlanned_activity AnomalyFeedbackType = "PLANNED_ACTIVITY" + AnomalyFeedbackTypeYes AnomalyFeedbackType = "YES" + AnomalyFeedbackTypeNo AnomalyFeedbackType = "NO" + AnomalyFeedbackTypePlannedActivity AnomalyFeedbackType = "PLANNED_ACTIVITY" ) // Values returns all known values for AnomalyFeedbackType. Note that this can be @@ -64,9 +64,9 @@ type Context string // Enum values for Context const ( - ContextCost_and_usage Context = "COST_AND_USAGE" - ContextReservations Context = "RESERVATIONS" - ContextSavings_plans Context = "SAVINGS_PLANS" + ContextCostAndUsage Context = "COST_AND_USAGE" + ContextReservations Context = "RESERVATIONS" + ContextSavingsPlans Context = "SAVINGS_PLANS" ) // Values returns all known values for Context. Note that this can be expanded in @@ -118,7 +118,7 @@ type CostCategoryStatusComponent string // Enum values for CostCategoryStatusComponent const ( - CostCategoryStatusComponentCost_explorer CostCategoryStatusComponent = "COST_EXPLORER" + CostCategoryStatusComponentCostExplorer CostCategoryStatusComponent = "COST_EXPLORER" ) // Values returns all known values for CostCategoryStatusComponent. 
Note that this @@ -134,35 +134,35 @@ type Dimension string // Enum values for Dimension const ( - DimensionAz Dimension = "AZ" - DimensionInstance_type Dimension = "INSTANCE_TYPE" - DimensionLinked_account Dimension = "LINKED_ACCOUNT" - DimensionLinked_account_name Dimension = "LINKED_ACCOUNT_NAME" - DimensionOperation Dimension = "OPERATION" - DimensionPurchase_type Dimension = "PURCHASE_TYPE" - DimensionRegion Dimension = "REGION" - DimensionService Dimension = "SERVICE" - DimensionService_code Dimension = "SERVICE_CODE" - DimensionUsage_type Dimension = "USAGE_TYPE" - DimensionUsage_type_group Dimension = "USAGE_TYPE_GROUP" - DimensionRecord_type Dimension = "RECORD_TYPE" - DimensionOperating_system Dimension = "OPERATING_SYSTEM" - DimensionTenancy Dimension = "TENANCY" - DimensionScope Dimension = "SCOPE" - DimensionPlatform Dimension = "PLATFORM" - DimensionSubscription_id Dimension = "SUBSCRIPTION_ID" - DimensionLegal_entity_name Dimension = "LEGAL_ENTITY_NAME" - DimensionDeployment_option Dimension = "DEPLOYMENT_OPTION" - DimensionDatabase_engine Dimension = "DATABASE_ENGINE" - DimensionCache_engine Dimension = "CACHE_ENGINE" - DimensionInstance_type_family Dimension = "INSTANCE_TYPE_FAMILY" - DimensionBilling_entity Dimension = "BILLING_ENTITY" - DimensionReservation_id Dimension = "RESERVATION_ID" - DimensionResource_id Dimension = "RESOURCE_ID" - DimensionRightsizing_type Dimension = "RIGHTSIZING_TYPE" - DimensionSavings_plans_type Dimension = "SAVINGS_PLANS_TYPE" - DimensionSavings_plan_arn Dimension = "SAVINGS_PLAN_ARN" - DimensionPayment_option Dimension = "PAYMENT_OPTION" + DimensionAz Dimension = "AZ" + DimensionInstanceType Dimension = "INSTANCE_TYPE" + DimensionLinkedAccount Dimension = "LINKED_ACCOUNT" + DimensionLinkedAccountName Dimension = "LINKED_ACCOUNT_NAME" + DimensionOperation Dimension = "OPERATION" + DimensionPurchaseType Dimension = "PURCHASE_TYPE" + DimensionRegion Dimension = "REGION" + DimensionService Dimension = "SERVICE" + DimensionServiceCode Dimension = "SERVICE_CODE" + DimensionUsageType Dimension = "USAGE_TYPE" + DimensionUsageTypeGroup Dimension = "USAGE_TYPE_GROUP" + DimensionRecordType Dimension = "RECORD_TYPE" + DimensionOperatingSystem Dimension = "OPERATING_SYSTEM" + DimensionTenancy Dimension = "TENANCY" + DimensionScope Dimension = "SCOPE" + DimensionPlatform Dimension = "PLATFORM" + DimensionSubscriptionId Dimension = "SUBSCRIPTION_ID" + DimensionLegalEntityName Dimension = "LEGAL_ENTITY_NAME" + DimensionDeploymentOption Dimension = "DEPLOYMENT_OPTION" + DimensionDatabaseEngine Dimension = "DATABASE_ENGINE" + DimensionCacheEngine Dimension = "CACHE_ENGINE" + DimensionInstanceTypeFamily Dimension = "INSTANCE_TYPE_FAMILY" + DimensionBillingEntity Dimension = "BILLING_ENTITY" + DimensionReservationId Dimension = "RESERVATION_ID" + DimensionResourceId Dimension = "RESOURCE_ID" + DimensionRightsizingType Dimension = "RIGHTSIZING_TYPE" + DimensionSavingsPlansType Dimension = "SAVINGS_PLANS_TYPE" + DimensionSavingsPlanArn Dimension = "SAVINGS_PLAN_ARN" + DimensionPaymentOption Dimension = "PAYMENT_OPTION" ) // Values returns all known values for Dimension. 
Note that this can be expanded in @@ -226,9 +226,9 @@ type GroupDefinitionType string // Enum values for GroupDefinitionType const ( - GroupDefinitionTypeDimension GroupDefinitionType = "DIMENSION" - GroupDefinitionTypeTag GroupDefinitionType = "TAG" - GroupDefinitionTypeCost_category GroupDefinitionType = "COST_CATEGORY" + GroupDefinitionTypeDimension GroupDefinitionType = "DIMENSION" + GroupDefinitionTypeTag GroupDefinitionType = "TAG" + GroupDefinitionTypeCostCategory GroupDefinitionType = "COST_CATEGORY" ) // Values returns all known values for GroupDefinitionType. Note that this can be @@ -246,9 +246,9 @@ type LookbackPeriodInDays string // Enum values for LookbackPeriodInDays const ( - LookbackPeriodInDaysSeven_days LookbackPeriodInDays = "SEVEN_DAYS" - LookbackPeriodInDaysThirty_days LookbackPeriodInDays = "THIRTY_DAYS" - LookbackPeriodInDaysSixty_days LookbackPeriodInDays = "SIXTY_DAYS" + LookbackPeriodInDaysSevenDays LookbackPeriodInDays = "SEVEN_DAYS" + LookbackPeriodInDaysThirtyDays LookbackPeriodInDays = "THIRTY_DAYS" + LookbackPeriodInDaysSixtyDays LookbackPeriodInDays = "SIXTY_DAYS" ) // Values returns all known values for LookbackPeriodInDays. Note that this can be @@ -266,12 +266,12 @@ type MatchOption string // Enum values for MatchOption const ( - MatchOptionEquals MatchOption = "EQUALS" - MatchOptionStarts_with MatchOption = "STARTS_WITH" - MatchOptionEnds_with MatchOption = "ENDS_WITH" - MatchOptionContains MatchOption = "CONTAINS" - MatchOptionCase_sensitive MatchOption = "CASE_SENSITIVE" - MatchOptionCase_insensitive MatchOption = "CASE_INSENSITIVE" + MatchOptionEquals MatchOption = "EQUALS" + MatchOptionStartsWith MatchOption = "STARTS_WITH" + MatchOptionEndsWith MatchOption = "ENDS_WITH" + MatchOptionContains MatchOption = "CONTAINS" + MatchOptionCaseSensitive MatchOption = "CASE_SENSITIVE" + MatchOptionCaseInsensitive MatchOption = "CASE_INSENSITIVE" ) // Values returns all known values for MatchOption. Note that this can be expanded @@ -292,13 +292,13 @@ type Metric string // Enum values for Metric const ( - MetricBlended_cost Metric = "BLENDED_COST" - MetricUnblended_cost Metric = "UNBLENDED_COST" - MetricAmortized_cost Metric = "AMORTIZED_COST" - MetricNet_unblended_cost Metric = "NET_UNBLENDED_COST" - MetricNet_amortized_cost Metric = "NET_AMORTIZED_COST" - MetricUsage_quantity Metric = "USAGE_QUANTITY" - MetricNormalized_usage_amount Metric = "NORMALIZED_USAGE_AMOUNT" + MetricBlendedCost Metric = "BLENDED_COST" + MetricUnblendedCost Metric = "UNBLENDED_COST" + MetricAmortizedCost Metric = "AMORTIZED_COST" + MetricNetUnblendedCost Metric = "NET_UNBLENDED_COST" + MetricNetAmortizedCost Metric = "NET_AMORTIZED_COST" + MetricUsageQuantity Metric = "USAGE_QUANTITY" + MetricNormalizedUsageAmount Metric = "NORMALIZED_USAGE_AMOUNT" ) // Values returns all known values for Metric. 
Note that this can be expanded in @@ -354,12 +354,12 @@ type NumericOperator string // Enum values for NumericOperator const ( - NumericOperatorEqual NumericOperator = "EQUAL" - NumericOperatorGreater_than_or_equal NumericOperator = "GREATER_THAN_OR_EQUAL" - NumericOperatorLess_than_or_equal NumericOperator = "LESS_THAN_OR_EQUAL" - NumericOperatorGreater_than NumericOperator = "GREATER_THAN" - NumericOperatorLess_than NumericOperator = "LESS_THAN" - NumericOperatorBetween NumericOperator = "BETWEEN" + NumericOperatorEqual NumericOperator = "EQUAL" + NumericOperatorGreaterThanOrEqual NumericOperator = "GREATER_THAN_OR_EQUAL" + NumericOperatorLessThanOrEqual NumericOperator = "LESS_THAN_OR_EQUAL" + NumericOperatorGreaterThan NumericOperator = "GREATER_THAN" + NumericOperatorLessThan NumericOperator = "LESS_THAN" + NumericOperatorBetween NumericOperator = "BETWEEN" ) // Values returns all known values for NumericOperator. Note that this can be @@ -398,12 +398,12 @@ type PaymentOption string // Enum values for PaymentOption const ( - PaymentOptionNo_upfront PaymentOption = "NO_UPFRONT" - PaymentOptionPartial_upfront PaymentOption = "PARTIAL_UPFRONT" - PaymentOptionAll_upfront PaymentOption = "ALL_UPFRONT" - PaymentOptionLight_utilization PaymentOption = "LIGHT_UTILIZATION" - PaymentOptionMedium_utilization PaymentOption = "MEDIUM_UTILIZATION" - PaymentOptionHeavy_utilization PaymentOption = "HEAVY_UTILIZATION" + PaymentOptionNoUpfront PaymentOption = "NO_UPFRONT" + PaymentOptionPartialUpfront PaymentOption = "PARTIAL_UPFRONT" + PaymentOptionAllUpfront PaymentOption = "ALL_UPFRONT" + PaymentOptionLightUtilization PaymentOption = "LIGHT_UTILIZATION" + PaymentOptionMediumUtilization PaymentOption = "MEDIUM_UTILIZATION" + PaymentOptionHeavyUtilization PaymentOption = "HEAVY_UTILIZATION" ) // Values returns all known values for PaymentOption. Note that this can be @@ -424,8 +424,8 @@ type RecommendationTarget string // Enum values for RecommendationTarget const ( - RecommendationTargetSame_instance_family RecommendationTarget = "SAME_INSTANCE_FAMILY" - RecommendationTargetCross_instance_family RecommendationTarget = "CROSS_INSTANCE_FAMILY" + RecommendationTargetSameInstanceFamily RecommendationTarget = "SAME_INSTANCE_FAMILY" + RecommendationTargetCrossInstanceFamily RecommendationTarget = "CROSS_INSTANCE_FAMILY" ) // Values returns all known values for RecommendationTarget. Note that this can be @@ -496,8 +496,8 @@ type SupportedSavingsPlansType string // Enum values for SupportedSavingsPlansType const ( - SupportedSavingsPlansTypeCompute_sp SupportedSavingsPlansType = "COMPUTE_SP" - SupportedSavingsPlansTypeEc2_instance_sp SupportedSavingsPlansType = "EC2_INSTANCE_SP" + SupportedSavingsPlansTypeComputeSp SupportedSavingsPlansType = "COMPUTE_SP" + SupportedSavingsPlansTypeEc2InstanceSp SupportedSavingsPlansType = "EC2_INSTANCE_SP" ) // Values returns all known values for SupportedSavingsPlansType. Note that this @@ -514,8 +514,8 @@ type TermInYears string // Enum values for TermInYears const ( - TermInYearsOne_year TermInYears = "ONE_YEAR" - TermInYearsThree_years TermInYears = "THREE_YEARS" + TermInYearsOneYear TermInYears = "ONE_YEAR" + TermInYearsThreeYears TermInYears = "THREE_YEARS" ) // Values returns all known values for TermInYears. 
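The constant renames above change only the Go identifiers (underscored suffixes such as Savings_plans become CamelCase SavingsPlans); the string values serialized on the wire are untouched, so this is a compile-time break only. A minimal usage sketch, assuming the standard github.com/aws/aws-sdk-go-v2/service/costexplorer/types import path and the Expression/DimensionValues field shapes implied by the documentation in this patch (the exact field types at this revision are an assumption):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/costexplorer/types"
)

func main() {
	// Only the Go names changed; the underlying string values are the same.
	fmt.Println(types.MetricUnblendedCost)           // prints "UNBLENDED_COST"
	fmt.Println(types.LookbackPeriodInDaysSevenDays) // prints "SEVEN_DAYS"

	// Each generated enum type documents a Values() method listing its known values.
	for _, d := range types.Dimension("").Values() {
		fmt.Println(d)
	}

	// A simple Expression filter (REGION == us-east-1 OR us-west-1), as described in
	// the Savings Plans coverage documentation above. Field shapes are assumed here
	// and may differ slightly at this revision.
	filter := types.Expression{
		Dimensions: &types.DimensionValues{
			Key:    types.DimensionRegion,
			Values: []string{"us-east-1", "us-west-1"},
		},
	}
	_ = filter
}

Callers that referenced the old underscored constant names would need a mechanical rename when picking up this change.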
Note that this can be expanded diff --git a/service/costexplorer/types/types.go b/service/costexplorer/types/types.go index a5fc43841a7..6555f92f649 100644 --- a/service/costexplorer/types/types.go +++ b/service/costexplorer/types/types.go @@ -89,35 +89,34 @@ type AnomalyMonitor struct { // Use Expression to filter by cost or by usage. There are two patterns: // - // * - // Simple dimension values - You can set the dimension name and values for the - // filters that you plan to use. For example, you can filter for REGION==us-east-1 - // OR REGION==us-west-1. For GetRightsizingRecommendation, the Region is a full - // name (for example, REGION==US East (N. Virginia). The Expression example looks - // like: { "Dimensions": { "Key": "REGION", "Values": [ "us-east-1", “us-west-1” ] - // } } The list of dimension values are OR'd together to retrieve cost or usage - // data. You can create Expression and DimensionValues objects using either with* - // methods or set* methods in multiple lines. + // * Simple + // dimension values - You can set the dimension name and values for the filters + // that you plan to use. For example, you can filter for REGION==us-east-1 OR + // REGION==us-west-1. For GetRightsizingRecommendation, the Region is a full name + // (for example, REGION==US East (N. Virginia). The Expression example looks like: + // { "Dimensions": { "Key": "REGION", "Values": [ "us-east-1", “us-west-1” ] } } + // The list of dimension values are OR'd together to retrieve cost or usage data. + // You can create Expression and DimensionValues objects using either with* methods + // or set* methods in multiple lines. // - // * Compound dimension values with - // logical operations - You can use multiple Expression types and the logical - // operators AND/OR/NOT to create a list of one or more Expression objects. This - // allows you to filter on more advanced options. For example, you can filter on - // ((REGION == us-east-1 OR REGION == us-west-1) OR (TAG.Type == Type1)) AND - // (USAGE_TYPE != DataTransfer). The Expression for that looks like this: { "And": - // [ {"Or": [ {"Dimensions": { "Key": "REGION", "Values": [ "us-east-1", - // "us-west-1" ] }}, {"Tags": { "Key": "TagName", "Values": ["Value1"] } } ]}, - // {"Not": {"Dimensions": { "Key": "USAGE_TYPE", "Values": ["DataTransfer"] }}} ] } - // Because each Expression can have only one operator, the service returns an error - // if more than one is specified. The following example shows an Expression object - // that creates an error. { "And": [ ... ], "DimensionValues": { "Dimension": - // "USAGE_TYPE", "Values": [ "DataTransfer" ] } } + // * Compound dimension values with logical + // operations - You can use multiple Expression types and the logical operators + // AND/OR/NOT to create a list of one or more Expression objects. This allows you + // to filter on more advanced options. For example, you can filter on ((REGION == + // us-east-1 OR REGION == us-west-1) OR (TAG.Type == Type1)) AND (USAGE_TYPE != + // DataTransfer). The Expression for that looks like this: { "And": [ {"Or": [ + // {"Dimensions": { "Key": "REGION", "Values": [ "us-east-1", "us-west-1" ] }}, + // {"Tags": { "Key": "TagName", "Values": ["Value1"] } } ]}, {"Not": {"Dimensions": + // { "Key": "USAGE_TYPE", "Values": ["DataTransfer"] }}} ] } Because each + // Expression can have only one operator, the service returns an error if more than + // one is specified. The following example shows an Expression object that creates + // an error. { "And": [ ... 
], "DimensionValues": { "Dimension": "USAGE_TYPE", + // "Values": [ "DataTransfer" ] } } // - // For GetRightsizingRecommendation - // action, a combination of OR and NOT is not supported. OR is not supported - // between different dimensions, or dimensions and tags. NOT operators aren't - // supported. Dimensions are also limited to LINKED_ACCOUNT, REGION, or - // RIGHTSIZING_TYPE. + // For GetRightsizingRecommendation action, a + // combination of OR and NOT is not supported. OR is not supported between + // different dimensions, or dimensions and tags. NOT operators aren't supported. + // Dimensions are also limited to LINKED_ACCOUNT, REGION, or RIGHTSIZING_TYPE. MonitorSpecification *Expression } @@ -601,35 +600,34 @@ type ESInstanceDetails struct { // Use Expression to filter by cost or by usage. There are two patterns: // -// * -// Simple dimension values - You can set the dimension name and values for the -// filters that you plan to use. For example, you can filter for REGION==us-east-1 -// OR REGION==us-west-1. For GetRightsizingRecommendation, the Region is a full -// name (for example, REGION==US East (N. Virginia). The Expression example looks -// like: { "Dimensions": { "Key": "REGION", "Values": [ "us-east-1", “us-west-1” ] -// } } The list of dimension values are OR'd together to retrieve cost or usage -// data. You can create Expression and DimensionValues objects using either with* -// methods or set* methods in multiple lines. +// * Simple +// dimension values - You can set the dimension name and values for the filters +// that you plan to use. For example, you can filter for REGION==us-east-1 OR +// REGION==us-west-1. For GetRightsizingRecommendation, the Region is a full name +// (for example, REGION==US East (N. Virginia). The Expression example looks like: +// { "Dimensions": { "Key": "REGION", "Values": [ "us-east-1", “us-west-1” ] } } +// The list of dimension values are OR'd together to retrieve cost or usage data. +// You can create Expression and DimensionValues objects using either with* methods +// or set* methods in multiple lines. // -// * Compound dimension values with -// logical operations - You can use multiple Expression types and the logical -// operators AND/OR/NOT to create a list of one or more Expression objects. This -// allows you to filter on more advanced options. For example, you can filter on -// ((REGION == us-east-1 OR REGION == us-west-1) OR (TAG.Type == Type1)) AND -// (USAGE_TYPE != DataTransfer). The Expression for that looks like this: { "And": -// [ {"Or": [ {"Dimensions": { "Key": "REGION", "Values": [ "us-east-1", -// "us-west-1" ] }}, {"Tags": { "Key": "TagName", "Values": ["Value1"] } } ]}, -// {"Not": {"Dimensions": { "Key": "USAGE_TYPE", "Values": ["DataTransfer"] }}} ] } -// Because each Expression can have only one operator, the service returns an error -// if more than one is specified. The following example shows an Expression object -// that creates an error. { "And": [ ... ], "DimensionValues": { "Dimension": -// "USAGE_TYPE", "Values": [ "DataTransfer" ] } } +// * Compound dimension values with logical +// operations - You can use multiple Expression types and the logical operators +// AND/OR/NOT to create a list of one or more Expression objects. This allows you +// to filter on more advanced options. For example, you can filter on ((REGION == +// us-east-1 OR REGION == us-west-1) OR (TAG.Type == Type1)) AND (USAGE_TYPE != +// DataTransfer). 
The Expression for that looks like this: { "And": [ {"Or": [ +// {"Dimensions": { "Key": "REGION", "Values": [ "us-east-1", "us-west-1" ] }}, +// {"Tags": { "Key": "TagName", "Values": ["Value1"] } } ]}, {"Not": {"Dimensions": +// { "Key": "USAGE_TYPE", "Values": ["DataTransfer"] }}} ] } Because each +// Expression can have only one operator, the service returns an error if more than +// one is specified. The following example shows an Expression object that creates +// an error. { "And": [ ... ], "DimensionValues": { "Dimension": "USAGE_TYPE", +// "Values": [ "DataTransfer" ] } } // -// For GetRightsizingRecommendation -// action, a combination of OR and NOT is not supported. OR is not supported -// between different dimensions, or dimensions and tags. NOT operators aren't -// supported. Dimensions are also limited to LINKED_ACCOUNT, REGION, or -// RIGHTSIZING_TYPE. +// For GetRightsizingRecommendation action, a +// combination of OR and NOT is not supported. OR is not supported between +// different dimensions, or dimensions and tags. NOT operators aren't supported. +// Dimensions are also limited to LINKED_ACCOUNT, REGION, or RIGHTSIZING_TYPE. type Expression struct { // Return results that match both Dimension objects. diff --git a/service/databasemigrationservice/api_op_ApplyPendingMaintenanceAction.go b/service/databasemigrationservice/api_op_ApplyPendingMaintenanceAction.go index c07113dbc34..8a6287f4eff 100644 --- a/service/databasemigrationservice/api_op_ApplyPendingMaintenanceAction.go +++ b/service/databasemigrationservice/api_op_ApplyPendingMaintenanceAction.go @@ -39,15 +39,14 @@ type ApplyPendingMaintenanceActionInput struct { // A value that specifies the type of opt-in request, or undoes an opt-in request. // You can't undo an opt-in request of type immediate. Valid values: // - // * - // immediate - Apply the maintenance action immediately. + // * immediate - + // Apply the maintenance action immediately. // - // * next-maintenance - - // Apply the maintenance action during the next maintenance window for the - // resource. + // * next-maintenance - Apply the + // maintenance action during the next maintenance window for the resource. // - // * undo-opt-in - Cancel any existing next-maintenance opt-in - // requests. + // * + // undo-opt-in - Cancel any existing next-maintenance opt-in requests. // // This member is required. OptInType *string diff --git a/service/databasemigrationservice/api_op_CreateEndpoint.go b/service/databasemigrationservice/api_op_CreateEndpoint.go index 702a9ff5758..16244b37ba5 100644 --- a/service/databasemigrationservice/api_op_CreateEndpoint.go +++ b/service/databasemigrationservice/api_op_CreateEndpoint.go @@ -60,18 +60,18 @@ type CreateEndpointInput struct { // The settings in JSON format for the DMS transfer type of source endpoint. // Possible settings include the following: // - // * ServiceAccessRoleArn - The IAM - // role that has permission to access the Amazon S3 bucket. + // * ServiceAccessRoleArn - The IAM role + // that has permission to access the Amazon S3 bucket. // - // * BucketName - The - // name of the S3 bucket to use. + // * BucketName - The name of + // the S3 bucket to use. // - // * CompressionType - An optional parameter to - // use GZIP to compress the target files. To use GZIP, set this value to NONE (the - // default). To keep the files uncompressed, don't use this value. + // * CompressionType - An optional parameter to use GZIP to + // compress the target files. To use GZIP, set this value to NONE (the default). 
To + // keep the files uncompressed, don't use this value. // - // Shorthand - // syntax for these settings is as follows: + // Shorthand syntax for these + // settings is as follows: // ServiceAccessRoleArn=string,BucketName=string,CompressionType=string JSON syntax // for these settings is as follows: { "ServiceAccessRoleArn": "string", // "BucketName": "string", "CompressionType": "none"|"gzip" } diff --git a/service/databasemigrationservice/api_op_CreateReplicationInstance.go b/service/databasemigrationservice/api_op_CreateReplicationInstance.go index 924fc82c403..5c9c3928c1f 100644 --- a/service/databasemigrationservice/api_op_CreateReplicationInstance.go +++ b/service/databasemigrationservice/api_op_CreateReplicationInstance.go @@ -50,13 +50,13 @@ type CreateReplicationInstanceInput struct { // The replication instance identifier. This parameter is stored as a lowercase // string. Constraints: // - // * Must contain 1-63 alphanumeric characters or - // hyphens. + // * Must contain 1-63 alphanumeric characters or hyphens. // - // * First character must be a letter. + // * + // First character must be a letter. // - // * Can't end with a hyphen - // or contain two consecutive hyphens. + // * Can't end with a hyphen or contain two + // consecutive hyphens. // // Example: myrepinstance // diff --git a/service/databasemigrationservice/api_op_CreateReplicationTask.go b/service/databasemigrationservice/api_op_CreateReplicationTask.go index 65db62a9054..208bb44805a 100644 --- a/service/databasemigrationservice/api_op_CreateReplicationTask.go +++ b/service/databasemigrationservice/api_op_CreateReplicationTask.go @@ -43,13 +43,13 @@ type CreateReplicationTaskInput struct { // An identifier for the replication task. Constraints: // - // * Must contain 1-255 + // * Must contain 1-255 // alphanumeric characters or hyphens. // - // * First character must be a letter. + // * First character must be a letter. // - // - // * Cannot end with a hyphen or contain two consecutive hyphens. + // * + // Cannot end with a hyphen or contain two consecutive hyphens. // // This member is required. ReplicationTaskIdentifier *string diff --git a/service/databasemigrationservice/api_op_ModifyEndpoint.go b/service/databasemigrationservice/api_op_ModifyEndpoint.go index ac3f9e7bcfb..8ea899ea98f 100644 --- a/service/databasemigrationservice/api_op_ModifyEndpoint.go +++ b/service/databasemigrationservice/api_op_ModifyEndpoint.go @@ -44,16 +44,16 @@ type ModifyEndpointInput struct { // The settings in JSON format for the DMS transfer type of source endpoint. // Attributes include the following: // - // * serviceAccessRoleArn - The AWS Identity - // and Access Management (IAM) role that has permission to access the Amazon S3 + // * serviceAccessRoleArn - The AWS Identity and + // Access Management (IAM) role that has permission to access the Amazon S3 // bucket. // - // * BucketName - The name of the S3 bucket to use. + // * BucketName - The name of the S3 bucket to use. // - // * - // compressionType - An optional parameter to use GZIP to compress the target - // files. Either set this parameter to NONE (the default) or don't use it to leave - // the files uncompressed. + // * compressionType - + // An optional parameter to use GZIP to compress the target files. Either set this + // parameter to NONE (the default) or don't use it to leave the files + // uncompressed. 
// // Shorthand syntax for these settings is as follows: // ServiceAccessRoleArn=string ,BucketName=string,CompressionType=string JSON diff --git a/service/databasemigrationservice/api_op_ModifyReplicationInstance.go b/service/databasemigrationservice/api_op_ModifyReplicationInstance.go index ba429fcf251..30976564a68 100644 --- a/service/databasemigrationservice/api_op_ModifyReplicationInstance.go +++ b/service/databasemigrationservice/api_op_ModifyReplicationInstance.go @@ -58,13 +58,13 @@ type ModifyReplicationInstanceInput struct { // is asynchronously applied as soon as possible. An outage does result if these // factors apply: // - // * This parameter is set to true during the maintenance + // * This parameter is set to true during the maintenance // window. // - // * A newer minor version is available. + // * A newer minor version is available. // - // * AWS DMS has enabled - // automatic patching for the given engine version. + // * AWS DMS has enabled automatic + // patching for the given engine version. AutoMinorVersionUpgrade *bool // The engine version number of the replication instance. When modifying a major diff --git a/service/databasemigrationservice/api_op_ModifyReplicationTask.go b/service/databasemigrationservice/api_op_ModifyReplicationTask.go index bf218cf32c1..f28fd02e3ba 100644 --- a/service/databasemigrationservice/api_op_ModifyReplicationTask.go +++ b/service/databasemigrationservice/api_op_ModifyReplicationTask.go @@ -72,13 +72,13 @@ type ModifyReplicationTaskInput struct { // The replication task identifier. Constraints: // - // * Must contain 1-255 - // alphanumeric characters or hyphens. + // * Must contain 1-255 alphanumeric + // characters or hyphens. // - // * First character must be a letter. + // * First character must be a letter. // - // - // * Cannot end with a hyphen or contain two consecutive hyphens. + // * Cannot end with a + // hyphen or contain two consecutive hyphens. ReplicationTaskIdentifier *string // JSON file that contains settings for the task, such as task metadata settings. diff --git a/service/databasemigrationservice/api_op_StartReplicationTaskAssessmentRun.go b/service/databasemigrationservice/api_op_StartReplicationTaskAssessmentRun.go index ee79b7b06e0..9056012dc71 100644 --- a/service/databasemigrationservice/api_op_StartReplicationTaskAssessmentRun.go +++ b/service/databasemigrationservice/api_op_StartReplicationTaskAssessmentRun.go @@ -82,12 +82,12 @@ type StartReplicationTaskAssessmentRunInput struct { // run. If you don't specify this request parameter, AWS DMS stores the assessment // run results without encryption. You can specify one of the options following: // + // * + // "SSE_S3" – The server-side encryption provided as a default by Amazon S3. // - // * "SSE_S3" – The server-side encryption provided as a default by Amazon S3. - // - // - // * "SSE_KMS" – AWS Key Management Service (AWS KMS) encryption. This encryption - // can use either a custom KMS encryption key that you specify or the default KMS + // * + // "SSE_KMS" – AWS Key Management Service (AWS KMS) encryption. This encryption can + // use either a custom KMS encryption key that you specify or the default KMS // encryption key that DMS provides. 
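The DMS transfer settings shown in the CreateEndpoint and ModifyEndpoint hunks above are a small settings document with equivalent shorthand and JSON forms. A hedged sketch that only reproduces the documented JSON shape; the struct below is a local, hypothetical mirror rather than an SDK type, and the ARN and bucket name are placeholders:

package main

import (
	"encoding/json"
	"fmt"
)

// dmsTransferSettings is a hypothetical local mirror of the documented JSON
// shape; it is not an SDK type.
type dmsTransferSettings struct {
	ServiceAccessRoleArn string `json:"ServiceAccessRoleArn"`
	BucketName           string `json:"BucketName"`
	CompressionType      string `json:"CompressionType"` // "none" or "gzip"
}

func main() {
	s := dmsTransferSettings{
		ServiceAccessRoleArn: "arn:aws:iam::123456789012:role/dms-s3-access", // placeholder ARN
		BucketName:           "my-dms-bucket",                                // placeholder bucket
		CompressionType:      "gzip",
	}

	// JSON syntax, as documented:
	// { "ServiceAccessRoleArn": "string", "BucketName": "string", "CompressionType": "none"|"gzip" }
	b, _ := json.Marshal(s)
	fmt.Println(string(b))

	// Equivalent shorthand syntax, as documented:
	// ServiceAccessRoleArn=string,BucketName=string,CompressionType=string
	fmt.Printf("ServiceAccessRoleArn=%s,BucketName=%s,CompressionType=%s\n",
		s.ServiceAccessRoleArn, s.BucketName, s.CompressionType)
}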
ResultEncryptionMode *string diff --git a/service/databasemigrationservice/types/enums.go b/service/databasemigrationservice/types/enums.go index ae007dea921..79923c6b946 100644 --- a/service/databasemigrationservice/types/enums.go +++ b/service/databasemigrationservice/types/enums.go @@ -6,9 +6,9 @@ type AuthMechanismValue string // Enum values for AuthMechanismValue const ( - AuthMechanismValueDefault AuthMechanismValue = "default" - AuthMechanismValueMongodb_cr AuthMechanismValue = "mongodb_cr" - AuthMechanismValueScram_sha_1 AuthMechanismValue = "scram_sha_1" + AuthMechanismValueDefault AuthMechanismValue = "default" + AuthMechanismValueMongodbCr AuthMechanismValue = "mongodb_cr" + AuthMechanismValueScramSha1 AuthMechanismValue = "scram_sha_1" ) // Values returns all known values for AuthMechanismValue. Note that this can be @@ -146,10 +146,10 @@ type DmsSslModeValue string // Enum values for DmsSslModeValue const ( - DmsSslModeValueNone DmsSslModeValue = "none" - DmsSslModeValueRequire DmsSslModeValue = "require" - DmsSslModeValueVerify_ca DmsSslModeValue = "verify-ca" - DmsSslModeValueVerify_full DmsSslModeValue = "verify-full" + DmsSslModeValueNone DmsSslModeValue = "none" + DmsSslModeValueRequire DmsSslModeValue = "require" + DmsSslModeValueVerifyCa DmsSslModeValue = "verify-ca" + DmsSslModeValueVerifyFull DmsSslModeValue = "verify-full" ) // Values returns all known values for DmsSslModeValue. Note that this can be @@ -168,9 +168,9 @@ type EncodingTypeValue string // Enum values for EncodingTypeValue const ( - EncodingTypeValuePlain EncodingTypeValue = "plain" - EncodingTypeValuePlain_dictionary EncodingTypeValue = "plain-dictionary" - EncodingTypeValueRle_dictionary EncodingTypeValue = "rle-dictionary" + EncodingTypeValuePlain EncodingTypeValue = "plain" + EncodingTypeValuePlainDictionary EncodingTypeValue = "plain-dictionary" + EncodingTypeValueRleDictionary EncodingTypeValue = "rle-dictionary" ) // Values returns all known values for EncodingTypeValue. Note that this can be @@ -188,8 +188,8 @@ type EncryptionModeValue string // Enum values for EncryptionModeValue const ( - EncryptionModeValueSse_s3 EncryptionModeValue = "sse-s3" - EncryptionModeValueSse_kms EncryptionModeValue = "sse-kms" + EncryptionModeValueSseS3 EncryptionModeValue = "sse-s3" + EncryptionModeValueSseKms EncryptionModeValue = "sse-kms" ) // Values returns all known values for EncryptionModeValue. Note that this can be @@ -206,8 +206,8 @@ type MessageFormatValue string // Enum values for MessageFormatValue const ( - MessageFormatValueJson MessageFormatValue = "json" - MessageFormatValueJson_unformatted MessageFormatValue = "json-unformatted" + MessageFormatValueJson MessageFormatValue = "json" + MessageFormatValueJsonUnformatted MessageFormatValue = "json-unformatted" ) // Values returns all known values for MessageFormatValue. Note that this can be @@ -224,9 +224,9 @@ type MigrationTypeValue string // Enum values for MigrationTypeValue const ( - MigrationTypeValueFull_load MigrationTypeValue = "full-load" - MigrationTypeValueCdc MigrationTypeValue = "cdc" - MigrationTypeValueFull_load_and_cdc MigrationTypeValue = "full-load-and-cdc" + MigrationTypeValueFullLoad MigrationTypeValue = "full-load" + MigrationTypeValueCdc MigrationTypeValue = "cdc" + MigrationTypeValueFullLoadAndCdc MigrationTypeValue = "full-load-and-cdc" ) // Values returns all known values for MigrationTypeValue. 
Note that this can be @@ -262,8 +262,8 @@ type ParquetVersionValue string // Enum values for ParquetVersionValue const ( - ParquetVersionValueParquet_1_0 ParquetVersionValue = "parquet-1-0" - ParquetVersionValueParquet_2_0 ParquetVersionValue = "parquet-2-0" + ParquetVersionValueParquet10 ParquetVersionValue = "parquet-1-0" + ParquetVersionValueParquet20 ParquetVersionValue = "parquet-2-0" ) // Values returns all known values for ParquetVersionValue. Note that this can be @@ -317,8 +317,8 @@ type ReloadOptionValue string // Enum values for ReloadOptionValue const ( - ReloadOptionValueData_reload ReloadOptionValue = "data-reload" - ReloadOptionValueValidate_only ReloadOptionValue = "validate-only" + ReloadOptionValueDataReload ReloadOptionValue = "data-reload" + ReloadOptionValueValidateOnly ReloadOptionValue = "validate-only" ) // Values returns all known values for ReloadOptionValue. Note that this can be @@ -353,9 +353,9 @@ type SafeguardPolicy string // Enum values for SafeguardPolicy const ( - SafeguardPolicyRely_on_sql_server_replication_agent SafeguardPolicy = "rely-on-sql-server-replication-agent" - SafeguardPolicyExclusive_automatic_truncation SafeguardPolicy = "exclusive-automatic-truncation" - SafeguardPolicyShared_automatic_truncation SafeguardPolicy = "shared-automatic-truncation" + SafeguardPolicyRelyOnSqlServerReplicationAgent SafeguardPolicy = "rely-on-sql-server-replication-agent" + SafeguardPolicyExclusiveAutomaticTruncation SafeguardPolicy = "exclusive-automatic-truncation" + SafeguardPolicySharedAutomaticTruncation SafeguardPolicy = "shared-automatic-truncation" ) // Values returns all known values for SafeguardPolicy. Note that this can be @@ -389,9 +389,9 @@ type StartReplicationTaskTypeValue string // Enum values for StartReplicationTaskTypeValue const ( - StartReplicationTaskTypeValueStart_replication StartReplicationTaskTypeValue = "start-replication" - StartReplicationTaskTypeValueResume_processing StartReplicationTaskTypeValue = "resume-processing" - StartReplicationTaskTypeValueReload_target StartReplicationTaskTypeValue = "reload-target" + StartReplicationTaskTypeValueStartReplication StartReplicationTaskTypeValue = "start-replication" + StartReplicationTaskTypeValueResumeProcessing StartReplicationTaskTypeValue = "resume-processing" + StartReplicationTaskTypeValueReloadTarget StartReplicationTaskTypeValue = "reload-target" ) // Values returns all known values for StartReplicationTaskTypeValue. Note that @@ -410,8 +410,8 @@ type TargetDbType string // Enum values for TargetDbType const ( - TargetDbTypeSpecific_database TargetDbType = "specific-database" - TargetDbTypeMultiple_databases TargetDbType = "multiple-databases" + TargetDbTypeSpecificDatabase TargetDbType = "specific-database" + TargetDbTypeMultipleDatabases TargetDbType = "multiple-databases" ) // Values returns all known values for TargetDbType. Note that this can be expanded diff --git a/service/databasemigrationservice/types/types.go b/service/databasemigrationservice/types/types.go index 8a3f86782bc..65b302cc346 100644 --- a/service/databasemigrationservice/types/types.go +++ b/service/databasemigrationservice/types/types.go @@ -93,14 +93,14 @@ type Connection struct { // The connection status. 
This parameter can return one of the following values: // + // * + // "successful" // - // * "successful" + // * "testing" // - // * "testing" + // * "failed" // - // * "failed" - // - // * "deleting" + // * "deleting" Status *string } @@ -153,14 +153,14 @@ type ElasticsearchSettings struct { // Describes an endpoint of a database instance in response to operations such as // the following: // -// * CreateEndpoint +// * CreateEndpoint // -// * DescribeEndpoint +// * DescribeEndpoint // -// * -// DescribeEndpointTypes +// * DescribeEndpointTypes // -// * ModifyEndpoint +// * +// ModifyEndpoint type Endpoint struct { // The Amazon Resource Name (ARN) used for SSL connection to the endpoint. @@ -172,18 +172,18 @@ type Endpoint struct { // The settings in JSON format for the DMS transfer type of source endpoint. // Possible settings include the following: // - // * ServiceAccessRoleArn - The IAM - // role that has permission to access the Amazon S3 bucket. + // * ServiceAccessRoleArn - The IAM role + // that has permission to access the Amazon S3 bucket. // - // * BucketName - The - // name of the S3 bucket to use. + // * BucketName - The name of + // the S3 bucket to use. // - // * CompressionType - An optional parameter to - // use GZIP to compress the target files. To use GZIP, set this value to NONE (the - // default). To keep the files uncompressed, don't use this value. + // * CompressionType - An optional parameter to use GZIP to + // compress the target files. To use GZIP, set this value to NONE (the default). To + // keep the files uncompressed, don't use this value. // - // Shorthand - // syntax for these settings is as follows: + // Shorthand syntax for these + // settings is as follows: // ServiceAccessRoleArn=string,BucketName=string,CompressionType=string JSON syntax // for these settings is as follows: { "ServiceAccessRoleArn": "string", // "BucketName": "string", "CompressionType": "none"|"gzip" } @@ -1275,13 +1275,13 @@ type ReplicationInstance struct { // The replication instance identifier is a required parameter. This parameter is // stored as a lowercase string. Constraints: // - // * Must contain 1-63 alphanumeric + // * Must contain 1-63 alphanumeric // characters or hyphens. // - // * First character must be a letter. + // * First character must be a letter. // - // * Cannot - // end with a hyphen or contain two consecutive hyphens. + // * Cannot end with a + // hyphen or contain two consecutive hyphens. // // Example: myrepinstance ReplicationInstanceIdentifier *string @@ -1300,35 +1300,35 @@ type ReplicationInstance struct { // The status of the replication instance. The possible return values include: // + // * + // "available" // - // * "available" - // - // * "creating" + // * "creating" // - // * "deleted" + // * "deleted" // - // * "deleting" + // * "deleting" // - // * - // "failed" + // * "failed" // - // * "modifying" + // * + // "modifying" // - // * "upgrading" + // * "upgrading" // - // * "rebooting" + // * "rebooting" // - // * - // "resetting-master-credentials" + // * "resetting-master-credentials" // - // * "storage-full" + // * + // "storage-full" // - // * - // "incompatible-credentials" + // * "incompatible-credentials" // - // * "incompatible-network" + // * "incompatible-network" // - // * "maintenance" + // * + // "maintenance" ReplicationInstanceStatus *string // The subnet group for the replication instance. @@ -1441,13 +1441,13 @@ type ReplicationTask struct { // The user-assigned replication task identifier or name. 
Constraints: // - // * Must + // * Must // contain 1-255 alphanumeric characters or hyphens. // - // * First character must be - // a letter. + // * First character must be a + // letter. // - // * Cannot end with a hyphen or contain two consecutive hyphens. + // * Cannot end with a hyphen or contain two consecutive hyphens. ReplicationTaskIdentifier *string // The settings for the replication task. @@ -1469,18 +1469,18 @@ type ReplicationTask struct { // The reason the replication task was stopped. This response parameter can return // one of the following values: // - // * "STOP_REASON_FULL_LOAD_COMPLETED" – - // Full-load migration completed. + // * "STOP_REASON_FULL_LOAD_COMPLETED" – Full-load + // migration completed. // - // * "STOP_REASON_CACHED_CHANGES_APPLIED" – - // Change data capture (CDC) load completed. + // * "STOP_REASON_CACHED_CHANGES_APPLIED" – Change data + // capture (CDC) load completed. // - // * - // "STOP_REASON_CACHED_CHANGES_NOT_APPLIED" – In a full-load and CDC migration, the - // full-load stopped as specified before starting the CDC migration. + // * "STOP_REASON_CACHED_CHANGES_NOT_APPLIED" – In a + // full-load and CDC migration, the full-load stopped as specified before starting + // the CDC migration. // - // * - // "STOP_REASON_SERVER_TIME" – The migration stopped at the specified server time. + // * "STOP_REASON_SERVER_TIME" – The migration stopped at the + // specified server time. StopReason *string // Table mappings specified in the task. @@ -1568,38 +1568,38 @@ type ReplicationTaskAssessmentRun struct { // Assessment run status. This status can have one of the following values: // - // * + // * // "cancelling" – The assessment run was canceled by the // CancelReplicationTaskAssessmentRun operation. // - // * "deleting" – The assessment - // run was deleted by the DeleteReplicationTaskAssessmentRun operation. - // - // * - // "failed" – At least one individual assessment completed with a failed status. + // * "deleting" – The assessment run + // was deleted by the DeleteReplicationTaskAssessmentRun operation. // + // * "failed" – + // At least one individual assessment completed with a failed status. // - // * "error-provisioning" – An internal error occurred while resources were + // * + // "error-provisioning" – An internal error occurred while resources were // provisioned (during provisioning status). // - // * "error-executing" – An internal + // * "error-executing" – An internal // error occurred while individual assessments ran (during running status). // - // * + // * // "invalid state" – The assessment run is in an unknown state. // - // * "passed" – - // All individual assessments have completed, and none has a failed status. + // * "passed" – All + // individual assessments have completed, and none has a failed status. // - // * + // * // "provisioning" – Resources required to run individual assessments are being // provisioned. // - // * "running" – Individual assessments are being run. + // * "running" – Individual assessments are being run. // - // * - // "starting" – The assessment run is starting, but resources are not yet being - // provisioned for individual assessments. + // * "starting" + // – The assessment run is starting, but resources are not yet being provisioned + // for individual assessments. Status *string } @@ -1634,18 +1634,18 @@ type ReplicationTaskIndividualAssessment struct { // Individual assessment status. 
This status can have one of the following // values: // - // * "cancelled" + // * "cancelled" // - // * "error" + // * "error" // - // * "failed" + // * "failed" // - // * "passed" + // * "passed" // - // * - // "pending" + // * "pending" // - // * "running" + // * + // "running" Status *string } @@ -1774,12 +1774,11 @@ type S3Settings struct { // The format of the data that you want to use for output. You can choose one of // the following: // - // * csv : This is a row-based file format with comma-separated + // * csv : This is a row-based file format with comma-separated // values (.csv). // - // * parquet : Apache Parquet (.parquet) is a columnar storage - // file format that features efficient compression and provides faster query - // response. + // * parquet : Apache Parquet (.parquet) is a columnar storage file + // format that features efficient compression and provides faster query response. DataFormat DataFormatValue // The size of one data page in bytes. This parameter defaults to 1024 * 1024 bytes @@ -1817,16 +1816,16 @@ type S3Settings struct { // The type of encoding you are using: // - // * RLE_DICTIONARY uses a combination of + // * RLE_DICTIONARY uses a combination of // bit-packing and run-length encoding to store repeated values more efficiently. // This is the default. // - // * PLAIN doesn't use encoding at all. Values are stored - // as they are. + // * PLAIN doesn't use encoding at all. Values are stored as + // they are. // - // * PLAIN_DICTIONARY builds a dictionary of the values - // encountered in a given column. The dictionary is stored in a dictionary page for - // each column chunk. + // * PLAIN_DICTIONARY builds a dictionary of the values encountered in a + // given column. The dictionary is stored in a dictionary page for each column + // chunk. EncodingType EncodingTypeValue // The type of server-side encryption that you want to use for your data. This @@ -1838,30 +1837,30 @@ type S3Settings struct { // and Access Management (IAM) role with permission to allow "arn:aws:s3:::dms-*" // to use the following actions: // - // * s3:CreateBucket + // * s3:CreateBucket // - // * s3:ListBucket + // * s3:ListBucket // - // * + // * // s3:DeleteBucket // - // * s3:GetBucketLocation + // * s3:GetBucketLocation // - // * s3:GetObject + // * s3:GetObject // - // * - // s3:PutObject + // * s3:PutObject // - // * s3:DeleteObject + // * + // s3:DeleteObject // - // * s3:GetObjectVersion + // * s3:GetObjectVersion // - // * - // s3:GetBucketPolicy + // * s3:GetBucketPolicy // - // * s3:PutBucketPolicy + // * + // s3:PutBucketPolicy // - // * s3:DeleteBucketPolicy + // * s3:DeleteBucketPolicy EncryptionMode EncryptionModeValue // Specifies how tables are defined in the S3 source files only. @@ -2062,39 +2061,39 @@ type TableStatistics struct { // The validation state of the table. This parameter can have the following // values: // - // * Not enabled – Validation isn't enabled for the table in the - // migration task. + // * Not enabled – Validation isn't enabled for the table in the migration + // task. // - // * Pending records – Some records in the table are waiting - // for validation. + // * Pending records – Some records in the table are waiting for + // validation. // - // * Mismatched records – Some records in the table don't - // match between the source and target. + // * Mismatched records – Some records in the table don't match + // between the source and target. // - // * Suspended records – Some records in - // the table couldn't be validated. 
+ // * Suspended records – Some records in the table + // couldn't be validated. // - // * No primary key –The table couldn't be - // validated because it has no primary key. + // * No primary key –The table couldn't be validated + // because it has no primary key. // - // * Table error – The table wasn't - // validated because it's in an error state and some data wasn't migrated. + // * Table error – The table wasn't validated + // because it's in an error state and some data wasn't migrated. // - // * - // Validated – All rows in the table are validated. If the table is updated, the - // status can change from Validated. + // * Validated – All + // rows in the table are validated. If the table is updated, the status can change + // from Validated. // - // * Error – The table couldn't be validated - // because of an unexpected error. + // * Error – The table couldn't be validated because of an + // unexpected error. // - // * Pending validation – The table is waiting - // validation. + // * Pending validation – The table is waiting validation. // - // * Preparing table – Preparing the table enabled in the - // migration task for validation. + // * + // Preparing table – Preparing the table enabled in the migration task for + // validation. // - // * Pending revalidation – All rows in the - // table are pending validation after the table was updated. + // * Pending revalidation – All rows in the table are pending + // validation after the table was updated. ValidationState *string // Additional details about the state of validation. @@ -2121,12 +2120,12 @@ type TableToReload struct { // A user-defined key-value pair that describes metadata added to an AWS DMS // resource and that is used by operations such as the following: // -// * +// * // AddTagsToResource // -// * ListTagsForResource +// * ListTagsForResource // -// * RemoveTagsFromResource +// * RemoveTagsFromResource type Tag struct { // A key is the required name of the tag. The string value can be 1-128 Unicode diff --git a/service/dataexchange/types/enums.go b/service/dataexchange/types/enums.go index ca2d3b861bf..762685a19ea 100644 --- a/service/dataexchange/types/enums.go +++ b/service/dataexchange/types/enums.go @@ -6,7 +6,7 @@ type AssetType string // Enum values for AssetType const ( - AssetTypeS3_snapshot AssetType = "S3_SNAPSHOT" + AssetTypeS3Snapshot AssetType = "S3_SNAPSHOT" ) // Values returns all known values for AssetType. Note that this can be expanded in @@ -22,13 +22,13 @@ type Code string // Enum values for Code const ( - CodeAccess_denied_exception Code = "ACCESS_DENIED_EXCEPTION" - CodeInternal_server_exception Code = "INTERNAL_SERVER_EXCEPTION" - CodeMalware_detected Code = "MALWARE_DETECTED" - CodeResource_not_found_exception Code = "RESOURCE_NOT_FOUND_EXCEPTION" - CodeService_quota_exceeded_exception Code = "SERVICE_QUOTA_EXCEEDED_EXCEPTION" - CodeValidation_exception Code = "VALIDATION_EXCEPTION" - CodeMalware_scan_encrypted_file Code = "MALWARE_SCAN_ENCRYPTED_FILE" + CodeAccessDeniedException Code = "ACCESS_DENIED_EXCEPTION" + CodeInternalServerException Code = "INTERNAL_SERVER_EXCEPTION" + CodeMalwareDetected Code = "MALWARE_DETECTED" + CodeResourceNotFoundException Code = "RESOURCE_NOT_FOUND_EXCEPTION" + CodeServiceQuotaExceededException Code = "SERVICE_QUOTA_EXCEEDED_EXCEPTION" + CodeValidationException Code = "VALIDATION_EXCEPTION" + CodeMalwareScanEncryptedFile Code = "MALWARE_SCAN_ENCRYPTED_FILE" ) // Values returns all known values for Code. 
Note that this can be expanded in the @@ -50,8 +50,8 @@ type JobErrorLimitName string // Enum values for JobErrorLimitName const ( - JobErrorLimitNameAssets_per_revision JobErrorLimitName = "Assets per revision" - JobErrorLimitNameAsset_size_in_gb JobErrorLimitName = "Asset size in GB" + JobErrorLimitNameAssetsPerRevision JobErrorLimitName = "Assets per revision" + JobErrorLimitNameAssetSizeInGb JobErrorLimitName = "Asset size in GB" ) // Values returns all known values for JobErrorLimitName. Note that this can be @@ -86,18 +86,18 @@ type LimitName string // Enum values for LimitName const ( - LimitNameProducts_per_account LimitName = "Products per account" - LimitNameData_sets_per_account LimitName = "Data sets per account" - LimitNameData_sets_per_product LimitName = "Data sets per product" - LimitNameRevisions_per_data_set LimitName = "Revisions per data set" - LimitNameAssets_per_revision LimitName = "Assets per revision" - LimitNameAssets_per_import_job_from_amazon_s3 LimitName = "Assets per import job from Amazon S3" - LimitNameAsset_per_export_job_from_amazon_s3 LimitName = "Asset per export job from Amazon S3" - LimitNameAsset_size_in_gb LimitName = "Asset size in GB" - LimitNameConcurrent_in_progress_jobs_to_import_assets_from_amazon_s3 LimitName = "Concurrent in progress jobs to import assets from Amazon S3" - LimitNameConcurrent_in_progress_jobs_to_import_assets_from_a_signed_url LimitName = "Concurrent in progress jobs to import assets from a signed URL" - LimitNameConcurrent_in_progress_jobs_to_export_assets_to_amazon_s3 LimitName = "Concurrent in progress jobs to export assets to Amazon S3" - LimitNameConcurrent_in_progress_jobs_to_export_assets_to_a_signed_url LimitName = "Concurrent in progress jobs to export assets to a signed URL" + LimitNameProductsPerAccount LimitName = "Products per account" + LimitNameDataSetsPerAccount LimitName = "Data sets per account" + LimitNameDataSetsPerProduct LimitName = "Data sets per product" + LimitNameRevisionsPerDataSet LimitName = "Revisions per data set" + LimitNameAssetsPerRevision LimitName = "Assets per revision" + LimitNameAssetsPerImportJobFromAmazonS3 LimitName = "Assets per import job from Amazon S3" + LimitNameAssetPerExportJobFromAmazonS3 LimitName = "Asset per export job from Amazon S3" + LimitNameAssetSizeInGb LimitName = "Asset size in GB" + LimitNameConcurrentInProgressJobsToImportAssetsFromAmazonS3 LimitName = "Concurrent in progress jobs to import assets from Amazon S3" + LimitNameConcurrentInProgressJobsToImportAssetsFromASignedUrl LimitName = "Concurrent in progress jobs to import assets from a signed URL" + LimitNameConcurrentInProgressJobsToExportAssetsToAmazonS3 LimitName = "Concurrent in progress jobs to export assets to Amazon S3" + LimitNameConcurrentInProgressJobsToExportAssetsToASignedUrl LimitName = "Concurrent in progress jobs to export assets to a signed URL" ) // Values returns all known values for LimitName. 
Note that this can be expanded in @@ -142,7 +142,7 @@ type ResourceType string // Enum values for ResourceType const ( - ResourceTypeData_set ResourceType = "DATA_SET" + ResourceTypeDataSet ResourceType = "DATA_SET" ResourceTypeRevision ResourceType = "REVISION" ResourceTypeAsset ResourceType = "ASSET" ResourceTypeJob ResourceType = "JOB" @@ -164,8 +164,8 @@ type ServerSideEncryptionTypes string // Enum values for ServerSideEncryptionTypes const ( - ServerSideEncryptionTypesAws_kms ServerSideEncryptionTypes = "aws:kms" - ServerSideEncryptionTypesAes256 ServerSideEncryptionTypes = "AES256" + ServerSideEncryptionTypesAwsKms ServerSideEncryptionTypes = "aws:kms" + ServerSideEncryptionTypesAes256 ServerSideEncryptionTypes = "AES256" ) // Values returns all known values for ServerSideEncryptionTypes. Note that this @@ -182,12 +182,12 @@ type State string // Enum values for State const ( - StateWaiting State = "WAITING" - StateIn_progress State = "IN_PROGRESS" - StateError State = "ERROR" - StateCompleted State = "COMPLETED" - StateCancelled State = "CANCELLED" - StateTimed_out State = "TIMED_OUT" + StateWaiting State = "WAITING" + StateInProgress State = "IN_PROGRESS" + StateError State = "ERROR" + StateCompleted State = "COMPLETED" + StateCancelled State = "CANCELLED" + StateTimedOut State = "TIMED_OUT" ) // Values returns all known values for State. Note that this can be expanded in the @@ -208,10 +208,10 @@ type Type string // Enum values for Type const ( - TypeImport_assets_from_s3 Type = "IMPORT_ASSETS_FROM_S3" - TypeImport_asset_from_signed_url Type = "IMPORT_ASSET_FROM_SIGNED_URL" - TypeExport_assets_to_s3 Type = "EXPORT_ASSETS_TO_S3" - TypeExport_asset_to_signed_url Type = "EXPORT_ASSET_TO_SIGNED_URL" + TypeImportAssetsFromS3 Type = "IMPORT_ASSETS_FROM_S3" + TypeImportAssetFromSignedUrl Type = "IMPORT_ASSET_FROM_SIGNED_URL" + TypeExportAssetsToS3 Type = "EXPORT_ASSETS_TO_S3" + TypeExportAssetToSignedUrl Type = "EXPORT_ASSET_TO_SIGNED_URL" ) // Values returns all known values for Type. Note that this can be expanded in the diff --git a/service/datapipeline/api_op_PutPipelineDefinition.go b/service/datapipeline/api_op_PutPipelineDefinition.go index 6c18afa7816..b2a1c18feaf 100644 --- a/service/datapipeline/api_op_PutPipelineDefinition.go +++ b/service/datapipeline/api_op_PutPipelineDefinition.go @@ -17,19 +17,19 @@ import ( // pipeline are saved unless one of the following validation errors exist in the // pipeline. // -// * An object is missing a name or identifier field. +// * An object is missing a name or identifier field. // -// * A -// string or reference field is empty. +// * A string or +// reference field is empty. // -// * The number of objects in the pipeline -// exceeds the allowed maximum number of objects. +// * The number of objects in the pipeline exceeds the +// allowed maximum number of objects. // -// * The pipeline is in a -// FINISHED state. +// * The pipeline is in a FINISHED +// state. // -// Pipeline object definitions are passed to the -// PutPipelineDefinition action and returned by the GetPipelineDefinition action. +// Pipeline object definitions are passed to the PutPipelineDefinition +// action and returned by the GetPipelineDefinition action. 
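PutPipelineDefinition rejects a definition that hits any of the validation errors listed above. A rough, self-contained sketch of pre-checking the first three documented conditions before calling the API; pipelineObject and field are hypothetical local mirrors of a pipeline object (not SDK types), and maxObjects is an assumed placeholder for the service limit, which this patch does not state. The FINISHED-state condition can only be checked against the service and is not covered here.

package main

import (
	"errors"
	"fmt"
)

// Hypothetical local mirrors of a pipeline object definition; not SDK types.
type field struct {
	Key, StringValue, RefValue string
}

type pipelineObject struct {
	ID, Name string
	Fields   []field
}

// maxObjects is an assumed placeholder for the service's object limit.
const maxObjects = 100

// validate applies the documented checks: every object needs a name and an
// identifier, string/reference field values must not be empty, and the object
// count must not exceed the allowed maximum.
func validate(objs []pipelineObject) error {
	if len(objs) > maxObjects {
		return fmt.Errorf("pipeline has %d objects; maximum is %d", len(objs), maxObjects)
	}
	for _, o := range objs {
		if o.ID == "" || o.Name == "" {
			return errors.New("an object is missing a name or identifier field")
		}
		for _, f := range o.Fields {
			if f.StringValue == "" && f.RefValue == "" {
				return fmt.Errorf("object %q: field %q has an empty string/reference value", o.Name, f.Key)
			}
		}
	}
	return nil
}

func main() {
	objs := []pipelineObject{{ID: "Default", Name: "Default", Fields: []field{{Key: "scheduleType", StringValue: "cron"}}}}
	fmt.Println(validate(objs)) // <nil>
}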
func (c *Client) PutPipelineDefinition(ctx context.Context, params *PutPipelineDefinitionInput, optFns ...func(*Options)) (*PutPipelineDefinitionOutput, error) { if params == nil { params = &PutPipelineDefinitionInput{} diff --git a/service/datapipeline/types/types.go b/service/datapipeline/types/types.go index 2542a319dd1..694cfc504ad 100644 --- a/service/datapipeline/types/types.go +++ b/service/datapipeline/types/types.go @@ -49,46 +49,46 @@ type Operator struct { // apply only to certain object fields, as detailed below. The comparison operators // EQ and REF_EQ act on the following fields: // - // * name + // * name // - // * @sphere + // * @sphere // - // * - // parent + // * parent // - // * @componentParent + // * + // @componentParent // - // * @instanceParent + // * @instanceParent // - // * @status + // * @status // - // * - // @scheduledStartTime + // * @scheduledStartTime // - // * @scheduledEndTime + // * + // @scheduledEndTime // - // * @actualStartTime + // * @actualStartTime // - // * - // @actualEndTime + // * @actualEndTime // - // The comparison operators GE, LE, and BETWEEN act on the - // following fields: + // The comparison + // operators GE, LE, and BETWEEN act on the following fields: // - // * @scheduledStartTime + // * + // @scheduledStartTime // - // * @scheduledEndTime + // * @scheduledEndTime // - // * - // @actualStartTime + // * @actualStartTime // - // * @actualEndTime + // * + // @actualEndTime // - // Note that fields beginning with the at - // sign (@) are read-only and set by the web service. When you name fields, you - // should choose names containing only alpha-numeric values, as symbols may be - // reserved by AWS Data Pipeline. User-defined fields that you add to a pipeline - // should prefix their name with the string "my". + // Note that fields beginning with the at sign (@) are read-only + // and set by the web service. When you name fields, you should choose names + // containing only alpha-numeric values, as symbols may be reserved by AWS Data + // Pipeline. User-defined fields that you add to a pipeline should prefix their + // name with the string "my". Type OperatorType // The value that the actual field value will be compared with. diff --git a/service/datasync/api_op_CreateLocationEfs.go b/service/datasync/api_op_CreateLocationEfs.go index 684c74d29ed..ba7af562dbe 100644 --- a/service/datasync/api_op_CreateLocationEfs.go +++ b/service/datasync/api_op_CreateLocationEfs.go @@ -36,18 +36,18 @@ type CreateLocationEfsInput struct { // security group M (of the mount target) and security group S (which you provide // for DataSync to use at this stage) is as follows: // - // * Security group M (which - // you associate with the mount target) must allow inbound access for the - // Transmission Control Protocol (TCP) on the NFS port (2049) from security group - // S. You can enable inbound connections either by IP address (CIDR range) or - // security group. + // * Security group M (which you + // associate with the mount target) must allow inbound access for the Transmission + // Control Protocol (TCP) on the NFS port (2049) from security group S. You can + // enable inbound connections either by IP address (CIDR range) or security + // group. // - // * Security group S (provided to DataSync to access EFS) - // should have a rule that enables outbound connections to the NFS port on one of - // the file system’s mount targets. You can enable outbound connections either by - // IP address (CIDR range) or security group. 
For information about security groups - // and mount targets, see Security Groups for Amazon EC2 Instances and Mount - // Targets in the Amazon EFS User Guide. + // * Security group S (provided to DataSync to access EFS) should have a + // rule that enables outbound connections to the NFS port on one of the file + // system’s mount targets. You can enable outbound connections either by IP address + // (CIDR range) or security group. For information about security groups and mount + // targets, see Security Groups for Amazon EC2 Instances and Mount Targets in the + // Amazon EFS User Guide. // // This member is required. Ec2Config *types.Ec2Config diff --git a/service/datasync/types/enums.go b/service/datasync/types/enums.go index 34e48ed3d03..a0e82f0092c 100644 --- a/service/datasync/types/enums.go +++ b/service/datasync/types/enums.go @@ -24,8 +24,8 @@ type Atime string // Enum values for Atime const ( - AtimeNone Atime = "NONE" - AtimeBest_effort Atime = "BEST_EFFORT" + AtimeNone Atime = "NONE" + AtimeBestEffort Atime = "BEST_EFFORT" ) // Values returns all known values for Atime. Note that this can be expanded in the @@ -42,9 +42,9 @@ type EndpointType string // Enum values for EndpointType const ( - EndpointTypePublic EndpointType = "PUBLIC" - EndpointTypePrivate_link EndpointType = "PRIVATE_LINK" - EndpointTypeFips EndpointType = "FIPS" + EndpointTypePublic EndpointType = "PUBLIC" + EndpointTypePrivateLink EndpointType = "PRIVATE_LINK" + EndpointTypeFips EndpointType = "FIPS" ) // Values returns all known values for EndpointType. Note that this can be expanded @@ -62,7 +62,7 @@ type FilterType string // Enum values for FilterType const ( - FilterTypeSimple_pattern FilterType = "SIMPLE_PATTERN" + FilterTypeSimplePattern FilterType = "SIMPLE_PATTERN" ) // Values returns all known values for FilterType. Note that this can be expanded @@ -78,10 +78,10 @@ type Gid string // Enum values for Gid const ( - GidNone Gid = "NONE" - GidInt_value Gid = "INT_VALUE" - GidName Gid = "NAME" - GidBoth Gid = "BOTH" + GidNone Gid = "NONE" + GidIntValue Gid = "INT_VALUE" + GidName Gid = "NAME" + GidBoth Gid = "BOTH" ) // Values returns all known values for Gid. Note that this can be expanded in the @@ -160,8 +160,8 @@ type NfsVersion string const ( NfsVersionAutomatic NfsVersion = "AUTOMATIC" NfsVersionNfs3 NfsVersion = "NFS3" - NfsVersionNfs4_0 NfsVersion = "NFS4_0" - NfsVersionNfs4_1 NfsVersion = "NFS4_1" + NfsVersionNfs40 NfsVersion = "NFS4_0" + NfsVersionNfs41 NfsVersion = "NFS4_1" ) // Values returns all known values for NfsVersion. Note that this can be expanded @@ -198,16 +198,16 @@ type Operator string // Enum values for Operator const ( - OperatorEq Operator = "Equals" - OperatorNe Operator = "NotEquals" - OperatorIn Operator = "In" - OperatorLe Operator = "LessThanOrEqual" - OperatorLt Operator = "LessThan" - OperatorGe Operator = "GreaterThanOrEqual" - OperatorGt Operator = "GreaterThan" - OperatorContains Operator = "Contains" - OperatorNot_contains Operator = "NotContains" - OperatorBegins_with Operator = "BeginsWith" + OperatorEq Operator = "Equals" + OperatorNe Operator = "NotEquals" + OperatorIn Operator = "In" + OperatorLe Operator = "LessThanOrEqual" + OperatorLt Operator = "LessThan" + OperatorGe Operator = "GreaterThanOrEqual" + OperatorGt Operator = "GreaterThan" + OperatorContains Operator = "Contains" + OperatorNotContains Operator = "NotContains" + OperatorBeginsWith Operator = "BeginsWith" ) // Values returns all known values for Operator. 
Note that this can be expanded in @@ -324,13 +324,13 @@ type S3StorageClass string // Enum values for S3StorageClass const ( - S3StorageClassStandard S3StorageClass = "STANDARD" - S3StorageClassStandard_ia S3StorageClass = "STANDARD_IA" - S3StorageClassOnezone_ia S3StorageClass = "ONEZONE_IA" - S3StorageClassIntelligent_tiering S3StorageClass = "INTELLIGENT_TIERING" - S3StorageClassGlacier S3StorageClass = "GLACIER" - S3StorageClassDeep_archive S3StorageClass = "DEEP_ARCHIVE" - S3StorageClassOutposts S3StorageClass = "OUTPOSTS" + S3StorageClassStandard S3StorageClass = "STANDARD" + S3StorageClassStandardIa S3StorageClass = "STANDARD_IA" + S3StorageClassOnezoneIa S3StorageClass = "ONEZONE_IA" + S3StorageClassIntelligentTiering S3StorageClass = "INTELLIGENT_TIERING" + S3StorageClassGlacier S3StorageClass = "GLACIER" + S3StorageClassDeepArchive S3StorageClass = "DEEP_ARCHIVE" + S3StorageClassOutposts S3StorageClass = "OUTPOSTS" ) // Values returns all known values for S3StorageClass. Note that this can be @@ -478,10 +478,10 @@ type Uid string // Enum values for Uid const ( - UidNone Uid = "NONE" - UidInt_value Uid = "INT_VALUE" - UidName Uid = "NAME" - UidBoth Uid = "BOTH" + UidNone Uid = "NONE" + UidIntValue Uid = "INT_VALUE" + UidName Uid = "NAME" + UidBoth Uid = "BOTH" ) // Values returns all known values for Uid. Note that this can be expanded in the @@ -500,9 +500,9 @@ type VerifyMode string // Enum values for VerifyMode const ( - VerifyModePoint_in_time_consistent VerifyMode = "POINT_IN_TIME_CONSISTENT" - VerifyModeOnly_files_transferred VerifyMode = "ONLY_FILES_TRANSFERRED" - VerifyModeNone VerifyMode = "NONE" + VerifyModePointInTimeConsistent VerifyMode = "POINT_IN_TIME_CONSISTENT" + VerifyModeOnlyFilesTransferred VerifyMode = "ONLY_FILES_TRANSFERRED" + VerifyModeNone VerifyMode = "NONE" ) // Values returns all known values for VerifyMode. Note that this can be expanded diff --git a/service/datasync/types/types.go b/service/datasync/types/types.go index d0a70308e7e..08a04804063 100644 --- a/service/datasync/types/types.go +++ b/service/datasync/types/types.go @@ -106,15 +106,15 @@ type NfsMountOptions struct { // automatically selects a version based on negotiation with the NFS server. You // can specify the following NFS versions: // - // * NFSv3 + // * NFSv3 // (https://tools.ietf.org/html/rfc1813) - stateless protocol version that allows // for asynchronous writes on the server. // - // * NFSv4.0 + // * NFSv4.0 // (https://tools.ietf.org/html/rfc3530) - stateful, firewall-friendly protocol // version that supports delegations and pseudo filesystems. // - // * NFSv4.1 + // * NFSv4.1 // (https://tools.ietf.org/html/rfc5661) - stateful protocol version that supports // sessions, directory delegations, and parallel data processing. Version 4.1 also // includes all features available in version 4.0. diff --git a/service/dax/api_op_CreateCluster.go b/service/dax/api_op_CreateCluster.go index cd2be056b5b..fa713887528 100644 --- a/service/dax/api_op_CreateCluster.go +++ b/service/dax/api_op_CreateCluster.go @@ -33,13 +33,13 @@ type CreateClusterInput struct { // The cluster identifier. This parameter is stored as a lowercase string. // Constraints: // - // * A name must contain from 1 to 20 alphanumeric characters or + // * A name must contain from 1 to 20 alphanumeric characters or // hyphens. // - // * The first character must be a letter. + // * The first character must be a letter. // - // * A name cannot end - // with a hyphen or contain two consecutive hyphens. 
+ // * A name cannot end with a + // hyphen or contain two consecutive hyphens. // // This member is required. ClusterName *string @@ -88,24 +88,24 @@ type CreateClusterInput struct { // Clock UTC). The minimum maintenance window is a 60 minute period. Valid values // for ddd are: // - // * sun + // * sun // - // * mon + // * mon // - // * tue + // * tue // - // * wed + // * wed // - // * thu - // - // * fri + // * thu // + // * fri // // * sat // - // Example: sun:05:00-sun:09:00 If you don't specify a preferred maintenance - // window when you create or modify a cache cluster, DAX assigns a 60-minute - // maintenance window on a randomly selected day of the week. + // Example: + // sun:05:00-sun:09:00 If you don't specify a preferred maintenance window when you + // create or modify a cache cluster, DAX assigns a 60-minute maintenance window on + // a randomly selected day of the week. PreferredMaintenanceWindow *string // Represents the settings used to enable server-side encryption on the cluster. diff --git a/service/dax/types/enums.go b/service/dax/types/enums.go index 05709df0360..e39c82bff7f 100644 --- a/service/dax/types/enums.go +++ b/service/dax/types/enums.go @@ -6,8 +6,8 @@ type ChangeType string // Enum values for ChangeType const ( - ChangeTypeImmediate ChangeType = "IMMEDIATE" - ChangeTypeRequires_reboot ChangeType = "REQUIRES_REBOOT" + ChangeTypeImmediate ChangeType = "IMMEDIATE" + ChangeTypeRequiresReboot ChangeType = "REQUIRES_REBOOT" ) // Values returns all known values for ChangeType. Note that this can be expanded @@ -44,8 +44,8 @@ type ParameterType string // Enum values for ParameterType const ( - ParameterTypeDefault ParameterType = "DEFAULT" - ParameterTypeNode_type_specific ParameterType = "NODE_TYPE_SPECIFIC" + ParameterTypeDefault ParameterType = "DEFAULT" + ParameterTypeNodeTypeSpecific ParameterType = "NODE_TYPE_SPECIFIC" ) // Values returns all known values for ParameterType. Note that this can be @@ -62,9 +62,9 @@ type SourceType string // Enum values for SourceType const ( - SourceTypeCluster SourceType = "CLUSTER" - SourceTypeParameter_group SourceType = "PARAMETER_GROUP" - SourceTypeSubnet_group SourceType = "SUBNET_GROUP" + SourceTypeCluster SourceType = "CLUSTER" + SourceTypeParameterGroup SourceType = "PARAMETER_GROUP" + SourceTypeSubnetGroup SourceType = "SUBNET_GROUP" ) // Values returns all known values for SourceType. Note that this can be expanded diff --git a/service/dax/types/types.go b/service/dax/types/types.go index 0f87ca241e1..8adbc84a40a 100644 --- a/service/dax/types/types.go +++ b/service/dax/types/types.go @@ -240,16 +240,16 @@ type SSEDescription struct { // The current state of server-side encryption: // - // * ENABLING - Server-side + // * ENABLING - Server-side // encryption is being enabled. // - // * ENABLED - Server-side encryption is - // enabled. + // * ENABLED - Server-side encryption is enabled. // - // * DISABLING - Server-side encryption is being disabled. + // * + // DISABLING - Server-side encryption is being disabled. // - // * - // DISABLED - Server-side encryption is disabled. + // * DISABLED - Server-side + // encryption is disabled. Status SSEStatus } @@ -276,10 +276,10 @@ type Subnet struct { // Represents the output of one of the following actions: // -// * -// CreateSubnetGroup +// * CreateSubnetGroup // -// * ModifySubnetGroup +// * +// ModifySubnetGroup type SubnetGroup struct { // The description of the subnet group. 
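To illustrate the rename pattern in the datasync and dax hunks above, here is a minimal sketch of caller code written against the new CamelCase constants. It is not part of the generated diff; it assumes the usual aws-sdk-go-v2 module path for the datasync types package and relies only on the type and constant names shown in the plus lines of the enums.go hunk.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/datasync/types"
)

// describeStorageClass switches on the renamed S3StorageClass constants.
// The old snake_case-suffixed spellings (for example S3StorageClassStandard_ia)
// no longer exist after this change; the CamelCase names below match the
// plus lines in the datasync enums.go hunk.
func describeStorageClass(sc types.S3StorageClass) string {
	switch sc {
	case types.S3StorageClassStandardIa:
		return "S3 Standard-Infrequent Access"
	case types.S3StorageClassIntelligentTiering:
		return "S3 Intelligent-Tiering"
	case types.S3StorageClassDeepArchive:
		return "S3 Glacier Deep Archive"
	default:
		// Fall back to the wire value for classes not special-cased here.
		return string(sc)
	}
}

func main() {
	fmt.Println(describeStorageClass(types.S3StorageClassStandardIa))
}

The same one-line rename applies to the other constants touched in these hunks (for example AtimeBestEffort, EndpointTypePrivateLink, ChangeTypeRequiresReboot): callers keep the same string values and only update the Go identifier spelling.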
diff --git a/service/detective/api_op_CreateMembers.go b/service/detective/api_op_CreateMembers.go index 6bc0136a26d..af0c2f279dd 100644 --- a/service/detective/api_op_CreateMembers.go +++ b/service/detective/api_op_CreateMembers.go @@ -18,14 +18,14 @@ import ( // list of accounts to invite. The response separates the requested accounts into // two lists: // -// * The accounts that CreateMembers was able to start the -// verification for. This list includes member accounts that are being verified, -// that have passed verification and are being sent an invitation, and that have -// failed verification. +// * The accounts that CreateMembers was able to start the verification +// for. This list includes member accounts that are being verified, that have +// passed verification and are being sent an invitation, and that have failed +// verification. // -// * The accounts that CreateMembers was unable to -// process. This list includes accounts that were already invited to be member -// accounts in the behavior graph. +// * The accounts that CreateMembers was unable to process. This +// list includes accounts that were already invited to be member accounts in the +// behavior graph. func (c *Client) CreateMembers(ctx context.Context, params *CreateMembersInput, optFns ...func(*Options)) (*CreateMembersOutput, error) { if params == nil { params = &CreateMembersInput{} diff --git a/service/detective/api_op_StartMonitoringMember.go b/service/detective/api_op_StartMonitoringMember.go index 66b9590246a..d498c418531 100644 --- a/service/detective/api_op_StartMonitoringMember.go +++ b/service/detective/api_op_StartMonitoringMember.go @@ -14,11 +14,11 @@ import ( // ACCEPTED_BUT_DISABLED. For valid member accounts, the status is updated as // follows. // -// * If Detective enabled the member account, then the new status is +// * If Detective enabled the member account, then the new status is // ENABLED. // -// * If Detective cannot enable the member account, the status -// remains ACCEPTED_BUT_DISABLED. +// * If Detective cannot enable the member account, the status remains +// ACCEPTED_BUT_DISABLED. func (c *Client) StartMonitoringMember(ctx context.Context, params *StartMonitoringMemberInput, optFns ...func(*Options)) (*StartMonitoringMemberOutput, error) { if params == nil { params = &StartMonitoringMemberInput{} diff --git a/service/detective/doc.go b/service/detective/doc.go index 2f60f649037..a09d93a3ddf 100644 --- a/service/detective/doc.go +++ b/service/detective/doc.go @@ -16,32 +16,31 @@ // endpoint. A Detective master account can use the Detective API to do the // following: // -// * Enable and disable Detective. Enabling Detective creates a new +// * Enable and disable Detective. Enabling Detective creates a new // behavior graph. // -// * View the list of member accounts in a behavior graph. +// * View the list of member accounts in a behavior graph. // +// * Add +// member accounts to a behavior graph. // -// * Add member accounts to a behavior graph. +// * Remove member accounts from a behavior +// graph. // -// * Remove member accounts from a -// behavior graph. -// -// A member account can use the Detective API to do the -// following: +// A member account can use the Detective API to do the following: // -// * View the list of behavior graphs that they are invited to. +// * View +// the list of behavior graphs that they are invited to. // +// * Accept an invitation to +// contribute to a behavior graph. // -// * Accept an invitation to contribute to a behavior graph. 
-// -// * Decline an -// invitation to contribute to a behavior graph. -// -// * Remove their account from a +// * Decline an invitation to contribute to a // behavior graph. // -// All API actions are logged as CloudTrail events. See Logging -// Detective API Calls with CloudTrail +// * Remove their account from a behavior graph. +// +// All API actions +// are logged as CloudTrail events. See Logging Detective API Calls with CloudTrail // (https://docs.aws.amazon.com/detective/latest/adminguide/logging-using-cloudtrail.html). package detective diff --git a/service/detective/types/enums.go b/service/detective/types/enums.go index 78d7e557461..cbd5f1aa00d 100644 --- a/service/detective/types/enums.go +++ b/service/detective/types/enums.go @@ -6,8 +6,8 @@ type MemberDisabledReason string // Enum values for MemberDisabledReason const ( - MemberDisabledReasonVolume_too_high MemberDisabledReason = "VOLUME_TOO_HIGH" - MemberDisabledReasonVolume_unknown MemberDisabledReason = "VOLUME_UNKNOWN" + MemberDisabledReasonVolumeTooHigh MemberDisabledReason = "VOLUME_TOO_HIGH" + MemberDisabledReasonVolumeUnknown MemberDisabledReason = "VOLUME_UNKNOWN" ) // Values returns all known values for MemberDisabledReason. Note that this can be @@ -24,11 +24,11 @@ type MemberStatus string // Enum values for MemberStatus const ( - MemberStatusInvited MemberStatus = "INVITED" - MemberStatusVerification_in_progress MemberStatus = "VERIFICATION_IN_PROGRESS" - MemberStatusVerification_failed MemberStatus = "VERIFICATION_FAILED" - MemberStatusEnabled MemberStatus = "ENABLED" - MemberStatusAccepted_but_disabled MemberStatus = "ACCEPTED_BUT_DISABLED" + MemberStatusInvited MemberStatus = "INVITED" + MemberStatusVerificationInProgress MemberStatus = "VERIFICATION_IN_PROGRESS" + MemberStatusVerificationFailed MemberStatus = "VERIFICATION_FAILED" + MemberStatusEnabled MemberStatus = "ENABLED" + MemberStatusAcceptedButDisabled MemberStatus = "ACCEPTED_BUT_DISABLED" ) // Values returns all known values for MemberStatus. Note that this can be expanded diff --git a/service/detective/types/errors.go b/service/detective/types/errors.go index 2ee4ec9108a..420c64c2db7 100644 --- a/service/detective/types/errors.go +++ b/service/detective/types/errors.go @@ -60,17 +60,17 @@ func (e *ResourceNotFoundException) ErrorFault() smithy.ErrorFault { return smit // This request cannot be completed for one of the following reasons. // -// * The +// * The // request would cause the number of member accounts in the behavior graph to // exceed the maximum allowed. A behavior graph cannot have more than 1000 member // accounts. // -// * The request would cause the data rate for the behavior graph to +// * The request would cause the data rate for the behavior graph to // exceed the maximum allowed. // -// * Detective is unable to verify the data rate -// for the member account. This is usually because the member account is not -// enrolled in Amazon GuardDuty. +// * Detective is unable to verify the data rate for +// the member account. This is usually because the member account is not enrolled +// in Amazon GuardDuty. type ServiceQuotaExceededException struct { Message *string } diff --git a/service/detective/types/types.go b/service/detective/types/types.go index 0f64dc2c1d8..c74b1be0df3 100644 --- a/service/detective/types/types.go +++ b/service/detective/types/types.go @@ -42,13 +42,13 @@ type MemberDetail struct { // member account is not enabled. 
The reason can have one of the following // values: // - // * VOLUME_TOO_HIGH - Indicates that adding the member account would + // * VOLUME_TOO_HIGH - Indicates that adding the member account would // cause the data volume for the behavior graph to be too high. // - // * - // VOLUME_UNKNOWN - Indicates that Detective is unable to verify the data volume - // for the member account. This is usually because the member account is not - // enrolled in Amazon GuardDuty. + // * VOLUME_UNKNOWN - + // Indicates that Detective is unable to verify the data volume for the member + // account. This is usually because the member account is not enrolled in Amazon + // GuardDuty. DisabledReason MemberDisabledReason // The AWS account root user email address for the member account. @@ -79,30 +79,30 @@ type MemberDetail struct { // The current membership status of the member account. The status can have one of // the following values: // - // * INVITED - Indicates that the member was sent an + // * INVITED - Indicates that the member was sent an // invitation but has not yet responded. // - // * VERIFICATION_IN_PROGRESS - - // Indicates that Detective is verifying that the account identifier and email - // address provided for the member account match. If they do match, then Detective - // sends the invitation. If the email address and account identifier don't match, - // then the member cannot be added to the behavior graph. + // * VERIFICATION_IN_PROGRESS - Indicates + // that Detective is verifying that the account identifier and email address + // provided for the member account match. If they do match, then Detective sends + // the invitation. If the email address and account identifier don't match, then + // the member cannot be added to the behavior graph. // - // * - // VERIFICATION_FAILED - Indicates that the account and email address provided for - // the member account do not match, and Detective did not send an invitation to the - // account. + // * VERIFICATION_FAILED - + // Indicates that the account and email address provided for the member account do + // not match, and Detective did not send an invitation to the account. // - // * ENABLED - Indicates that the member account accepted the - // invitation to contribute to the behavior graph. + // * ENABLED - + // Indicates that the member account accepted the invitation to contribute to the + // behavior graph. // - // * ACCEPTED_BUT_DISABLED - - // Indicates that the member account accepted the invitation but is prevented from - // contributing data to the behavior graph. DisabledReason provides the reason why - // the member account is not enabled. + // * ACCEPTED_BUT_DISABLED - Indicates that the member account + // accepted the invitation but is prevented from contributing data to the behavior + // graph. DisabledReason provides the reason why the member account is not + // enabled. // - // Member accounts that declined an invitation - // or that were removed from the behavior graph are not included. + // Member accounts that declined an invitation or that were removed from + // the behavior graph are not included. Status MemberStatus // The date and time that the member account was last updated. 
The value is in diff --git a/service/devicefarm/api_op_CreateRemoteAccessSession.go b/service/devicefarm/api_op_CreateRemoteAccessSession.go index 20369c73159..67f01d7ba58 100644 --- a/service/devicefarm/api_op_CreateRemoteAccessSession.go +++ b/service/devicefarm/api_op_CreateRemoteAccessSession.go @@ -57,17 +57,17 @@ type CreateRemoteAccessSessionInput struct { // The interaction mode of the remote access session. Valid values are: // - // * + // * // INTERACTIVE: You can interact with the iOS device by viewing, touching, and // rotating the screen. You cannot run XCUITest framework-based tests in this // mode. // - // * NO_VIDEO: You are connected to the device, but cannot interact with - // it or view the screen. This mode has the fastest test execution speed. You can - // run XCUITest framework-based tests in this mode. + // * NO_VIDEO: You are connected to the device, but cannot interact with it + // or view the screen. This mode has the fastest test execution speed. You can run + // XCUITest framework-based tests in this mode. // - // * VIDEO_ONLY: You can view - // the screen, but cannot touch or rotate it. You can run XCUITest framework-based + // * VIDEO_ONLY: You can view the + // screen, but cannot touch or rotate it. You can run XCUITest framework-based // tests and watch the screen in this mode. InteractionMode types.InteractionMode diff --git a/service/devicefarm/api_op_CreateUpload.go b/service/devicefarm/api_op_CreateUpload.go index d6fb9844e05..7df179da3c2 100644 --- a/service/devicefarm/api_op_CreateUpload.go +++ b/service/devicefarm/api_op_CreateUpload.go @@ -45,87 +45,86 @@ type CreateUploadInput struct { // The upload's upload type. Must be one of the following values: // - // * - // ANDROID_APP + // * ANDROID_APP // - // * IOS_APP + // * + // IOS_APP // - // * WEB_APP + // * WEB_APP // - // * EXTERNAL_DATA + // * EXTERNAL_DATA // - // * - // APPIUM_JAVA_JUNIT_TEST_PACKAGE + // * APPIUM_JAVA_JUNIT_TEST_PACKAGE // - // * APPIUM_JAVA_TESTNG_TEST_PACKAGE + // * + // APPIUM_JAVA_TESTNG_TEST_PACKAGE // - // * - // APPIUM_PYTHON_TEST_PACKAGE + // * APPIUM_PYTHON_TEST_PACKAGE // - // * APPIUM_NODE_TEST_PACKAGE + // * + // APPIUM_NODE_TEST_PACKAGE // - // * - // APPIUM_RUBY_TEST_PACKAGE + // * APPIUM_RUBY_TEST_PACKAGE // - // * APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE + // * + // APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE // - // * - // APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE + // * APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE // - // * APPIUM_WEB_PYTHON_TEST_PACKAGE + // * + // APPIUM_WEB_PYTHON_TEST_PACKAGE // - // * - // APPIUM_WEB_NODE_TEST_PACKAGE + // * APPIUM_WEB_NODE_TEST_PACKAGE // - // * APPIUM_WEB_RUBY_TEST_PACKAGE + // * + // APPIUM_WEB_RUBY_TEST_PACKAGE // - // * - // CALABASH_TEST_PACKAGE + // * CALABASH_TEST_PACKAGE // - // * INSTRUMENTATION_TEST_PACKAGE + // * + // INSTRUMENTATION_TEST_PACKAGE // - // * - // UIAUTOMATION_TEST_PACKAGE + // * UIAUTOMATION_TEST_PACKAGE // - // * UIAUTOMATOR_TEST_PACKAGE + // * + // UIAUTOMATOR_TEST_PACKAGE // - // * - // XCTEST_TEST_PACKAGE + // * XCTEST_TEST_PACKAGE // - // * XCTEST_UI_TEST_PACKAGE + // * XCTEST_UI_TEST_PACKAGE // - // * + // * // APPIUM_JAVA_JUNIT_TEST_SPEC // - // * APPIUM_JAVA_TESTNG_TEST_SPEC + // * APPIUM_JAVA_TESTNG_TEST_SPEC // - // * + // * // APPIUM_PYTHON_TEST_SPEC // - // * APPIUM_NODE_TEST_SPEC + // * APPIUM_NODE_TEST_SPEC // - // * - // APPIUM_RUBY_TEST_SPEC + // * APPIUM_RUBY_TEST_SPEC // - // * APPIUM_WEB_JAVA_JUNIT_TEST_SPEC + // * + // APPIUM_WEB_JAVA_JUNIT_TEST_SPEC // - // * - // APPIUM_WEB_JAVA_TESTNG_TEST_SPEC + // * 
APPIUM_WEB_JAVA_TESTNG_TEST_SPEC // - // * APPIUM_WEB_PYTHON_TEST_SPEC + // * + // APPIUM_WEB_PYTHON_TEST_SPEC // - // * - // APPIUM_WEB_NODE_TEST_SPEC + // * APPIUM_WEB_NODE_TEST_SPEC // - // * APPIUM_WEB_RUBY_TEST_SPEC + // * + // APPIUM_WEB_RUBY_TEST_SPEC // - // * - // INSTRUMENTATION_TEST_SPEC + // * INSTRUMENTATION_TEST_SPEC // - // * XCTEST_UI_TEST_SPEC + // * + // XCTEST_UI_TEST_SPEC // - // If you call CreateUpload - // with WEB_APP specified, AWS Device Farm throws an ArgumentException error. + // If you call CreateUpload with WEB_APP specified, AWS Device + // Farm throws an ArgumentException error. // // This member is required. Type types.UploadType diff --git a/service/devicefarm/api_op_GetDevicePoolCompatibility.go b/service/devicefarm/api_op_GetDevicePoolCompatibility.go index 906b275418b..457e8ffc2cb 100644 --- a/service/devicefarm/api_op_GetDevicePoolCompatibility.go +++ b/service/devicefarm/api_op_GetDevicePoolCompatibility.go @@ -47,48 +47,47 @@ type GetDevicePoolCompatibilityInput struct { // The test type for the specified device pool. Allowed values include the // following: // - // * BUILTIN_FUZZ. + // * BUILTIN_FUZZ. // - // * BUILTIN_EXPLORER. For Android, an app - // explorer that traverses an Android app, interacting with it and capturing - // screenshots at the same time. + // * BUILTIN_EXPLORER. For Android, an app explorer + // that traverses an Android app, interacting with it and capturing screenshots at + // the same time. // - // * APPIUM_JAVA_JUNIT. + // * APPIUM_JAVA_JUNIT. // - // * - // APPIUM_JAVA_TESTNG. + // * APPIUM_JAVA_TESTNG. // - // * APPIUM_PYTHON. + // * APPIUM_PYTHON. // - // * APPIUM_NODE. + // * + // APPIUM_NODE. // - // * - // APPIUM_RUBY. + // * APPIUM_RUBY. // - // * APPIUM_WEB_JAVA_JUNIT. + // * APPIUM_WEB_JAVA_JUNIT. // - // * APPIUM_WEB_JAVA_TESTNG. + // * + // APPIUM_WEB_JAVA_TESTNG. // - // * - // APPIUM_WEB_PYTHON. + // * APPIUM_WEB_PYTHON. // - // * APPIUM_WEB_NODE. + // * APPIUM_WEB_NODE. // - // * APPIUM_WEB_RUBY. + // * + // APPIUM_WEB_RUBY. // - // * - // CALABASH. + // * CALABASH. // - // * INSTRUMENTATION. + // * INSTRUMENTATION. // - // * UIAUTOMATION. - // - // * UIAUTOMATOR. + // * UIAUTOMATION. // + // * + // UIAUTOMATOR. // // * XCTEST. // - // * XCTEST_UI. + // * XCTEST_UI. TestType types.TestType } diff --git a/service/devicefarm/api_op_GetTestGridSession.go b/service/devicefarm/api_op_GetTestGridSession.go index 341b84f1fd5..c5dac71cfdb 100644 --- a/service/devicefarm/api_op_GetTestGridSession.go +++ b/service/devicefarm/api_op_GetTestGridSession.go @@ -15,10 +15,10 @@ import ( // URL from CreateTestGridUrlResult$url. You can use the following to look up // sessions: // -// * The session ARN (GetTestGridSessionRequest$sessionArn). +// * The session ARN (GetTestGridSessionRequest$sessionArn). // -// * -// The project ARN and a session ID (GetTestGridSessionRequest$projectArn and +// * The +// project ARN and a session ID (GetTestGridSessionRequest$projectArn and // GetTestGridSessionRequest$sessionId). func (c *Client) GetTestGridSession(ctx context.Context, params *GetTestGridSessionInput, optFns ...func(*Options)) (*GetTestGridSessionOutput, error) { if params == nil { diff --git a/service/devicefarm/api_op_ListArtifacts.go b/service/devicefarm/api_op_ListArtifacts.go index febb90f4cf5..6af157d0243 100644 --- a/service/devicefarm/api_op_ListArtifacts.go +++ b/service/devicefarm/api_op_ListArtifacts.go @@ -37,12 +37,11 @@ type ListArtifactsInput struct { // The artifacts' type. 
Allowed values include: // - // * FILE + // * FILE // - // * LOG + // * LOG // - // * - // SCREENSHOT + // * SCREENSHOT // // This member is required. Type types.ArtifactCategory diff --git a/service/devicefarm/api_op_ListDevicePools.go b/service/devicefarm/api_op_ListDevicePools.go index 3951de5194a..b0afe8531f8 100644 --- a/service/devicefarm/api_op_ListDevicePools.go +++ b/service/devicefarm/api_op_ListDevicePools.go @@ -41,11 +41,11 @@ type ListDevicePoolsInput struct { // The device pools' type. Allowed values include: // - // * CURATED: A device pool - // that is created and managed by AWS Device Farm. + // * CURATED: A device pool that + // is created and managed by AWS Device Farm. // - // * PRIVATE: A device pool - // that is created and managed by the device pool developer. + // * PRIVATE: A device pool that is + // created and managed by the device pool developer. Type types.DevicePoolType } diff --git a/service/devicefarm/api_op_ListDevices.go b/service/devicefarm/api_op_ListDevices.go index 4b3be21d87d..277d18cfaa4 100644 --- a/service/devicefarm/api_op_ListDevices.go +++ b/service/devicefarm/api_op_ListDevices.go @@ -36,78 +36,76 @@ type ListDevicesInput struct { // Used to select a set of devices. A filter is made up of an attribute, an // operator, and one or more values. // - // * Attribute: The aspect of a device such - // as platform or model used as the selection criteria in a device filter. Allowed + // * Attribute: The aspect of a device such as + // platform or model used as the selection criteria in a device filter. Allowed // values include: // - // * ARN: The Amazon Resource Name (ARN) of the device - // (for example, arn:aws:devicefarm:us-west-2::device:12345Example). + // * ARN: The Amazon Resource Name (ARN) of the device (for + // example, arn:aws:devicefarm:us-west-2::device:12345Example). // - // * - // PLATFORM: The device platform. Valid values are ANDROID or IOS. + // * PLATFORM: The + // device platform. Valid values are ANDROID or IOS. // - // * - // OS_VERSION: The operating system version (for example, 10.3.2). + // * OS_VERSION: The operating + // system version (for example, 10.3.2). // - // * - // MODEL: The device model (for example, iPad 5th Gen). + // * MODEL: The device model (for example, + // iPad 5th Gen). // - // * AVAILABILITY: - // The current availability of the device. Valid values are AVAILABLE, - // HIGHLY_AVAILABLE, BUSY, or TEMPORARY_NOT_AVAILABLE. + // * AVAILABILITY: The current availability of the device. Valid + // values are AVAILABLE, HIGHLY_AVAILABLE, BUSY, or TEMPORARY_NOT_AVAILABLE. // - // * FORM_FACTOR: The - // device form factor. Valid values are PHONE or TABLET. + // * + // FORM_FACTOR: The device form factor. Valid values are PHONE or TABLET. // - // * MANUFACTURER: - // The device manufacturer (for example, Apple). + // * + // MANUFACTURER: The device manufacturer (for example, Apple). // - // * REMOTE_ACCESS_ENABLED: - // Whether the device is enabled for remote access. Valid values are TRUE or - // FALSE. + // * + // REMOTE_ACCESS_ENABLED: Whether the device is enabled for remote access. Valid + // values are TRUE or FALSE. // - // * REMOTE_DEBUG_ENABLED: Whether the device is enabled for remote - // debugging. Valid values are TRUE or FALSE. Because remote debugging is no longer - // supported + // * REMOTE_DEBUG_ENABLED: Whether the device is enabled + // for remote debugging. Valid values are TRUE or FALSE. 
Because remote debugging + // is no longer supported // (https://docs.aws.amazon.com/devicefarm/latest/developerguide/history.html), // this attribute is ignored. // - // * INSTANCE_ARN: The Amazon Resource Name - // (ARN) of the device instance. + // * INSTANCE_ARN: The Amazon Resource Name (ARN) of + // the device instance. // - // * INSTANCE_LABELS: The label of the - // device instance. + // * INSTANCE_LABELS: The label of the device instance. // - // * FLEET_TYPE: The fleet type. Valid values are PUBLIC - // or PRIVATE. + // * + // FLEET_TYPE: The fleet type. Valid values are PUBLIC or PRIVATE. // - // * Operator: The filter operator. + // * Operator: The + // filter operator. // - // * The EQUALS operator - // is available for every attribute except INSTANCE_LABELS. + // * The EQUALS operator is available for every attribute except + // INSTANCE_LABELS. // - // * The CONTAINS - // operator is available for the INSTANCE_LABELS and MODEL attributes. + // * The CONTAINS operator is available for the INSTANCE_LABELS + // and MODEL attributes. // - // * - // The IN and NOT_IN operators are available for the ARN, OS_VERSION, MODEL, - // MANUFACTURER, and INSTANCE_ARN attributes. + // * The IN and NOT_IN operators are available for the ARN, + // OS_VERSION, MODEL, MANUFACTURER, and INSTANCE_ARN attributes. // - // * The LESS_THAN, + // * The LESS_THAN, // GREATER_THAN, LESS_THAN_OR_EQUALS, and GREATER_THAN_OR_EQUALS operators are also // available for the OS_VERSION attribute. // - // * Values: An array of one or more + // * Values: An array of one or more // filter values. // - // * The IN and NOT_IN operators take a values array that - // has one or more elements. + // * The IN and NOT_IN operators take a values array that has one + // or more elements. // - // * The other operators require an array with a - // single element. + // * The other operators require an array with a single + // element. // - // * In a request, the AVAILABILITY attribute takes the - // following values: AVAILABLE, HIGHLY_AVAILABLE, BUSY, or TEMPORARY_NOT_AVAILABLE. + // * In a request, the AVAILABILITY attribute takes the following values: + // AVAILABLE, HIGHLY_AVAILABLE, BUSY, or TEMPORARY_NOT_AVAILABLE. Filters []*types.DeviceFilter // An identifier that was returned from the previous call to this operation, which diff --git a/service/devicefarm/api_op_ListUniqueProblems.go b/service/devicefarm/api_op_ListUniqueProblems.go index cb12558de9d..f404643238b 100644 --- a/service/devicefarm/api_op_ListUniqueProblems.go +++ b/service/devicefarm/api_op_ListUniqueProblems.go @@ -54,21 +54,20 @@ type ListUniqueProblemsOutput struct { // Information about the unique problems. Allowed values include: // - // * PENDING + // * PENDING // + // * + // PASSED // - // * PASSED + // * WARNED // - // * WARNED + // * FAILED // - // * FAILED + // * SKIPPED // - // * SKIPPED + // * ERRORED // - // * ERRORED - // - // * - // STOPPED + // * STOPPED UniqueProblems map[string][]*types.UniqueProblem // Metadata pertaining to the operation's result. diff --git a/service/devicefarm/api_op_ListUploads.go b/service/devicefarm/api_op_ListUploads.go index 1d9d44c504d..aee1f745603 100644 --- a/service/devicefarm/api_op_ListUploads.go +++ b/service/devicefarm/api_op_ListUploads.go @@ -42,84 +42,82 @@ type ListUploadsInput struct { // The type of upload. 
Must be one of the following values: // - // * ANDROID_APP + // * ANDROID_APP // + // * + // IOS_APP // - // * IOS_APP + // * WEB_APP // - // * WEB_APP + // * EXTERNAL_DATA // - // * EXTERNAL_DATA + // * APPIUM_JAVA_JUNIT_TEST_PACKAGE // - // * - // APPIUM_JAVA_JUNIT_TEST_PACKAGE + // * + // APPIUM_JAVA_TESTNG_TEST_PACKAGE // - // * APPIUM_JAVA_TESTNG_TEST_PACKAGE + // * APPIUM_PYTHON_TEST_PACKAGE // - // * - // APPIUM_PYTHON_TEST_PACKAGE + // * + // APPIUM_NODE_TEST_PACKAGE // - // * APPIUM_NODE_TEST_PACKAGE + // * APPIUM_RUBY_TEST_PACKAGE // - // * - // APPIUM_RUBY_TEST_PACKAGE + // * + // APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE // - // * APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE + // * APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE // - // * - // APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE + // * + // APPIUM_WEB_PYTHON_TEST_PACKAGE // - // * APPIUM_WEB_PYTHON_TEST_PACKAGE + // * APPIUM_WEB_NODE_TEST_PACKAGE // - // * - // APPIUM_WEB_NODE_TEST_PACKAGE + // * + // APPIUM_WEB_RUBY_TEST_PACKAGE // - // * APPIUM_WEB_RUBY_TEST_PACKAGE + // * CALABASH_TEST_PACKAGE // - // * - // CALABASH_TEST_PACKAGE + // * + // INSTRUMENTATION_TEST_PACKAGE // - // * INSTRUMENTATION_TEST_PACKAGE + // * UIAUTOMATION_TEST_PACKAGE // - // * - // UIAUTOMATION_TEST_PACKAGE + // * + // UIAUTOMATOR_TEST_PACKAGE // - // * UIAUTOMATOR_TEST_PACKAGE + // * XCTEST_TEST_PACKAGE // - // * - // XCTEST_TEST_PACKAGE + // * XCTEST_UI_TEST_PACKAGE // - // * XCTEST_UI_TEST_PACKAGE - // - // * + // * // APPIUM_JAVA_JUNIT_TEST_SPEC // - // * APPIUM_JAVA_TESTNG_TEST_SPEC + // * APPIUM_JAVA_TESTNG_TEST_SPEC // - // * + // * // APPIUM_PYTHON_TEST_SPEC // - // * APPIUM_NODE_TEST_SPEC + // * APPIUM_NODE_TEST_SPEC // - // * - // APPIUM_RUBY_TEST_SPEC + // * APPIUM_RUBY_TEST_SPEC // - // * APPIUM_WEB_JAVA_JUNIT_TEST_SPEC + // * + // APPIUM_WEB_JAVA_JUNIT_TEST_SPEC // - // * - // APPIUM_WEB_JAVA_TESTNG_TEST_SPEC + // * APPIUM_WEB_JAVA_TESTNG_TEST_SPEC // - // * APPIUM_WEB_PYTHON_TEST_SPEC + // * + // APPIUM_WEB_PYTHON_TEST_SPEC // - // * - // APPIUM_WEB_NODE_TEST_SPEC + // * APPIUM_WEB_NODE_TEST_SPEC // - // * APPIUM_WEB_RUBY_TEST_SPEC + // * + // APPIUM_WEB_RUBY_TEST_SPEC // - // * - // INSTRUMENTATION_TEST_SPEC + // * INSTRUMENTATION_TEST_SPEC // - // * XCTEST_UI_TEST_SPEC + // * XCTEST_UI_TEST_SPEC Type types.UploadType } diff --git a/service/devicefarm/doc.go b/service/devicefarm/doc.go index 0b4812ba377..4507e5bc9ef 100644 --- a/service/devicefarm/doc.go +++ b/service/devicefarm/doc.go @@ -5,16 +5,16 @@ // // Welcome to the AWS Device Farm API documentation, which contains APIs for: // -// -// * Testing on desktop browsers Device Farm makes it possible for you to test your +// * +// Testing on desktop browsers Device Farm makes it possible for you to test your // web applications on desktop browsers using Selenium. The APIs for desktop // browser testing contain TestGrid in their names. For more information, see // Testing Web Applications on Selenium with Device Farm // (https://docs.aws.amazon.com/devicefarm/latest/testgrid/). // -// * Testing on -// real mobile devices Device Farm makes it possible for you to test apps on -// physical phones, tablets, and other devices in the cloud. For more information, -// see the Device Farm Developer Guide +// * Testing on real +// mobile devices Device Farm makes it possible for you to test apps on physical +// phones, tablets, and other devices in the cloud. For more information, see the +// Device Farm Developer Guide // (https://docs.aws.amazon.com/devicefarm/latest/developerguide/). 
package devicefarm diff --git a/service/devicefarm/types/enums.go b/service/devicefarm/types/enums.go index 7a7cf636862..9f6cbbf8ea3 100644 --- a/service/devicefarm/types/enums.go +++ b/service/devicefarm/types/enums.go @@ -26,34 +26,34 @@ type ArtifactType string // Enum values for ArtifactType const ( - ArtifactTypeUnknown ArtifactType = "UNKNOWN" - ArtifactTypeScreenshot ArtifactType = "SCREENSHOT" - ArtifactTypeDevice_log ArtifactType = "DEVICE_LOG" - ArtifactTypeMessage_log ArtifactType = "MESSAGE_LOG" - ArtifactTypeVideo_log ArtifactType = "VIDEO_LOG" - ArtifactTypeResult_log ArtifactType = "RESULT_LOG" - ArtifactTypeService_log ArtifactType = "SERVICE_LOG" - ArtifactTypeWebkit_log ArtifactType = "WEBKIT_LOG" - ArtifactTypeInstrumentation_output ArtifactType = "INSTRUMENTATION_OUTPUT" - ArtifactTypeExerciser_monkey_output ArtifactType = "EXERCISER_MONKEY_OUTPUT" - ArtifactTypeCalabash_json_output ArtifactType = "CALABASH_JSON_OUTPUT" - ArtifactTypeCalabash_pretty_output ArtifactType = "CALABASH_PRETTY_OUTPUT" - ArtifactTypeCalabash_standard_output ArtifactType = "CALABASH_STANDARD_OUTPUT" - ArtifactTypeCalabash_java_xml_output ArtifactType = "CALABASH_JAVA_XML_OUTPUT" - ArtifactTypeAutomation_output ArtifactType = "AUTOMATION_OUTPUT" - ArtifactTypeAppium_server_output ArtifactType = "APPIUM_SERVER_OUTPUT" - ArtifactTypeAppium_java_output ArtifactType = "APPIUM_JAVA_OUTPUT" - ArtifactTypeAppium_java_xml_output ArtifactType = "APPIUM_JAVA_XML_OUTPUT" - ArtifactTypeAppium_python_output ArtifactType = "APPIUM_PYTHON_OUTPUT" - ArtifactTypeAppium_python_xml_output ArtifactType = "APPIUM_PYTHON_XML_OUTPUT" - ArtifactTypeExplorer_event_log ArtifactType = "EXPLORER_EVENT_LOG" - ArtifactTypeExplorer_summary_log ArtifactType = "EXPLORER_SUMMARY_LOG" - ArtifactTypeApplication_crash_report ArtifactType = "APPLICATION_CRASH_REPORT" - ArtifactTypeXctest_log ArtifactType = "XCTEST_LOG" - ArtifactTypeVideo ArtifactType = "VIDEO" - ArtifactTypeCustomer_artifact ArtifactType = "CUSTOMER_ARTIFACT" - ArtifactTypeCustomer_artifact_log ArtifactType = "CUSTOMER_ARTIFACT_LOG" - ArtifactTypeTestspec_output ArtifactType = "TESTSPEC_OUTPUT" + ArtifactTypeUnknown ArtifactType = "UNKNOWN" + ArtifactTypeScreenshot ArtifactType = "SCREENSHOT" + ArtifactTypeDeviceLog ArtifactType = "DEVICE_LOG" + ArtifactTypeMessageLog ArtifactType = "MESSAGE_LOG" + ArtifactTypeVideoLog ArtifactType = "VIDEO_LOG" + ArtifactTypeResultLog ArtifactType = "RESULT_LOG" + ArtifactTypeServiceLog ArtifactType = "SERVICE_LOG" + ArtifactTypeWebkitLog ArtifactType = "WEBKIT_LOG" + ArtifactTypeInstrumentationOutput ArtifactType = "INSTRUMENTATION_OUTPUT" + ArtifactTypeExerciserMonkeyOutput ArtifactType = "EXERCISER_MONKEY_OUTPUT" + ArtifactTypeCalabashJsonOutput ArtifactType = "CALABASH_JSON_OUTPUT" + ArtifactTypeCalabashPrettyOutput ArtifactType = "CALABASH_PRETTY_OUTPUT" + ArtifactTypeCalabashStandardOutput ArtifactType = "CALABASH_STANDARD_OUTPUT" + ArtifactTypeCalabashJavaXmlOutput ArtifactType = "CALABASH_JAVA_XML_OUTPUT" + ArtifactTypeAutomationOutput ArtifactType = "AUTOMATION_OUTPUT" + ArtifactTypeAppiumServerOutput ArtifactType = "APPIUM_SERVER_OUTPUT" + ArtifactTypeAppiumJavaOutput ArtifactType = "APPIUM_JAVA_OUTPUT" + ArtifactTypeAppiumJavaXmlOutput ArtifactType = "APPIUM_JAVA_XML_OUTPUT" + ArtifactTypeAppiumPythonOutput ArtifactType = "APPIUM_PYTHON_OUTPUT" + ArtifactTypeAppiumPythonXmlOutput ArtifactType = "APPIUM_PYTHON_XML_OUTPUT" + ArtifactTypeExplorerEventLog ArtifactType = "EXPLORER_EVENT_LOG" + ArtifactTypeExplorerSummaryLog 
ArtifactType = "EXPLORER_SUMMARY_LOG" + ArtifactTypeApplicationCrashReport ArtifactType = "APPLICATION_CRASH_REPORT" + ArtifactTypeXctestLog ArtifactType = "XCTEST_LOG" + ArtifactTypeVideo ArtifactType = "VIDEO" + ArtifactTypeCustomerArtifact ArtifactType = "CUSTOMER_ARTIFACT" + ArtifactTypeCustomerArtifactLog ArtifactType = "CUSTOMER_ARTIFACT_LOG" + ArtifactTypeTestspecOutput ArtifactType = "TESTSPEC_OUTPUT" ) // Values returns all known values for ArtifactType. Note that this can be expanded @@ -130,19 +130,19 @@ type DeviceAttribute string // Enum values for DeviceAttribute const ( - DeviceAttributeArn DeviceAttribute = "ARN" - DeviceAttributePlatform DeviceAttribute = "PLATFORM" - DeviceAttributeForm_factor DeviceAttribute = "FORM_FACTOR" - DeviceAttributeManufacturer DeviceAttribute = "MANUFACTURER" - DeviceAttributeRemote_access_enabled DeviceAttribute = "REMOTE_ACCESS_ENABLED" - DeviceAttributeRemote_debug_enabled DeviceAttribute = "REMOTE_DEBUG_ENABLED" - DeviceAttributeAppium_version DeviceAttribute = "APPIUM_VERSION" - DeviceAttributeInstance_arn DeviceAttribute = "INSTANCE_ARN" - DeviceAttributeInstance_labels DeviceAttribute = "INSTANCE_LABELS" - DeviceAttributeFleet_type DeviceAttribute = "FLEET_TYPE" - DeviceAttributeOs_version DeviceAttribute = "OS_VERSION" - DeviceAttributeModel DeviceAttribute = "MODEL" - DeviceAttributeAvailability DeviceAttribute = "AVAILABILITY" + DeviceAttributeArn DeviceAttribute = "ARN" + DeviceAttributePlatform DeviceAttribute = "PLATFORM" + DeviceAttributeFormFactor DeviceAttribute = "FORM_FACTOR" + DeviceAttributeManufacturer DeviceAttribute = "MANUFACTURER" + DeviceAttributeRemoteAccessEnabled DeviceAttribute = "REMOTE_ACCESS_ENABLED" + DeviceAttributeRemoteDebugEnabled DeviceAttribute = "REMOTE_DEBUG_ENABLED" + DeviceAttributeAppiumVersion DeviceAttribute = "APPIUM_VERSION" + DeviceAttributeInstanceArn DeviceAttribute = "INSTANCE_ARN" + DeviceAttributeInstanceLabels DeviceAttribute = "INSTANCE_LABELS" + DeviceAttributeFleetType DeviceAttribute = "FLEET_TYPE" + DeviceAttributeOsVersion DeviceAttribute = "OS_VERSION" + DeviceAttributeModel DeviceAttribute = "MODEL" + DeviceAttributeAvailability DeviceAttribute = "AVAILABILITY" ) // Values returns all known values for DeviceAttribute. Note that this can be @@ -170,10 +170,10 @@ type DeviceAvailability string // Enum values for DeviceAvailability const ( - DeviceAvailabilityTemporary_not_available DeviceAvailability = "TEMPORARY_NOT_AVAILABLE" - DeviceAvailabilityBusy DeviceAvailability = "BUSY" - DeviceAvailabilityAvailable DeviceAvailability = "AVAILABLE" - DeviceAvailabilityHighly_available DeviceAvailability = "HIGHLY_AVAILABLE" + DeviceAvailabilityTemporaryNotAvailable DeviceAvailability = "TEMPORARY_NOT_AVAILABLE" + DeviceAvailabilityBusy DeviceAvailability = "BUSY" + DeviceAvailabilityAvailable DeviceAvailability = "AVAILABLE" + DeviceAvailabilityHighlyAvailable DeviceAvailability = "HIGHLY_AVAILABLE" ) // Values returns all known values for DeviceAvailability. 
Note that this can be @@ -192,18 +192,18 @@ type DeviceFilterAttribute string // Enum values for DeviceFilterAttribute const ( - DeviceFilterAttributeArn DeviceFilterAttribute = "ARN" - DeviceFilterAttributePlatform DeviceFilterAttribute = "PLATFORM" - DeviceFilterAttributeOs_version DeviceFilterAttribute = "OS_VERSION" - DeviceFilterAttributeModel DeviceFilterAttribute = "MODEL" - DeviceFilterAttributeAvailability DeviceFilterAttribute = "AVAILABILITY" - DeviceFilterAttributeForm_factor DeviceFilterAttribute = "FORM_FACTOR" - DeviceFilterAttributeManufacturer DeviceFilterAttribute = "MANUFACTURER" - DeviceFilterAttributeRemote_access_enabled DeviceFilterAttribute = "REMOTE_ACCESS_ENABLED" - DeviceFilterAttributeRemote_debug_enabled DeviceFilterAttribute = "REMOTE_DEBUG_ENABLED" - DeviceFilterAttributeInstance_arn DeviceFilterAttribute = "INSTANCE_ARN" - DeviceFilterAttributeInstance_labels DeviceFilterAttribute = "INSTANCE_LABELS" - DeviceFilterAttributeFleet_type DeviceFilterAttribute = "FLEET_TYPE" + DeviceFilterAttributeArn DeviceFilterAttribute = "ARN" + DeviceFilterAttributePlatform DeviceFilterAttribute = "PLATFORM" + DeviceFilterAttributeOsVersion DeviceFilterAttribute = "OS_VERSION" + DeviceFilterAttributeModel DeviceFilterAttribute = "MODEL" + DeviceFilterAttributeAvailability DeviceFilterAttribute = "AVAILABILITY" + DeviceFilterAttributeFormFactor DeviceFilterAttribute = "FORM_FACTOR" + DeviceFilterAttributeManufacturer DeviceFilterAttribute = "MANUFACTURER" + DeviceFilterAttributeRemoteAccessEnabled DeviceFilterAttribute = "REMOTE_ACCESS_ENABLED" + DeviceFilterAttributeRemoteDebugEnabled DeviceFilterAttribute = "REMOTE_DEBUG_ENABLED" + DeviceFilterAttributeInstanceArn DeviceFilterAttribute = "INSTANCE_ARN" + DeviceFilterAttributeInstanceLabels DeviceFilterAttribute = "INSTANCE_LABELS" + DeviceFilterAttributeFleetType DeviceFilterAttribute = "FLEET_TYPE" ) // Values returns all known values for DeviceFilterAttribute. Note that this can be @@ -312,8 +312,8 @@ type ExecutionResultCode string // Enum values for ExecutionResultCode const ( - ExecutionResultCodeParsing_failed ExecutionResultCode = "PARSING_FAILED" - ExecutionResultCodeVpc_endpoint_setup_failed ExecutionResultCode = "VPC_ENDPOINT_SETUP_FAILED" + ExecutionResultCodeParsingFailed ExecutionResultCode = "PARSING_FAILED" + ExecutionResultCodeVpcEndpointSetupFailed ExecutionResultCode = "VPC_ENDPOINT_SETUP_FAILED" ) // Values returns all known values for ExecutionResultCode. 
Note that this can be @@ -330,15 +330,15 @@ type ExecutionStatus string // Enum values for ExecutionStatus const ( - ExecutionStatusPending ExecutionStatus = "PENDING" - ExecutionStatusPending_concurrnecy ExecutionStatus = "PENDING_CONCURRENCY" - ExecutionStatusPending_device ExecutionStatus = "PENDING_DEVICE" - ExecutionStatusProcessing ExecutionStatus = "PROCESSING" - ExecutionStatusScheduling ExecutionStatus = "SCHEDULING" - ExecutionStatusPreparing ExecutionStatus = "PREPARING" - ExecutionStatusRunning ExecutionStatus = "RUNNING" - ExecutionStatusCompleted ExecutionStatus = "COMPLETED" - ExecutionStatusStopping ExecutionStatus = "STOPPING" + ExecutionStatusPending ExecutionStatus = "PENDING" + ExecutionStatusPendingConcurrnecy ExecutionStatus = "PENDING_CONCURRENCY" + ExecutionStatusPendingDevice ExecutionStatus = "PENDING_DEVICE" + ExecutionStatusProcessing ExecutionStatus = "PROCESSING" + ExecutionStatusScheduling ExecutionStatus = "SCHEDULING" + ExecutionStatusPreparing ExecutionStatus = "PREPARING" + ExecutionStatusRunning ExecutionStatus = "RUNNING" + ExecutionStatusCompleted ExecutionStatus = "COMPLETED" + ExecutionStatusStopping ExecutionStatus = "STOPPING" ) // Values returns all known values for ExecutionStatus. Note that this can be @@ -362,10 +362,10 @@ type InstanceStatus string // Enum values for InstanceStatus const ( - InstanceStatusIn_use InstanceStatus = "IN_USE" - InstanceStatusPreparing InstanceStatus = "PREPARING" - InstanceStatusAvailable InstanceStatus = "AVAILABLE" - InstanceStatusNot_available InstanceStatus = "NOT_AVAILABLE" + InstanceStatusInUse InstanceStatus = "IN_USE" + InstanceStatusPreparing InstanceStatus = "PREPARING" + InstanceStatusAvailable InstanceStatus = "AVAILABLE" + InstanceStatusNotAvailable InstanceStatus = "NOT_AVAILABLE" ) // Values returns all known values for InstanceStatus. Note that this can be @@ -385,8 +385,8 @@ type InteractionMode string // Enum values for InteractionMode const ( InteractionModeInteractive InteractionMode = "INTERACTIVE" - InteractionModeNo_video InteractionMode = "NO_VIDEO" - InteractionModeVideo_only InteractionMode = "VIDEO_ONLY" + InteractionModeNoVideo InteractionMode = "NO_VIDEO" + InteractionModeVideoOnly InteractionMode = "VIDEO_ONLY" ) // Values returns all known values for InteractionMode. Note that this can be @@ -474,14 +474,14 @@ type RuleOperator string // Enum values for RuleOperator const ( - RuleOperatorEquals RuleOperator = "EQUALS" - RuleOperatorLess_than RuleOperator = "LESS_THAN" - RuleOperatorLess_than_or_equals RuleOperator = "LESS_THAN_OR_EQUALS" - RuleOperatorGreater_than RuleOperator = "GREATER_THAN" - RuleOperatorGreater_than_or_equals RuleOperator = "GREATER_THAN_OR_EQUALS" - RuleOperatorIn RuleOperator = "IN" - RuleOperatorNot_in RuleOperator = "NOT_IN" - RuleOperatorContains RuleOperator = "CONTAINS" + RuleOperatorEquals RuleOperator = "EQUALS" + RuleOperatorLessThan RuleOperator = "LESS_THAN" + RuleOperatorLessThanOrEquals RuleOperator = "LESS_THAN_OR_EQUALS" + RuleOperatorGreaterThan RuleOperator = "GREATER_THAN" + RuleOperatorGreaterThanOrEquals RuleOperator = "GREATER_THAN_OR_EQUALS" + RuleOperatorIn RuleOperator = "IN" + RuleOperatorNotIn RuleOperator = "NOT_IN" + RuleOperatorContains RuleOperator = "CONTAINS" ) // Values returns all known values for RuleOperator. 
Note that this can be expanded @@ -504,23 +504,23 @@ type SampleType string // Enum values for SampleType const ( - SampleTypeCpu SampleType = "CPU" - SampleTypeMemory SampleType = "MEMORY" - SampleTypeThreads SampleType = "THREADS" - SampleTypeRx_rate SampleType = "RX_RATE" - SampleTypeTx_rate SampleType = "TX_RATE" - SampleTypeRx SampleType = "RX" - SampleTypeTx SampleType = "TX" - SampleTypeNative_frames SampleType = "NATIVE_FRAMES" - SampleTypeNative_fps SampleType = "NATIVE_FPS" - SampleTypeNative_min_drawtime SampleType = "NATIVE_MIN_DRAWTIME" - SampleTypeNative_avg_drawtime SampleType = "NATIVE_AVG_DRAWTIME" - SampleTypeNative_max_drawtime SampleType = "NATIVE_MAX_DRAWTIME" - SampleTypeOpengl_frames SampleType = "OPENGL_FRAMES" - SampleTypeOpengl_fps SampleType = "OPENGL_FPS" - SampleTypeOpengl_min_drawtime SampleType = "OPENGL_MIN_DRAWTIME" - SampleTypeOpengl_avg_drawtime SampleType = "OPENGL_AVG_DRAWTIME" - SampleTypeOpengl_max_drawtime SampleType = "OPENGL_MAX_DRAWTIME" + SampleTypeCpu SampleType = "CPU" + SampleTypeMemory SampleType = "MEMORY" + SampleTypeThreads SampleType = "THREADS" + SampleTypeRxRate SampleType = "RX_RATE" + SampleTypeTxRate SampleType = "TX_RATE" + SampleTypeRx SampleType = "RX" + SampleTypeTx SampleType = "TX" + SampleTypeNativeFrames SampleType = "NATIVE_FRAMES" + SampleTypeNativeFps SampleType = "NATIVE_FPS" + SampleTypeNativeMinDrawtime SampleType = "NATIVE_MIN_DRAWTIME" + SampleTypeNativeAvgDrawtime SampleType = "NATIVE_AVG_DRAWTIME" + SampleTypeNativeMaxDrawtime SampleType = "NATIVE_MAX_DRAWTIME" + SampleTypeOpenglFrames SampleType = "OPENGL_FRAMES" + SampleTypeOpenglFps SampleType = "OPENGL_FPS" + SampleTypeOpenglMinDrawtime SampleType = "OPENGL_MIN_DRAWTIME" + SampleTypeOpenglAvgDrawtime SampleType = "OPENGL_AVG_DRAWTIME" + SampleTypeOpenglMaxDrawtime SampleType = "OPENGL_MAX_DRAWTIME" ) // Values returns all known values for SampleType. Note that this can be expanded @@ -571,9 +571,9 @@ type TestGridSessionArtifactType string // Enum values for TestGridSessionArtifactType const ( - TestGridSessionArtifactTypeUnknown TestGridSessionArtifactType = "UNKNOWN" - TestGridSessionArtifactTypeVideo TestGridSessionArtifactType = "VIDEO" - TestGridSessionArtifactTypeSelenium_log TestGridSessionArtifactType = "SELENIUM_LOG" + TestGridSessionArtifactTypeUnknown TestGridSessionArtifactType = "UNKNOWN" + TestGridSessionArtifactTypeVideo TestGridSessionArtifactType = "VIDEO" + TestGridSessionArtifactTypeSeleniumLog TestGridSessionArtifactType = "SELENIUM_LOG" ) // Values returns all known values for TestGridSessionArtifactType. 
Note that this @@ -611,27 +611,27 @@ type TestType string // Enum values for TestType const ( - TestTypeBuiltin_fuzz TestType = "BUILTIN_FUZZ" - TestTypeBuiltin_explorer TestType = "BUILTIN_EXPLORER" - TestTypeWeb_performance_profile TestType = "WEB_PERFORMANCE_PROFILE" - TestTypeAppium_java_junit TestType = "APPIUM_JAVA_JUNIT" - TestTypeAppium_java_testng TestType = "APPIUM_JAVA_TESTNG" - TestTypeAppium_python TestType = "APPIUM_PYTHON" - TestTypeAppium_node TestType = "APPIUM_NODE" - TestTypeAppium_ruby TestType = "APPIUM_RUBY" - TestTypeAppium_web_java_junit TestType = "APPIUM_WEB_JAVA_JUNIT" - TestTypeAppium_web_java_testng TestType = "APPIUM_WEB_JAVA_TESTNG" - TestTypeAppium_web_python TestType = "APPIUM_WEB_PYTHON" - TestTypeAppium_web_node TestType = "APPIUM_WEB_NODE" - TestTypeAppium_web_ruby TestType = "APPIUM_WEB_RUBY" - TestTypeCalabash TestType = "CALABASH" - TestTypeInstrumentation TestType = "INSTRUMENTATION" - TestTypeUiautomation TestType = "UIAUTOMATION" - TestTypeUiautomator TestType = "UIAUTOMATOR" - TestTypeXctest TestType = "XCTEST" - TestTypeXctest_ui TestType = "XCTEST_UI" - TestTypeRemote_access_record TestType = "REMOTE_ACCESS_RECORD" - TestTypeRemote_access_replay TestType = "REMOTE_ACCESS_REPLAY" + TestTypeBuiltinFuzz TestType = "BUILTIN_FUZZ" + TestTypeBuiltinExplorer TestType = "BUILTIN_EXPLORER" + TestTypeWebPerformanceProfile TestType = "WEB_PERFORMANCE_PROFILE" + TestTypeAppiumJavaJunit TestType = "APPIUM_JAVA_JUNIT" + TestTypeAppiumJavaTestng TestType = "APPIUM_JAVA_TESTNG" + TestTypeAppiumPython TestType = "APPIUM_PYTHON" + TestTypeAppiumNode TestType = "APPIUM_NODE" + TestTypeAppiumRuby TestType = "APPIUM_RUBY" + TestTypeAppiumWebJavaJunit TestType = "APPIUM_WEB_JAVA_JUNIT" + TestTypeAppiumWebJavaTestng TestType = "APPIUM_WEB_JAVA_TESTNG" + TestTypeAppiumWebPython TestType = "APPIUM_WEB_PYTHON" + TestTypeAppiumWebNode TestType = "APPIUM_WEB_NODE" + TestTypeAppiumWebRuby TestType = "APPIUM_WEB_RUBY" + TestTypeCalabash TestType = "CALABASH" + TestTypeInstrumentation TestType = "INSTRUMENTATION" + TestTypeUiautomation TestType = "UIAUTOMATION" + TestTypeUiautomator TestType = "UIAUTOMATOR" + TestTypeXctest TestType = "XCTEST" + TestTypeXctestUi TestType = "XCTEST_UI" + TestTypeRemoteAccessRecord TestType = "REMOTE_ACCESS_RECORD" + TestTypeRemoteAccessReplay TestType = "REMOTE_ACCESS_REPLAY" ) // Values returns all known values for TestType. 
Note that this can be expanded in @@ -707,38 +707,38 @@ type UploadType string // Enum values for UploadType const ( - UploadTypeAndroid_app UploadType = "ANDROID_APP" - UploadTypeIos_app UploadType = "IOS_APP" - UploadTypeWeb_app UploadType = "WEB_APP" - UploadTypeExternal_data UploadType = "EXTERNAL_DATA" - UploadTypeAppium_java_junit_test_package UploadType = "APPIUM_JAVA_JUNIT_TEST_PACKAGE" - UploadTypeAppium_java_testng_test_package UploadType = "APPIUM_JAVA_TESTNG_TEST_PACKAGE" - UploadTypeAppium_python_test_package UploadType = "APPIUM_PYTHON_TEST_PACKAGE" - UploadTypeAppium_node_test_package UploadType = "APPIUM_NODE_TEST_PACKAGE" - UploadTypeAppium_ruby_test_package UploadType = "APPIUM_RUBY_TEST_PACKAGE" - UploadTypeAppium_web_java_junit_test_package UploadType = "APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE" - UploadTypeAppium_web_java_testng_test_package UploadType = "APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE" - UploadTypeAppium_web_python_test_package UploadType = "APPIUM_WEB_PYTHON_TEST_PACKAGE" - UploadTypeAppium_web_node_test_package UploadType = "APPIUM_WEB_NODE_TEST_PACKAGE" - UploadTypeAppium_web_ruby_test_package UploadType = "APPIUM_WEB_RUBY_TEST_PACKAGE" - UploadTypeCalabash_test_package UploadType = "CALABASH_TEST_PACKAGE" - UploadTypeInstrumentation_test_package UploadType = "INSTRUMENTATION_TEST_PACKAGE" - UploadTypeUiautomation_test_package UploadType = "UIAUTOMATION_TEST_PACKAGE" - UploadTypeUiautomator_test_package UploadType = "UIAUTOMATOR_TEST_PACKAGE" - UploadTypeXctest_test_package UploadType = "XCTEST_TEST_PACKAGE" - UploadTypeXctest_ui_test_package UploadType = "XCTEST_UI_TEST_PACKAGE" - UploadTypeAppium_java_junit_test_spec UploadType = "APPIUM_JAVA_JUNIT_TEST_SPEC" - UploadTypeAppium_java_testng_test_spec UploadType = "APPIUM_JAVA_TESTNG_TEST_SPEC" - UploadTypeAppium_python_test_spec UploadType = "APPIUM_PYTHON_TEST_SPEC" - UploadTypeAppium_node_test_spec UploadType = "APPIUM_NODE_TEST_SPEC" - UploadTypeAppium_ruby_test_spec UploadType = "APPIUM_RUBY_TEST_SPEC" - UploadTypeAppium_web_java_junit_test_spec UploadType = "APPIUM_WEB_JAVA_JUNIT_TEST_SPEC" - UploadTypeAppium_web_java_testng_test_spec UploadType = "APPIUM_WEB_JAVA_TESTNG_TEST_SPEC" - UploadTypeAppium_web_python_test_spec UploadType = "APPIUM_WEB_PYTHON_TEST_SPEC" - UploadTypeAppium_web_node_test_spec UploadType = "APPIUM_WEB_NODE_TEST_SPEC" - UploadTypeAppium_web_ruby_test_spec UploadType = "APPIUM_WEB_RUBY_TEST_SPEC" - UploadTypeInstrumentation_test_spec UploadType = "INSTRUMENTATION_TEST_SPEC" - UploadTypeXctest_ui_test_spec UploadType = "XCTEST_UI_TEST_SPEC" + UploadTypeAndroidApp UploadType = "ANDROID_APP" + UploadTypeIosApp UploadType = "IOS_APP" + UploadTypeWebApp UploadType = "WEB_APP" + UploadTypeExternalData UploadType = "EXTERNAL_DATA" + UploadTypeAppiumJavaJunitTestPackage UploadType = "APPIUM_JAVA_JUNIT_TEST_PACKAGE" + UploadTypeAppiumJavaTestngTestPackage UploadType = "APPIUM_JAVA_TESTNG_TEST_PACKAGE" + UploadTypeAppiumPythonTestPackage UploadType = "APPIUM_PYTHON_TEST_PACKAGE" + UploadTypeAppiumNodeTestPackage UploadType = "APPIUM_NODE_TEST_PACKAGE" + UploadTypeAppiumRubyTestPackage UploadType = "APPIUM_RUBY_TEST_PACKAGE" + UploadTypeAppiumWebJavaJunitTestPackage UploadType = "APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE" + UploadTypeAppiumWebJavaTestngTestPackage UploadType = "APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE" + UploadTypeAppiumWebPythonTestPackage UploadType = "APPIUM_WEB_PYTHON_TEST_PACKAGE" + UploadTypeAppiumWebNodeTestPackage UploadType = "APPIUM_WEB_NODE_TEST_PACKAGE" + 
UploadTypeAppiumWebRubyTestPackage UploadType = "APPIUM_WEB_RUBY_TEST_PACKAGE" + UploadTypeCalabashTestPackage UploadType = "CALABASH_TEST_PACKAGE" + UploadTypeInstrumentationTestPackage UploadType = "INSTRUMENTATION_TEST_PACKAGE" + UploadTypeUiautomationTestPackage UploadType = "UIAUTOMATION_TEST_PACKAGE" + UploadTypeUiautomatorTestPackage UploadType = "UIAUTOMATOR_TEST_PACKAGE" + UploadTypeXctestTestPackage UploadType = "XCTEST_TEST_PACKAGE" + UploadTypeXctestUiTestPackage UploadType = "XCTEST_UI_TEST_PACKAGE" + UploadTypeAppiumJavaJunitTestSpec UploadType = "APPIUM_JAVA_JUNIT_TEST_SPEC" + UploadTypeAppiumJavaTestngTestSpec UploadType = "APPIUM_JAVA_TESTNG_TEST_SPEC" + UploadTypeAppiumPythonTestSpec UploadType = "APPIUM_PYTHON_TEST_SPEC" + UploadTypeAppiumNodeTestSpec UploadType = "APPIUM_NODE_TEST_SPEC" + UploadTypeAppiumRubyTestSpec UploadType = "APPIUM_RUBY_TEST_SPEC" + UploadTypeAppiumWebJavaJunitTestSpec UploadType = "APPIUM_WEB_JAVA_JUNIT_TEST_SPEC" + UploadTypeAppiumWebJavaTestngTestSpec UploadType = "APPIUM_WEB_JAVA_TESTNG_TEST_SPEC" + UploadTypeAppiumWebPythonTestSpec UploadType = "APPIUM_WEB_PYTHON_TEST_SPEC" + UploadTypeAppiumWebNodeTestSpec UploadType = "APPIUM_WEB_NODE_TEST_SPEC" + UploadTypeAppiumWebRubyTestSpec UploadType = "APPIUM_WEB_RUBY_TEST_SPEC" + UploadTypeInstrumentationTestSpec UploadType = "INSTRUMENTATION_TEST_SPEC" + UploadTypeXctestUiTestSpec UploadType = "XCTEST_UI_TEST_SPEC" ) // Values returns all known values for UploadType. Note that this can be expanded diff --git a/service/devicefarm/types/types.go b/service/devicefarm/types/types.go index 84844ca6972..522552c6f78 100644 --- a/service/devicefarm/types/types.go +++ b/service/devicefarm/types/types.go @@ -56,71 +56,70 @@ type Artifact struct { // The artifact's type. Allowed values include the following: // - // * UNKNOWN + // * UNKNOWN // - // * + // * // SCREENSHOT // - // * DEVICE_LOG + // * DEVICE_LOG // - // * MESSAGE_LOG + // * MESSAGE_LOG // - // * VIDEO_LOG + // * VIDEO_LOG // - // * - // RESULT_LOG + // * RESULT_LOG // - // * SERVICE_LOG + // * + // SERVICE_LOG // - // * WEBKIT_LOG + // * WEBKIT_LOG // - // * INSTRUMENTATION_OUTPUT + // * INSTRUMENTATION_OUTPUT // + // * EXERCISER_MONKEY_OUTPUT: + // the artifact (log) generated by an Android fuzz test. // - // * EXERCISER_MONKEY_OUTPUT: the artifact (log) generated by an Android fuzz - // test. 
+ // * CALABASH_JSON_OUTPUT // - // * CALABASH_JSON_OUTPUT + // * + // CALABASH_PRETTY_OUTPUT // - // * CALABASH_PRETTY_OUTPUT + // * CALABASH_STANDARD_OUTPUT // - // * - // CALABASH_STANDARD_OUTPUT + // * + // CALABASH_JAVA_XML_OUTPUT // - // * CALABASH_JAVA_XML_OUTPUT + // * AUTOMATION_OUTPUT // - // * - // AUTOMATION_OUTPUT + // * APPIUM_SERVER_OUTPUT // - // * APPIUM_SERVER_OUTPUT + // * + // APPIUM_JAVA_OUTPUT // - // * APPIUM_JAVA_OUTPUT + // * APPIUM_JAVA_XML_OUTPUT // - // * - // APPIUM_JAVA_XML_OUTPUT + // * APPIUM_PYTHON_OUTPUT // - // * APPIUM_PYTHON_OUTPUT - // - // * + // * // APPIUM_PYTHON_XML_OUTPUT // - // * EXPLORER_EVENT_LOG - // - // * EXPLORER_SUMMARY_LOG + // * EXPLORER_EVENT_LOG // + // * EXPLORER_SUMMARY_LOG // - // * APPLICATION_CRASH_REPORT + // * + // APPLICATION_CRASH_REPORT // - // * XCTEST_LOG + // * XCTEST_LOG // - // * VIDEO + // * VIDEO // - // * - // CUSTOMER_ARTIFACT + // * CUSTOMER_ARTIFACT // - // * CUSTOMER_ARTIFACT_LOG + // * + // CUSTOMER_ARTIFACT_LOG // - // * TESTSPEC_OUTPUT + // * TESTSPEC_OUTPUT Type ArtifactType // The presigned Amazon S3 URL that can be used with a GET request to download the @@ -222,9 +221,9 @@ type Device struct { // The device's form factor. Allowed values include: // - // * PHONE + // * PHONE // - // * TABLET + // * TABLET FormFactor DeviceFormFactor // The device's heap size, expressed in bytes. @@ -256,9 +255,9 @@ type Device struct { // The device's platform. Allowed values include: // - // * ANDROID + // * ANDROID // - // * IOS + // * IOS Platform DevicePlatform // The device's radio. @@ -312,28 +311,27 @@ type DeviceFilter struct { // attribute descriptions. Operator RuleOperator - // An array of one or more filter values used in a device filter. Operator Values + // An array of one or more filter values used in a device filter. Operator + // Values // + // * The IN and NOT_IN operators can take a values array that has more than + // one element. // - // * The IN and NOT_IN operators can take a values array that has more than one - // element. - // - // * The other operators require an array with a single + // * The other operators require an array with a single // element. // // Attribute Values // - // * The PLATFORM attribute can be set to ANDROID - // or IOS. + // * The PLATFORM attribute can be set to ANDROID or + // IOS. // - // * The AVAILABILITY attribute can be set to AVAILABLE, - // HIGHLY_AVAILABLE, BUSY, or TEMPORARY_NOT_AVAILABLE. + // * The AVAILABILITY attribute can be set to AVAILABLE, HIGHLY_AVAILABLE, + // BUSY, or TEMPORARY_NOT_AVAILABLE. // - // * The FORM_FACTOR - // attribute can be set to PHONE or TABLET. + // * The FORM_FACTOR attribute can be set to + // PHONE or TABLET. // - // * The FLEET_TYPE attribute can be - // set to PUBLIC or PRIVATE. + // * The FLEET_TYPE attribute can be set to PUBLIC or PRIVATE. Values []*string } @@ -400,11 +398,11 @@ type DevicePool struct { // The device pool's type. Allowed values include: // - // * CURATED: A device pool - // that is created and managed by AWS Device Farm. + // * CURATED: A device pool that + // is created and managed by AWS Device Farm. // - // * PRIVATE: A device pool - // that is created and managed by the device pool developer. + // * PRIVATE: A device pool that is + // created and managed by the device pool developer. Type DevicePoolType } @@ -429,90 +427,87 @@ type DeviceSelectionConfiguration struct { // Used to dynamically select a set of devices for a test run. A filter is made up // of an attribute, an operator, and one or more values. 
// - // * Attribute The - // aspect of a device such as platform or model used as the selection criteria in a - // device filter. Allowed values include: + // * Attribute The aspect of + // a device such as platform or model used as the selection criteria in a device + // filter. Allowed values include: // - // * ARN: The Amazon Resource Name - // (ARN) of the device (for example, - // arn:aws:devicefarm:us-west-2::device:12345Example). + // * ARN: The Amazon Resource Name (ARN) of the + // device (for example, arn:aws:devicefarm:us-west-2::device:12345Example). // - // * PLATFORM: The - // device platform. Valid values are ANDROID or IOS. + // * + // PLATFORM: The device platform. Valid values are ANDROID or IOS. // - // * OS_VERSION: The - // operating system version (for example, 10.3.2). + // * OS_VERSION: + // The operating system version (for example, 10.3.2). // - // * MODEL: The device - // model (for example, iPad 5th Gen). + // * MODEL: The device model + // (for example, iPad 5th Gen). // - // * AVAILABILITY: The current - // availability of the device. Valid values are AVAILABLE, HIGHLY_AVAILABLE, BUSY, - // or TEMPORARY_NOT_AVAILABLE. + // * AVAILABILITY: The current availability of the + // device. Valid values are AVAILABLE, HIGHLY_AVAILABLE, BUSY, or + // TEMPORARY_NOT_AVAILABLE. // - // * FORM_FACTOR: The device form factor. - // Valid values are PHONE or TABLET. + // * FORM_FACTOR: The device form factor. Valid values + // are PHONE or TABLET. // - // * MANUFACTURER: The device - // manufacturer (for example, Apple). + // * MANUFACTURER: The device manufacturer (for example, + // Apple). // - // * REMOTE_ACCESS_ENABLED: Whether the - // device is enabled for remote access. Valid values are TRUE or FALSE. + // * REMOTE_ACCESS_ENABLED: Whether the device is enabled for remote + // access. Valid values are TRUE or FALSE. // - // * - // REMOTE_DEBUG_ENABLED: Whether the device is enabled for remote debugging. Valid - // values are TRUE or FALSE. Because remote debugging is no longer supported + // * REMOTE_DEBUG_ENABLED: Whether the + // device is enabled for remote debugging. Valid values are TRUE or FALSE. Because + // remote debugging is no longer supported // (https://docs.aws.amazon.com/devicefarm/latest/developerguide/history.html), // this filter is ignored. // - // * INSTANCE_ARN: The Amazon Resource Name (ARN) - // of the device instance. + // * INSTANCE_ARN: The Amazon Resource Name (ARN) of the + // device instance. // - // * INSTANCE_LABELS: The label of the device - // instance. + // * INSTANCE_LABELS: The label of the device instance. // - // * FLEET_TYPE: The fleet type. Valid values are PUBLIC or - // PRIVATE. + // * + // FLEET_TYPE: The fleet type. Valid values are PUBLIC or PRIVATE. // - // * Operator The filter operator. + // * Operator The + // filter operator. // - // * The EQUALS operator is - // available for every attribute except INSTANCE_LABELS. + // * The EQUALS operator is available for every attribute except + // INSTANCE_LABELS. // - // * The CONTAINS - // operator is available for the INSTANCE_LABELS and MODEL attributes. + // * The CONTAINS operator is available for the INSTANCE_LABELS + // and MODEL attributes. // - // * - // The IN and NOT_IN operators are available for the ARN, OS_VERSION, MODEL, - // MANUFACTURER, and INSTANCE_ARN attributes. + // * The IN and NOT_IN operators are available for the ARN, + // OS_VERSION, MODEL, MANUFACTURER, and INSTANCE_ARN attributes. 
// - // * The LESS_THAN, + // * The LESS_THAN, // GREATER_THAN, LESS_THAN_OR_EQUALS, and GREATER_THAN_OR_EQUALS operators are also // available for the OS_VERSION attribute. // - // * Values An array of one or more - // filter values. Operator Values + // * Values An array of one or more filter + // values. Operator Values // - // * The IN and NOT_IN operators can take a - // values array that has more than one element. + // * The IN and NOT_IN operators can take a values array + // that has more than one element. // - // * The other operators - // require an array with a single element. + // * The other operators require an array with a + // single element. // - // Attribute Values + // Attribute Values // - // * The - // PLATFORM attribute can be set to ANDROID or IOS. + // * The PLATFORM attribute can be set to + // ANDROID or IOS. // - // * The AVAILABILITY - // attribute can be set to AVAILABLE, HIGHLY_AVAILABLE, BUSY, or - // TEMPORARY_NOT_AVAILABLE. + // * The AVAILABILITY attribute can be set to AVAILABLE, + // HIGHLY_AVAILABLE, BUSY, or TEMPORARY_NOT_AVAILABLE. // - // * The FORM_FACTOR attribute can be set to - // PHONE or TABLET. + // * The FORM_FACTOR attribute + // can be set to PHONE or TABLET. // - // * The FLEET_TYPE attribute can be set to PUBLIC or - // PRIVATE. + // * The FLEET_TYPE attribute can be set to PUBLIC + // or PRIVATE. // // This member is required. Filters []*DeviceFilter @@ -573,19 +568,19 @@ type IncompatibilityMessage struct { // The type of incompatibility. Allowed values include: // - // * ARN + // * ARN // - // * - // FORM_FACTOR (for example, phone or tablet) + // * FORM_FACTOR (for + // example, phone or tablet) // - // * MANUFACTURER + // * MANUFACTURER // - // * PLATFORM - // (for example, Android or iOS) + // * PLATFORM (for example, Android or + // iOS) // - // * REMOTE_ACCESS_ENABLED + // * REMOTE_ACCESS_ENABLED // - // * APPIUM_VERSION + // * APPIUM_VERSION Type DeviceAttribute } @@ -644,20 +639,20 @@ type Job struct { // The job's result. Allowed values include: // - // * PENDING + // * PENDING // - // * PASSED + // * PASSED // - // * - // WARNED + // * WARNED // - // * FAILED + // * + // FAILED // - // * SKIPPED + // * SKIPPED // - // * ERRORED + // * ERRORED // - // * STOPPED + // * STOPPED Result ExecutionResult // The job's start time. @@ -665,25 +660,25 @@ type Job struct { // The job's status. Allowed values include: // - // * PENDING - // - // * - // PENDING_CONCURRENCY + // * PENDING // - // * PENDING_DEVICE + // * PENDING_CONCURRENCY // - // * PROCESSING + // * + // PENDING_DEVICE // - // * SCHEDULING + // * PROCESSING // + // * SCHEDULING // // * PREPARING // - // * RUNNING + // * RUNNING // - // * COMPLETED + // * + // COMPLETED // - // * STOPPING + // * STOPPING Status ExecutionStatus // The job's stop time. @@ -691,48 +686,47 @@ type Job struct { // The job's type. Allowed values include the following: // - // * BUILTIN_FUZZ + // * BUILTIN_FUZZ // - // * + // * // BUILTIN_EXPLORER. For Android, an app explorer that traverses an Android app, // interacting with it and capturing screenshots at the same time. 
// - // * + // * // APPIUM_JAVA_JUNIT // - // * APPIUM_JAVA_TESTNG + // * APPIUM_JAVA_TESTNG // - // * APPIUM_PYTHON + // * APPIUM_PYTHON // - // * - // APPIUM_NODE + // * APPIUM_NODE // - // * APPIUM_RUBY + // * + // APPIUM_RUBY // - // * APPIUM_WEB_JAVA_JUNIT + // * APPIUM_WEB_JAVA_JUNIT // - // * - // APPIUM_WEB_JAVA_TESTNG + // * APPIUM_WEB_JAVA_TESTNG // - // * APPIUM_WEB_PYTHON + // * + // APPIUM_WEB_PYTHON // - // * APPIUM_WEB_NODE + // * APPIUM_WEB_NODE // - // * - // APPIUM_WEB_RUBY + // * APPIUM_WEB_RUBY // - // * CALABASH + // * CALABASH // - // * INSTRUMENTATION - // - // * UIAUTOMATION + // * + // INSTRUMENTATION // + // * UIAUTOMATION // // * UIAUTOMATOR // - // * XCTEST + // * XCTEST // - // * XCTEST_UI + // * XCTEST_UI Type TestType // This value is set to true if video capture is enabled. Otherwise, it is set to @@ -891,20 +885,20 @@ type Problem struct { // The problem's result. Allowed values include: // - // * PENDING - // - // * PASSED + // * PENDING // + // * PASSED // // * WARNED // - // * FAILED + // * + // FAILED // - // * SKIPPED + // * SKIPPED // - // * ERRORED + // * ERRORED // - // * STOPPED + // * STOPPED Result ExecutionResult // Information about the associated run. @@ -1019,17 +1013,17 @@ type RemoteAccessSession struct { // The interaction mode of the remote access session. Valid values are: // - // * + // * // INTERACTIVE: You can interact with the iOS device by viewing, touching, and // rotating the screen. You cannot run XCUITest framework-based tests in this // mode. // - // * NO_VIDEO: You are connected to the device, but cannot interact with - // it or view the screen. This mode has the fastest test execution speed. You can - // run XCUITest framework-based tests in this mode. + // * NO_VIDEO: You are connected to the device, but cannot interact with it + // or view the screen. This mode has the fastest test execution speed. You can run + // XCUITest framework-based tests in this mode. // - // * VIDEO_ONLY: You can view - // the screen, but cannot touch or rotate it. You can run XCUITest framework-based + // * VIDEO_ONLY: You can view the + // screen, but cannot touch or rotate it. You can run XCUITest framework-based // tests and watch the screen in this mode. InteractionMode InteractionMode @@ -1053,21 +1047,20 @@ type RemoteAccessSession struct { // The result of the remote access session. Can be any of the following: // - // * + // * // PENDING. // - // * PASSED. + // * PASSED. // - // * WARNED. + // * WARNED. // - // * FAILED. + // * FAILED. // - // * SKIPPED. + // * SKIPPED. // - // * - // ERRORED. + // * ERRORED. // - // * STOPPED. + // * STOPPED. Result ExecutionResult // When set to true, for private devices, Device Farm does not sign your app again. @@ -1081,26 +1074,25 @@ type RemoteAccessSession struct { // The status of the remote access session. Can be any of the following: // - // * + // * // PENDING. // - // * PENDING_CONCURRENCY. + // * PENDING_CONCURRENCY. // - // * PENDING_DEVICE. + // * PENDING_DEVICE. // - // * PROCESSING. + // * PROCESSING. // + // * + // SCHEDULING. // - // * SCHEDULING. + // * PREPARING. // - // * PREPARING. + // * RUNNING. // - // * RUNNING. + // * COMPLETED. // - // * COMPLETED. - // - // * - // STOPPING. + // * STOPPING. Status ExecutionStatus // The date and time the remote access session was stopped. @@ -1222,9 +1214,9 @@ type Run struct { // The run's platform. 
Allowed values include: // - // * ANDROID + // * ANDROID // - // * IOS + // * IOS Platform DevicePlatform // Information about the radio states for the run. @@ -1232,20 +1224,20 @@ type Run struct { // The run's result. Allowed values include: // - // * PENDING + // * PENDING // - // * PASSED + // * PASSED // - // * - // WARNED + // * WARNED // - // * FAILED + // * + // FAILED // - // * SKIPPED + // * SKIPPED // - // * ERRORED + // * ERRORED // - // * STOPPED + // * STOPPED Result ExecutionResult // Supporting field for the result field. Set only if result is SKIPPED. @@ -1267,25 +1259,25 @@ type Run struct { // The run's status. Allowed values include: // - // * PENDING + // * PENDING // - // * - // PENDING_CONCURRENCY + // * PENDING_CONCURRENCY // - // * PENDING_DEVICE + // * + // PENDING_DEVICE // - // * PROCESSING - // - // * SCHEDULING + // * PROCESSING // + // * SCHEDULING // // * PREPARING // - // * RUNNING + // * RUNNING // - // * COMPLETED + // * + // COMPLETED // - // * STOPPING + // * STOPPING Status ExecutionStatus // The run's stop time. @@ -1299,48 +1291,47 @@ type Run struct { // The run's type. Must be one of the following values: // - // * BUILTIN_FUZZ + // * BUILTIN_FUZZ // - // * + // * // BUILTIN_EXPLORER For Android, an app explorer that traverses an Android app, // interacting with it and capturing screenshots at the same time. // - // * + // * // APPIUM_JAVA_JUNIT // - // * APPIUM_JAVA_TESTNG - // - // * APPIUM_PYTHON + // * APPIUM_JAVA_TESTNG // - // * - // APPIUM_NODE + // * APPIUM_PYTHON // - // * APPIUM_RUBY + // * APPIUM_NODE // - // * APPIUM_WEB_JAVA_JUNIT + // * + // APPIUM_RUBY // - // * - // APPIUM_WEB_JAVA_TESTNG + // * APPIUM_WEB_JAVA_JUNIT // - // * APPIUM_WEB_PYTHON + // * APPIUM_WEB_JAVA_TESTNG // - // * APPIUM_WEB_NODE + // * + // APPIUM_WEB_PYTHON // - // * - // APPIUM_WEB_RUBY + // * APPIUM_WEB_NODE // - // * CALABASH + // * APPIUM_WEB_RUBY // - // * INSTRUMENTATION + // * CALABASH // - // * UIAUTOMATION + // * + // INSTRUMENTATION // + // * UIAUTOMATION // // * UIAUTOMATOR // - // * XCTEST + // * XCTEST // - // * XCTEST_UI + // * XCTEST_UI Type TestType // The Device Farm console URL for the recording of the run. @@ -1355,50 +1346,49 @@ type Sample struct { // The sample's type. Must be one of the following values: // - // * CPU: A CPU sample + // * CPU: A CPU sample // type. This is expressed as the app processing CPU time (including child // processes) as reported by process, as a percentage. // - // * MEMORY: A memory - // usage sample type. This is expressed as the total proportional set size of an - // app process, in kilobytes. + // * MEMORY: A memory usage + // sample type. This is expressed as the total proportional set size of an app + // process, in kilobytes. // - // * NATIVE_AVG_DRAWTIME + // * NATIVE_AVG_DRAWTIME // - // * NATIVE_FPS + // * NATIVE_FPS // - // * - // NATIVE_FRAMES + // * NATIVE_FRAMES // - // * NATIVE_MAX_DRAWTIME + // * + // NATIVE_MAX_DRAWTIME // - // * NATIVE_MIN_DRAWTIME + // * NATIVE_MIN_DRAWTIME // - // * - // OPENGL_AVG_DRAWTIME + // * OPENGL_AVG_DRAWTIME // - // * OPENGL_FPS + // * + // OPENGL_FPS // - // * OPENGL_FRAMES + // * OPENGL_FRAMES // - // * - // OPENGL_MAX_DRAWTIME + // * OPENGL_MAX_DRAWTIME // - // * OPENGL_MIN_DRAWTIME + // * OPENGL_MIN_DRAWTIME // - // * RX + // * + // RX // - // * RX_RATE: The - // total number of bytes per second (TCP and UDP) that are sent, by app process. + // * RX_RATE: The total number of bytes per second (TCP and UDP) that are sent, + // by app process. 
// + // * THREADS: A threads sample type. This is expressed as the + // total number of threads per app process. // - // * THREADS: A threads sample type. This is expressed as the total number of - // threads per app process. + // * TX // - // * TX - // - // * TX_RATE: The total number of bytes per - // second (TCP and UDP) that are received, by app process. + // * TX_RATE: The total number of + // bytes per second (TCP and UDP) that are received, by app process. Type SampleType // The presigned Amazon S3 URL that can be used with a GET request to download the @@ -1448,48 +1438,47 @@ type ScheduleRunTest struct { // The test's type. Must be one of the following values: // - // * BUILTIN_FUZZ + // * BUILTIN_FUZZ // - // * + // * // BUILTIN_EXPLORER. For Android, an app explorer that traverses an Android app, // interacting with it and capturing screenshots at the same time. // - // * + // * // APPIUM_JAVA_JUNIT // - // * APPIUM_JAVA_TESTNG - // - // * APPIUM_PYTHON + // * APPIUM_JAVA_TESTNG // - // * - // APPIUM_NODE + // * APPIUM_PYTHON // - // * APPIUM_RUBY + // * APPIUM_NODE // - // * APPIUM_WEB_JAVA_JUNIT + // * + // APPIUM_RUBY // - // * - // APPIUM_WEB_JAVA_TESTNG + // * APPIUM_WEB_JAVA_JUNIT // - // * APPIUM_WEB_PYTHON + // * APPIUM_WEB_JAVA_TESTNG // - // * APPIUM_WEB_NODE + // * + // APPIUM_WEB_PYTHON // - // * - // APPIUM_WEB_RUBY + // * APPIUM_WEB_NODE // - // * CALABASH + // * APPIUM_WEB_RUBY // - // * INSTRUMENTATION + // * CALABASH // - // * UIAUTOMATION + // * + // INSTRUMENTATION // + // * UIAUTOMATION // // * UIAUTOMATOR // - // * XCTEST + // * XCTEST // - // * XCTEST_UI + // * XCTEST_UI // // This member is required. Type TestType @@ -1500,99 +1489,97 @@ type ScheduleRunTest struct { // The test's parameters, such as test framework parameters and fixture settings. // Parameters are represented by name-value pairs of strings. For all tests: // - // * + // * // app_performance_monitoring: Performance monitoring is enabled by default. Set // this parameter to false to disable it. // // For Calabash tests: // - // * profile: A + // * profile: A // cucumber profile (for example, my_profile_name). // - // * tags: You can limit + // * tags: You can limit // execution to features or scenarios that have (or don't have) certain tags (for // example, @smoke or @smoke,~@wip). // // For Appium tests (all types): // - // * + // * // appium_version: The Appium version. Currently supported values are 1.6.5 (and // later), latest, and default. // - // * latest runs the latest Appium version - // supported by Device Farm (1.9.1). - // - // * For default, Device Farm selects a - // compatible version of Appium for the device. The current behavior is to run - // 1.7.2 on Android devices and iOS 9 and earlier and 1.7.2 for iOS 10 and later. + // * latest runs the latest Appium version supported + // by Device Farm (1.9.1). // + // * For default, Device Farm selects a compatible version + // of Appium for the device. The current behavior is to run 1.7.2 on Android + // devices and iOS 9 and earlier and 1.7.2 for iOS 10 and later. // - // * This behavior is subject to change. + // * This behavior + // is subject to change. // // For fuzz tests (Android only): // - // * - // event_count: The number of events, between 1 and 10000, that the UI fuzz test - // should perform. + // * event_count: The number + // of events, between 1 and 10000, that the UI fuzz test should perform. // - // * throttle: The time, in ms, between 0 and 1000, that the - // UI fuzz test should wait between events. 
+ // * + // throttle: The time, in ms, between 0 and 1000, that the UI fuzz test should wait + // between events. // - // * seed: A seed to use for - // randomizing the UI fuzz test. Using the same seed value between tests ensures - // identical event sequences. + // * seed: A seed to use for randomizing the UI fuzz test. Using + // the same seed value between tests ensures identical event sequences. // - // For Explorer tests: + // For + // Explorer tests: // - // * username: A user name to - // use if the Explorer encounters a login form. If not supplied, no user name is - // inserted. + // * username: A user name to use if the Explorer encounters a + // login form. If not supplied, no user name is inserted. // - // * password: A password to use if the Explorer encounters a login - // form. If not supplied, no password is inserted. + // * password: A password + // to use if the Explorer encounters a login form. If not supplied, no password is + // inserted. // // For Instrumentation: // - // * - // filter: A test filter string. Examples: + // * filter: A test filter string. Examples: // - // * Running a single test case: - // com.android.abc.Test1 + // * + // Running a single test case: com.android.abc.Test1 // - // * Running a single test: + // * Running a single test: // com.android.abc.Test1#smoke // - // * Running multiple tests: + // * Running multiple tests: // com.android.abc.Test1,com.android.abc.Test2 // // For XCTest and XCTestUI: // - // * - // filter: A test filter string. Examples: - // - // * Running a single test class: - // LoginTests + // * filter: + // A test filter string. Examples: // - // * Running a multiple test classes: LoginTests,SmokeTests + // * Running a single test class: LoginTests // + // * + // Running a multiple test classes: LoginTests,SmokeTests // - // * Running a single test: LoginTests/testValid + // * Running a single test: + // LoginTests/testValid // - // * Running multiple tests: + // * Running multiple tests: // LoginTests/testValid,LoginTests/testInvalid // // For UIAutomator: // - // * filter: A - // test filter string. Examples: + // * filter: A test + // filter string. Examples: // - // * Running a single test case: - // com.android.abc.Test1 + // * Running a single test case: com.android.abc.Test1 // - // * Running a single test: - // com.android.abc.Test1#smoke + // * + // Running a single test: com.android.abc.Test1#smoke // - // * Running multiple tests: + // * Running multiple tests: // com.android.abc.Test1,com.android.abc.Test2 Parameters map[string]*string @@ -1626,20 +1613,20 @@ type Suite struct { // The suite's result. Allowed values include: // - // * PENDING + // * PENDING // - // * PASSED + // * PASSED // - // * - // WARNED + // * WARNED // - // * FAILED + // * + // FAILED // - // * SKIPPED + // * SKIPPED // - // * ERRORED + // * ERRORED // - // * STOPPED + // * STOPPED Result ExecutionResult // The suite's start time. @@ -1647,25 +1634,25 @@ type Suite struct { // The suite's status. Allowed values include: // - // * PENDING + // * PENDING // - // * - // PENDING_CONCURRENCY + // * PENDING_CONCURRENCY // - // * PENDING_DEVICE + // * + // PENDING_DEVICE // - // * PROCESSING - // - // * SCHEDULING + // * PROCESSING // + // * SCHEDULING // // * PREPARING // - // * RUNNING + // * RUNNING // - // * COMPLETED + // * + // COMPLETED // - // * STOPPING + // * STOPPING Status ExecutionStatus // The suite's stop time. @@ -1673,48 +1660,47 @@ type Suite struct { // The suite's type. 
Must be one of the following values: // - // * BUILTIN_FUZZ - // + // * BUILTIN_FUZZ // - // * BUILTIN_EXPLORER Only available for Android; an app explorer that traverses an + // * + // BUILTIN_EXPLORER Only available for Android; an app explorer that traverses an // Android app, interacting with it and capturing screenshots at the same time. // + // * + // APPIUM_JAVA_JUNIT // - // * APPIUM_JAVA_JUNIT - // - // * APPIUM_JAVA_TESTNG - // - // * APPIUM_PYTHON + // * APPIUM_JAVA_TESTNG // - // * - // APPIUM_NODE + // * APPIUM_PYTHON // - // * APPIUM_RUBY + // * APPIUM_NODE // - // * APPIUM_WEB_JAVA_JUNIT + // * + // APPIUM_RUBY // - // * - // APPIUM_WEB_JAVA_TESTNG + // * APPIUM_WEB_JAVA_JUNIT // - // * APPIUM_WEB_PYTHON + // * APPIUM_WEB_JAVA_TESTNG // - // * APPIUM_WEB_NODE + // * + // APPIUM_WEB_PYTHON // - // * - // APPIUM_WEB_RUBY + // * APPIUM_WEB_NODE // - // * CALABASH + // * APPIUM_WEB_RUBY // - // * INSTRUMENTATION + // * CALABASH // - // * UIAUTOMATION + // * + // INSTRUMENTATION // + // * UIAUTOMATION // // * UIAUTOMATOR // - // * XCTEST + // * XCTEST // - // * XCTEST_UI + // * XCTEST_UI Type TestType } @@ -1760,20 +1746,20 @@ type Test struct { // The test's result. Allowed values include: // - // * PENDING + // * PENDING // - // * PASSED + // * PASSED // - // * - // WARNED + // * WARNED // - // * FAILED + // * + // FAILED // - // * SKIPPED + // * SKIPPED // - // * ERRORED + // * ERRORED // - // * STOPPED + // * STOPPED Result ExecutionResult // The test's start time. @@ -1781,25 +1767,25 @@ type Test struct { // The test's status. Allowed values include: // - // * PENDING - // - // * - // PENDING_CONCURRENCY + // * PENDING // - // * PENDING_DEVICE + // * PENDING_CONCURRENCY // - // * PROCESSING + // * + // PENDING_DEVICE // - // * SCHEDULING + // * PROCESSING // + // * SCHEDULING // // * PREPARING // - // * RUNNING + // * RUNNING // - // * COMPLETED + // * + // COMPLETED // - // * STOPPING + // * STOPPING Status ExecutionStatus // The test's stop time. @@ -1807,48 +1793,47 @@ type Test struct { // The test's type. Must be one of the following values: // - // * BUILTIN_FUZZ + // * BUILTIN_FUZZ // - // * + // * // BUILTIN_EXPLORER For Android, an app explorer that traverses an Android app, // interacting with it and capturing screenshots at the same time. // - // * + // * // APPIUM_JAVA_JUNIT // - // * APPIUM_JAVA_TESTNG + // * APPIUM_JAVA_TESTNG // - // * APPIUM_PYTHON + // * APPIUM_PYTHON // - // * - // APPIUM_NODE + // * APPIUM_NODE // - // * APPIUM_RUBY + // * + // APPIUM_RUBY // - // * APPIUM_WEB_JAVA_JUNIT + // * APPIUM_WEB_JAVA_JUNIT // - // * - // APPIUM_WEB_JAVA_TESTNG + // * APPIUM_WEB_JAVA_TESTNG // - // * APPIUM_WEB_PYTHON + // * + // APPIUM_WEB_PYTHON // - // * APPIUM_WEB_NODE + // * APPIUM_WEB_NODE // - // * - // APPIUM_WEB_RUBY + // * APPIUM_WEB_RUBY // - // * CALABASH + // * CALABASH // - // * INSTRUMENTATION - // - // * UIAUTOMATION + // * + // INSTRUMENTATION // + // * UIAUTOMATION // // * UIAUTOMATOR // - // * XCTEST + // * XCTEST // - // * XCTEST_UI + // * XCTEST_UI Type TestType } @@ -1953,11 +1938,10 @@ type Upload struct { // The upload's category. Allowed values include: // - // * CURATED: An upload managed - // by AWS Device Farm. + // * CURATED: An upload managed by + // AWS Device Farm. // - // * PRIVATE: An upload managed by the AWS Device Farm - // customer. + // * PRIVATE: An upload managed by the AWS Device Farm customer. Category UploadCategory // The upload's content type (for example, application/octet-stream). 
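For a sense of how the renamed devicefarm enum constants read at a call site, the following minimal sketch classifies an UploadType as an app upload or not. It assumes only the service/devicefarm/types package and the post-rename identifiers shown in this patch; the helper name isAppUpload is illustrative.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/devicefarm/types"
)

// isAppUpload reports whether an upload type refers to an application binary
// rather than a test package or test spec, using the renamed constants from
// this patch.
func isAppUpload(t types.UploadType) bool {
	switch t {
	case types.UploadTypeAndroidApp, types.UploadTypeIosApp, types.UploadTypeWebApp:
		return true
	default:
		return false
	}
}

func main() {
	fmt.Println(isAppUpload(types.UploadTypeAndroidApp))              // true
	fmt.Println(isAppUpload(types.UploadTypeAppiumJavaJunitTestSpec)) // false
}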
@@ -1979,96 +1963,94 @@ type Upload struct { // The upload's status. Must be one of the following values: // - // * FAILED + // * FAILED // - // * + // * // INITIALIZED // - // * PROCESSING + // * PROCESSING // - // * SUCCEEDED + // * SUCCEEDED Status UploadStatus // The upload's type. Must be one of the following values: // - // * ANDROID_APP - // + // * ANDROID_APP // - // * IOS_APP + // * + // IOS_APP // - // * WEB_APP + // * WEB_APP // - // * EXTERNAL_DATA + // * EXTERNAL_DATA // - // * - // APPIUM_JAVA_JUNIT_TEST_PACKAGE + // * APPIUM_JAVA_JUNIT_TEST_PACKAGE // - // * APPIUM_JAVA_TESTNG_TEST_PACKAGE + // * + // APPIUM_JAVA_TESTNG_TEST_PACKAGE // - // * - // APPIUM_PYTHON_TEST_PACKAGE + // * APPIUM_PYTHON_TEST_PACKAGE // - // * APPIUM_NODE_TEST_PACKAGE + // * + // APPIUM_NODE_TEST_PACKAGE // - // * - // APPIUM_RUBY_TEST_PACKAGE + // * APPIUM_RUBY_TEST_PACKAGE // - // * APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE + // * + // APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE // - // * - // APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE + // * APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE // - // * APPIUM_WEB_PYTHON_TEST_PACKAGE + // * + // APPIUM_WEB_PYTHON_TEST_PACKAGE // - // * - // APPIUM_WEB_NODE_TEST_PACKAGE + // * APPIUM_WEB_NODE_TEST_PACKAGE // - // * APPIUM_WEB_RUBY_TEST_PACKAGE + // * + // APPIUM_WEB_RUBY_TEST_PACKAGE // - // * - // CALABASH_TEST_PACKAGE + // * CALABASH_TEST_PACKAGE // - // * INSTRUMENTATION_TEST_PACKAGE + // * + // INSTRUMENTATION_TEST_PACKAGE // - // * - // UIAUTOMATION_TEST_PACKAGE + // * UIAUTOMATION_TEST_PACKAGE // - // * UIAUTOMATOR_TEST_PACKAGE + // * + // UIAUTOMATOR_TEST_PACKAGE // - // * - // XCTEST_TEST_PACKAGE + // * XCTEST_TEST_PACKAGE // - // * XCTEST_UI_TEST_PACKAGE + // * XCTEST_UI_TEST_PACKAGE // - // * + // * // APPIUM_JAVA_JUNIT_TEST_SPEC // - // * APPIUM_JAVA_TESTNG_TEST_SPEC + // * APPIUM_JAVA_TESTNG_TEST_SPEC // - // * + // * // APPIUM_PYTHON_TEST_SPEC // - // * APPIUM_NODE_TEST_SPEC + // * APPIUM_NODE_TEST_SPEC // - // * - // APPIUM_RUBY_TEST_SPEC + // * APPIUM_RUBY_TEST_SPEC // - // * APPIUM_WEB_JAVA_JUNIT_TEST_SPEC + // * + // APPIUM_WEB_JAVA_JUNIT_TEST_SPEC // - // * - // APPIUM_WEB_JAVA_TESTNG_TEST_SPEC + // * APPIUM_WEB_JAVA_TESTNG_TEST_SPEC // - // * APPIUM_WEB_PYTHON_TEST_SPEC + // * + // APPIUM_WEB_PYTHON_TEST_SPEC // - // * - // APPIUM_WEB_NODE_TEST_SPEC + // * APPIUM_WEB_NODE_TEST_SPEC // - // * APPIUM_WEB_RUBY_TEST_SPEC + // * + // APPIUM_WEB_RUBY_TEST_SPEC // - // * - // INSTRUMENTATION_TEST_SPEC + // * INSTRUMENTATION_TEST_SPEC // - // * XCTEST_UI_TEST_SPEC + // * XCTEST_UI_TEST_SPEC Type UploadType // The presigned Amazon S3 URL that was used to store a file using a PUT request. diff --git a/service/directconnect/api_op_AllocateConnectionOnInterconnect.go b/service/directconnect/api_op_AllocateConnectionOnInterconnect.go index 2b57c10cbd0..278870acd79 100644 --- a/service/directconnect/api_op_AllocateConnectionOnInterconnect.go +++ b/service/directconnect/api_op_AllocateConnectionOnInterconnect.go @@ -83,35 +83,34 @@ type AllocateConnectionOnInterconnectOutput struct { // The state of the connection. The following are the possible values: // - // * - // ordering: The initial state of a hosted connection provisioned on an - // interconnect. The connection stays in the ordering state until the owner of the - // hosted connection confirms or declines the connection order. + // * ordering: + // The initial state of a hosted connection provisioned on an interconnect. 
The + // connection stays in the ordering state until the owner of the hosted connection + // confirms or declines the connection order. // - // * requested: - // The initial state of a standard connection. The connection stays in the - // requested state until the Letter of Authorization (LOA) is sent to the - // customer. + // * requested: The initial state of a + // standard connection. The connection stays in the requested state until the + // Letter of Authorization (LOA) is sent to the customer. // - // * pending: The connection has been approved and is being - // initialized. + // * pending: The + // connection has been approved and is being initialized. // - // * available: The network link is up and the connection is - // ready for use. + // * available: The network + // link is up and the connection is ready for use. // - // * down: The network link is down. + // * down: The network link is + // down. // - // * deleting: The - // connection is being deleted. + // * deleting: The connection is being deleted. // - // * deleted: The connection has been deleted. + // * deleted: The connection + // has been deleted. // + // * rejected: A hosted connection in the ordering state enters + // the rejected state if it is deleted by the customer. // - // * rejected: A hosted connection in the ordering state enters the rejected state - // if it is deleted by the customer. - // - // * unknown: The state of the connection is - // not available. + // * unknown: The state of + // the connection is not available. ConnectionState types.ConnectionState // Indicates whether the connection supports a secondary BGP peer in the same diff --git a/service/directconnect/api_op_AllocateHostedConnection.go b/service/directconnect/api_op_AllocateHostedConnection.go index 78c26db01d1..636f4e1f679 100644 --- a/service/directconnect/api_op_AllocateHostedConnection.go +++ b/service/directconnect/api_op_AllocateHostedConnection.go @@ -88,35 +88,34 @@ type AllocateHostedConnectionOutput struct { // The state of the connection. The following are the possible values: // - // * - // ordering: The initial state of a hosted connection provisioned on an - // interconnect. The connection stays in the ordering state until the owner of the - // hosted connection confirms or declines the connection order. + // * ordering: + // The initial state of a hosted connection provisioned on an interconnect. The + // connection stays in the ordering state until the owner of the hosted connection + // confirms or declines the connection order. // - // * requested: - // The initial state of a standard connection. The connection stays in the - // requested state until the Letter of Authorization (LOA) is sent to the - // customer. + // * requested: The initial state of a + // standard connection. The connection stays in the requested state until the + // Letter of Authorization (LOA) is sent to the customer. // - // * pending: The connection has been approved and is being - // initialized. + // * pending: The + // connection has been approved and is being initialized. // - // * available: The network link is up and the connection is - // ready for use. + // * available: The network + // link is up and the connection is ready for use. // - // * down: The network link is down. + // * down: The network link is + // down. // - // * deleting: The - // connection is being deleted. + // * deleting: The connection is being deleted. // - // * deleted: The connection has been deleted. + // * deleted: The connection + // has been deleted. 
// + // * rejected: A hosted connection in the ordering state enters + // the rejected state if it is deleted by the customer. // - // * rejected: A hosted connection in the ordering state enters the rejected state - // if it is deleted by the customer. - // - // * unknown: The state of the connection is - // not available. + // * unknown: The state of + // the connection is not available. ConnectionState types.ConnectionState // Indicates whether the connection supports a secondary BGP peer in the same diff --git a/service/directconnect/api_op_AllocatePrivateVirtualInterface.go b/service/directconnect/api_op_AllocatePrivateVirtualInterface.go index 19e7567836c..1fd4b77e965 100644 --- a/service/directconnect/api_op_AllocatePrivateVirtualInterface.go +++ b/service/directconnect/api_op_AllocatePrivateVirtualInterface.go @@ -123,40 +123,39 @@ type AllocatePrivateVirtualInterfaceOutput struct { // The state of the virtual interface. The following are the possible values: // - // - // * confirming: The creation of the virtual interface is pending confirmation from + // * + // confirming: The creation of the virtual interface is pending confirmation from // the virtual interface owner. If the owner of the virtual interface is different // from the owner of the connection on which it is provisioned, then the virtual // interface will remain in this state until it is confirmed by the virtual // interface owner. // - // * verifying: This state only applies to public virtual + // * verifying: This state only applies to public virtual // interfaces. Each public virtual interface needs validation before the virtual // interface can be created. // - // * pending: A virtual interface is in this state - // from the time that it is created until the virtual interface is ready to forward + // * pending: A virtual interface is in this state from + // the time that it is created until the virtual interface is ready to forward // traffic. // - // * available: A virtual interface that is able to forward - // traffic. + // * available: A virtual interface that is able to forward traffic. // - // * down: A virtual interface that is BGP down. + // * + // down: A virtual interface that is BGP down. // - // * deleting: A - // virtual interface is in this state immediately after calling - // DeleteVirtualInterface until it can no longer forward traffic. + // * deleting: A virtual interface is + // in this state immediately after calling DeleteVirtualInterface until it can no + // longer forward traffic. // - // * deleted: A - // virtual interface that cannot forward traffic. + // * deleted: A virtual interface that cannot forward + // traffic. // - // * rejected: The virtual - // interface owner has declined creation of the virtual interface. If a virtual - // interface in the Confirming state is deleted by the virtual interface owner, the - // virtual interface enters the Rejected state. + // * rejected: The virtual interface owner has declined creation of the + // virtual interface. If a virtual interface in the Confirming state is deleted by + // the virtual interface owner, the virtual interface enters the Rejected state. // - // * unknown: The state of the - // virtual interface is not available. + // * + // unknown: The state of the virtual interface is not available. VirtualInterfaceState types.VirtualInterfaceState // The type of virtual interface. The possible values are private and public. 
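The connection-state documentation repeated across the directconnect outputs above can be consumed as in the minimal sketch below. It assumes only that ConnectionState is the string-backed enum declared in service/directconnect/types; the helper connectionReady and the comparison against the documented value "available" are illustrative, and a named constant such as ConnectionStateAvailable is assumed rather than shown in these hunks.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/directconnect/types"
)

// connectionReady reports whether a Direct Connect connection state means the
// network link is up and the connection is ready for use. ConnectionState is a
// string-backed enum, so the documented value "available" can be compared
// directly; a named constant (for example ConnectionStateAvailable) is assumed
// to exist after this rename but does not appear in these hunks.
func connectionReady(state types.ConnectionState) bool {
	return state == types.ConnectionState("available")
}

func main() {
	fmt.Println(connectionReady(types.ConnectionState("available"))) // true
	fmt.Println(connectionReady(types.ConnectionState("ordering")))  // false
}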
diff --git a/service/directconnect/api_op_AllocatePublicVirtualInterface.go b/service/directconnect/api_op_AllocatePublicVirtualInterface.go index 65acd8228fe..1f0ef3fe44a 100644 --- a/service/directconnect/api_op_AllocatePublicVirtualInterface.go +++ b/service/directconnect/api_op_AllocatePublicVirtualInterface.go @@ -128,40 +128,39 @@ type AllocatePublicVirtualInterfaceOutput struct { // The state of the virtual interface. The following are the possible values: // - // - // * confirming: The creation of the virtual interface is pending confirmation from + // * + // confirming: The creation of the virtual interface is pending confirmation from // the virtual interface owner. If the owner of the virtual interface is different // from the owner of the connection on which it is provisioned, then the virtual // interface will remain in this state until it is confirmed by the virtual // interface owner. // - // * verifying: This state only applies to public virtual + // * verifying: This state only applies to public virtual // interfaces. Each public virtual interface needs validation before the virtual // interface can be created. // - // * pending: A virtual interface is in this state - // from the time that it is created until the virtual interface is ready to forward + // * pending: A virtual interface is in this state from + // the time that it is created until the virtual interface is ready to forward // traffic. // - // * available: A virtual interface that is able to forward - // traffic. + // * available: A virtual interface that is able to forward traffic. // - // * down: A virtual interface that is BGP down. + // * + // down: A virtual interface that is BGP down. // - // * deleting: A - // virtual interface is in this state immediately after calling - // DeleteVirtualInterface until it can no longer forward traffic. + // * deleting: A virtual interface is + // in this state immediately after calling DeleteVirtualInterface until it can no + // longer forward traffic. // - // * deleted: A - // virtual interface that cannot forward traffic. + // * deleted: A virtual interface that cannot forward + // traffic. // - // * rejected: The virtual - // interface owner has declined creation of the virtual interface. If a virtual - // interface in the Confirming state is deleted by the virtual interface owner, the - // virtual interface enters the Rejected state. + // * rejected: The virtual interface owner has declined creation of the + // virtual interface. If a virtual interface in the Confirming state is deleted by + // the virtual interface owner, the virtual interface enters the Rejected state. // - // * unknown: The state of the - // virtual interface is not available. + // * + // unknown: The state of the virtual interface is not available. VirtualInterfaceState types.VirtualInterfaceState // The type of virtual interface. The possible values are private and public. diff --git a/service/directconnect/api_op_AssociateConnectionWithLag.go b/service/directconnect/api_op_AssociateConnectionWithLag.go index cf781bcd65e..7b241c087ce 100644 --- a/service/directconnect/api_op_AssociateConnectionWithLag.go +++ b/service/directconnect/api_op_AssociateConnectionWithLag.go @@ -74,35 +74,34 @@ type AssociateConnectionWithLagOutput struct { // The state of the connection. The following are the possible values: // - // * - // ordering: The initial state of a hosted connection provisioned on an - // interconnect. 
The connection stays in the ordering state until the owner of the - // hosted connection confirms or declines the connection order. + // * ordering: + // The initial state of a hosted connection provisioned on an interconnect. The + // connection stays in the ordering state until the owner of the hosted connection + // confirms or declines the connection order. // - // * requested: - // The initial state of a standard connection. The connection stays in the - // requested state until the Letter of Authorization (LOA) is sent to the - // customer. + // * requested: The initial state of a + // standard connection. The connection stays in the requested state until the + // Letter of Authorization (LOA) is sent to the customer. // - // * pending: The connection has been approved and is being - // initialized. + // * pending: The + // connection has been approved and is being initialized. // - // * available: The network link is up and the connection is - // ready for use. + // * available: The network + // link is up and the connection is ready for use. // - // * down: The network link is down. + // * down: The network link is + // down. // - // * deleting: The - // connection is being deleted. + // * deleting: The connection is being deleted. // - // * deleted: The connection has been deleted. + // * deleted: The connection + // has been deleted. // + // * rejected: A hosted connection in the ordering state enters + // the rejected state if it is deleted by the customer. // - // * rejected: A hosted connection in the ordering state enters the rejected state - // if it is deleted by the customer. - // - // * unknown: The state of the connection is - // not available. + // * unknown: The state of + // the connection is not available. ConnectionState types.ConnectionState // Indicates whether the connection supports a secondary BGP peer in the same diff --git a/service/directconnect/api_op_AssociateHostedConnection.go b/service/directconnect/api_op_AssociateHostedConnection.go index 761df56de9b..999e3c6300e 100644 --- a/service/directconnect/api_op_AssociateHostedConnection.go +++ b/service/directconnect/api_op_AssociateHostedConnection.go @@ -66,35 +66,34 @@ type AssociateHostedConnectionOutput struct { // The state of the connection. The following are the possible values: // - // * - // ordering: The initial state of a hosted connection provisioned on an - // interconnect. The connection stays in the ordering state until the owner of the - // hosted connection confirms or declines the connection order. + // * ordering: + // The initial state of a hosted connection provisioned on an interconnect. The + // connection stays in the ordering state until the owner of the hosted connection + // confirms or declines the connection order. // - // * requested: - // The initial state of a standard connection. The connection stays in the - // requested state until the Letter of Authorization (LOA) is sent to the - // customer. + // * requested: The initial state of a + // standard connection. The connection stays in the requested state until the + // Letter of Authorization (LOA) is sent to the customer. // - // * pending: The connection has been approved and is being - // initialized. + // * pending: The + // connection has been approved and is being initialized. // - // * available: The network link is up and the connection is - // ready for use. + // * available: The network + // link is up and the connection is ready for use. // - // * down: The network link is down. 
+ // * down: The network link is + // down. // - // * deleting: The - // connection is being deleted. + // * deleting: The connection is being deleted. // - // * deleted: The connection has been deleted. + // * deleted: The connection + // has been deleted. // + // * rejected: A hosted connection in the ordering state enters + // the rejected state if it is deleted by the customer. // - // * rejected: A hosted connection in the ordering state enters the rejected state - // if it is deleted by the customer. - // - // * unknown: The state of the connection is - // not available. + // * unknown: The state of + // the connection is not available. ConnectionState types.ConnectionState // Indicates whether the connection supports a secondary BGP peer in the same diff --git a/service/directconnect/api_op_AssociateVirtualInterface.go b/service/directconnect/api_op_AssociateVirtualInterface.go index 63bfc6ea068..430bc3c53aa 100644 --- a/service/directconnect/api_op_AssociateVirtualInterface.go +++ b/service/directconnect/api_op_AssociateVirtualInterface.go @@ -125,40 +125,39 @@ type AssociateVirtualInterfaceOutput struct { // The state of the virtual interface. The following are the possible values: // - // - // * confirming: The creation of the virtual interface is pending confirmation from + // * + // confirming: The creation of the virtual interface is pending confirmation from // the virtual interface owner. If the owner of the virtual interface is different // from the owner of the connection on which it is provisioned, then the virtual // interface will remain in this state until it is confirmed by the virtual // interface owner. // - // * verifying: This state only applies to public virtual + // * verifying: This state only applies to public virtual // interfaces. Each public virtual interface needs validation before the virtual // interface can be created. // - // * pending: A virtual interface is in this state - // from the time that it is created until the virtual interface is ready to forward + // * pending: A virtual interface is in this state from + // the time that it is created until the virtual interface is ready to forward // traffic. // - // * available: A virtual interface that is able to forward - // traffic. + // * available: A virtual interface that is able to forward traffic. // - // * down: A virtual interface that is BGP down. + // * + // down: A virtual interface that is BGP down. // - // * deleting: A - // virtual interface is in this state immediately after calling - // DeleteVirtualInterface until it can no longer forward traffic. + // * deleting: A virtual interface is + // in this state immediately after calling DeleteVirtualInterface until it can no + // longer forward traffic. // - // * deleted: A - // virtual interface that cannot forward traffic. + // * deleted: A virtual interface that cannot forward + // traffic. // - // * rejected: The virtual - // interface owner has declined creation of the virtual interface. If a virtual - // interface in the Confirming state is deleted by the virtual interface owner, the - // virtual interface enters the Rejected state. + // * rejected: The virtual interface owner has declined creation of the + // virtual interface. If a virtual interface in the Confirming state is deleted by + // the virtual interface owner, the virtual interface enters the Rejected state. // - // * unknown: The state of the - // virtual interface is not available. + // * + // unknown: The state of the virtual interface is not available. 
VirtualInterfaceState types.VirtualInterfaceState // The type of virtual interface. The possible values are private and public. diff --git a/service/directconnect/api_op_ConfirmConnection.go b/service/directconnect/api_op_ConfirmConnection.go index 9ae4813bb97..af80f7d0dd4 100644 --- a/service/directconnect/api_op_ConfirmConnection.go +++ b/service/directconnect/api_op_ConfirmConnection.go @@ -42,35 +42,34 @@ type ConfirmConnectionOutput struct { // The state of the connection. The following are the possible values: // - // * - // ordering: The initial state of a hosted connection provisioned on an - // interconnect. The connection stays in the ordering state until the owner of the - // hosted connection confirms or declines the connection order. + // * ordering: + // The initial state of a hosted connection provisioned on an interconnect. The + // connection stays in the ordering state until the owner of the hosted connection + // confirms or declines the connection order. // - // * requested: - // The initial state of a standard connection. The connection stays in the - // requested state until the Letter of Authorization (LOA) is sent to the - // customer. + // * requested: The initial state of a + // standard connection. The connection stays in the requested state until the + // Letter of Authorization (LOA) is sent to the customer. // - // * pending: The connection has been approved and is being - // initialized. + // * pending: The + // connection has been approved and is being initialized. // - // * available: The network link is up and the connection is - // ready for use. + // * available: The network + // link is up and the connection is ready for use. // - // * down: The network link is down. + // * down: The network link is + // down. // - // * deleting: The - // connection is being deleted. + // * deleting: The connection is being deleted. // - // * deleted: The connection has been deleted. + // * deleted: The connection + // has been deleted. // + // * rejected: A hosted connection in the ordering state enters + // the rejected state if it is deleted by the customer. // - // * rejected: A hosted connection in the ordering state enters the rejected state - // if it is deleted by the customer. - // - // * unknown: The state of the connection is - // not available. + // * unknown: The state of + // the connection is not available. ConnectionState types.ConnectionState // Metadata pertaining to the operation's result. diff --git a/service/directconnect/api_op_ConfirmPrivateVirtualInterface.go b/service/directconnect/api_op_ConfirmPrivateVirtualInterface.go index a9245933823..1f3319b4ebc 100644 --- a/service/directconnect/api_op_ConfirmPrivateVirtualInterface.go +++ b/service/directconnect/api_op_ConfirmPrivateVirtualInterface.go @@ -48,40 +48,39 @@ type ConfirmPrivateVirtualInterfaceOutput struct { // The state of the virtual interface. The following are the possible values: // - // - // * confirming: The creation of the virtual interface is pending confirmation from + // * + // confirming: The creation of the virtual interface is pending confirmation from // the virtual interface owner. If the owner of the virtual interface is different // from the owner of the connection on which it is provisioned, then the virtual // interface will remain in this state until it is confirmed by the virtual // interface owner. // - // * verifying: This state only applies to public virtual + // * verifying: This state only applies to public virtual // interfaces. 
Each public virtual interface needs validation before the virtual // interface can be created. // - // * pending: A virtual interface is in this state - // from the time that it is created until the virtual interface is ready to forward + // * pending: A virtual interface is in this state from + // the time that it is created until the virtual interface is ready to forward // traffic. // - // * available: A virtual interface that is able to forward - // traffic. + // * available: A virtual interface that is able to forward traffic. // - // * down: A virtual interface that is BGP down. + // * + // down: A virtual interface that is BGP down. // - // * deleting: A - // virtual interface is in this state immediately after calling - // DeleteVirtualInterface until it can no longer forward traffic. + // * deleting: A virtual interface is + // in this state immediately after calling DeleteVirtualInterface until it can no + // longer forward traffic. // - // * deleted: A - // virtual interface that cannot forward traffic. + // * deleted: A virtual interface that cannot forward + // traffic. // - // * rejected: The virtual - // interface owner has declined creation of the virtual interface. If a virtual - // interface in the Confirming state is deleted by the virtual interface owner, the - // virtual interface enters the Rejected state. + // * rejected: The virtual interface owner has declined creation of the + // virtual interface. If a virtual interface in the Confirming state is deleted by + // the virtual interface owner, the virtual interface enters the Rejected state. // - // * unknown: The state of the - // virtual interface is not available. + // * + // unknown: The state of the virtual interface is not available. VirtualInterfaceState types.VirtualInterfaceState // Metadata pertaining to the operation's result. diff --git a/service/directconnect/api_op_ConfirmPublicVirtualInterface.go b/service/directconnect/api_op_ConfirmPublicVirtualInterface.go index f1a12415339..f66438b96c7 100644 --- a/service/directconnect/api_op_ConfirmPublicVirtualInterface.go +++ b/service/directconnect/api_op_ConfirmPublicVirtualInterface.go @@ -41,40 +41,39 @@ type ConfirmPublicVirtualInterfaceOutput struct { // The state of the virtual interface. The following are the possible values: // - // - // * confirming: The creation of the virtual interface is pending confirmation from + // * + // confirming: The creation of the virtual interface is pending confirmation from // the virtual interface owner. If the owner of the virtual interface is different // from the owner of the connection on which it is provisioned, then the virtual // interface will remain in this state until it is confirmed by the virtual // interface owner. // - // * verifying: This state only applies to public virtual + // * verifying: This state only applies to public virtual // interfaces. Each public virtual interface needs validation before the virtual // interface can be created. // - // * pending: A virtual interface is in this state - // from the time that it is created until the virtual interface is ready to forward + // * pending: A virtual interface is in this state from + // the time that it is created until the virtual interface is ready to forward // traffic. // - // * available: A virtual interface that is able to forward - // traffic. + // * available: A virtual interface that is able to forward traffic. // - // * down: A virtual interface that is BGP down. + // * + // down: A virtual interface that is BGP down. 
// - // * deleting: A - // virtual interface is in this state immediately after calling - // DeleteVirtualInterface until it can no longer forward traffic. + // * deleting: A virtual interface is + // in this state immediately after calling DeleteVirtualInterface until it can no + // longer forward traffic. // - // * deleted: A - // virtual interface that cannot forward traffic. + // * deleted: A virtual interface that cannot forward + // traffic. // - // * rejected: The virtual - // interface owner has declined creation of the virtual interface. If a virtual - // interface in the Confirming state is deleted by the virtual interface owner, the - // virtual interface enters the Rejected state. + // * rejected: The virtual interface owner has declined creation of the + // virtual interface. If a virtual interface in the Confirming state is deleted by + // the virtual interface owner, the virtual interface enters the Rejected state. // - // * unknown: The state of the - // virtual interface is not available. + // * + // unknown: The state of the virtual interface is not available. VirtualInterfaceState types.VirtualInterfaceState // Metadata pertaining to the operation's result. diff --git a/service/directconnect/api_op_ConfirmTransitVirtualInterface.go b/service/directconnect/api_op_ConfirmTransitVirtualInterface.go index f9442cdcd64..d2910f8ee3f 100644 --- a/service/directconnect/api_op_ConfirmTransitVirtualInterface.go +++ b/service/directconnect/api_op_ConfirmTransitVirtualInterface.go @@ -46,40 +46,39 @@ type ConfirmTransitVirtualInterfaceOutput struct { // The state of the virtual interface. The following are the possible values: // - // - // * confirming: The creation of the virtual interface is pending confirmation from + // * + // confirming: The creation of the virtual interface is pending confirmation from // the virtual interface owner. If the owner of the virtual interface is different // from the owner of the connection on which it is provisioned, then the virtual // interface will remain in this state until it is confirmed by the virtual // interface owner. // - // * verifying: This state only applies to public virtual + // * verifying: This state only applies to public virtual // interfaces. Each public virtual interface needs validation before the virtual // interface can be created. // - // * pending: A virtual interface is in this state - // from the time that it is created until the virtual interface is ready to forward + // * pending: A virtual interface is in this state from + // the time that it is created until the virtual interface is ready to forward // traffic. // - // * available: A virtual interface that is able to forward - // traffic. + // * available: A virtual interface that is able to forward traffic. // - // * down: A virtual interface that is BGP down. + // * + // down: A virtual interface that is BGP down. // - // * deleting: A - // virtual interface is in this state immediately after calling - // DeleteVirtualInterface until it can no longer forward traffic. + // * deleting: A virtual interface is + // in this state immediately after calling DeleteVirtualInterface until it can no + // longer forward traffic. // - // * deleted: A - // virtual interface that cannot forward traffic. + // * deleted: A virtual interface that cannot forward + // traffic. // - // * rejected: The virtual - // interface owner has declined creation of the virtual interface. 
If a virtual - // interface in the Confirming state is deleted by the virtual interface owner, the - // virtual interface enters the Rejected state. + // * rejected: The virtual interface owner has declined creation of the + // virtual interface. If a virtual interface in the Confirming state is deleted by + // the virtual interface owner, the virtual interface enters the Rejected state. // - // * unknown: The state of the - // virtual interface is not available. + // * + // unknown: The state of the virtual interface is not available. VirtualInterfaceState types.VirtualInterfaceState // Metadata pertaining to the operation's result. diff --git a/service/directconnect/api_op_CreateConnection.go b/service/directconnect/api_op_CreateConnection.go index 79e50b2db42..b3992a02eed 100644 --- a/service/directconnect/api_op_CreateConnection.go +++ b/service/directconnect/api_op_CreateConnection.go @@ -83,35 +83,34 @@ type CreateConnectionOutput struct { // The state of the connection. The following are the possible values: // - // * - // ordering: The initial state of a hosted connection provisioned on an - // interconnect. The connection stays in the ordering state until the owner of the - // hosted connection confirms or declines the connection order. + // * ordering: + // The initial state of a hosted connection provisioned on an interconnect. The + // connection stays in the ordering state until the owner of the hosted connection + // confirms or declines the connection order. // - // * requested: - // The initial state of a standard connection. The connection stays in the - // requested state until the Letter of Authorization (LOA) is sent to the - // customer. + // * requested: The initial state of a + // standard connection. The connection stays in the requested state until the + // Letter of Authorization (LOA) is sent to the customer. // - // * pending: The connection has been approved and is being - // initialized. + // * pending: The + // connection has been approved and is being initialized. // - // * available: The network link is up and the connection is - // ready for use. + // * available: The network + // link is up and the connection is ready for use. // - // * down: The network link is down. + // * down: The network link is + // down. // - // * deleting: The - // connection is being deleted. + // * deleting: The connection is being deleted. // - // * deleted: The connection has been deleted. + // * deleted: The connection + // has been deleted. // + // * rejected: A hosted connection in the ordering state enters + // the rejected state if it is deleted by the customer. // - // * rejected: A hosted connection in the ordering state enters the rejected state - // if it is deleted by the customer. - // - // * unknown: The state of the connection is - // not available. + // * unknown: The state of + // the connection is not available. ConnectionState types.ConnectionState // Indicates whether the connection supports a secondary BGP peer in the same diff --git a/service/directconnect/api_op_CreateInterconnect.go b/service/directconnect/api_op_CreateInterconnect.go index 17bebd2baf8..8db752787bf 100644 --- a/service/directconnect/api_op_CreateInterconnect.go +++ b/service/directconnect/api_op_CreateInterconnect.go @@ -95,26 +95,26 @@ type CreateInterconnectOutput struct { // The state of the interconnect. The following are the possible values: // - // * + // * // requested: The initial state of an interconnect. 
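Aside, not part of the patch: the ConnectionState values documented on CreateConnectionOutput describe an ordering/requested -> pending -> available progression, so provisioning code typically polls until the connection leaves the transitional states. A rough sketch under the released v2 client surface; DescribeConnections, its input/output shapes, and the client constructor are assumptions here, not something this patch shows:

package main

import (
    "context"
    "fmt"
    "time"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/directconnect"
    "github.com/aws/aws-sdk-go-v2/service/directconnect/types"
)

// waitForConnection polls DescribeConnections until the connection reaches
// "available" or a state it cannot leave ("deleted" or "rejected").
func waitForConnection(ctx context.Context, c *directconnect.Client, id string) (types.ConnectionState, error) {
    for {
        out, err := c.DescribeConnections(ctx, &directconnect.DescribeConnectionsInput{
            ConnectionId: aws.String(id),
        })
        if err != nil {
            return "", err
        }
        if len(out.Connections) == 0 {
            return "", fmt.Errorf("connection %s not found", id)
        }
        switch s := out.Connections[0].ConnectionState; s {
        case types.ConnectionStateAvailable, types.ConnectionStateDeleted, types.ConnectionStateRejected:
            return s, nil
        default: // ordering, requested, pending, down, deleting, unknown: keep waiting
            time.Sleep(30 * time.Second)
        }
    }
}

func main() {
    cfg, err := config.LoadDefaultConfig(context.TODO())
    if err != nil {
        panic(err)
    }
    state, err := waitForConnection(context.TODO(), directconnect.NewFromConfig(cfg), "dxcon-EXAMPLE")
    fmt.Println(state, err)
}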
The interconnect stays in the // requested state until the Letter of Authorization (LOA) is sent to the // customer. // - // * pending: The interconnect is approved, and is being - // initialized. + // * pending: The interconnect is approved, and is being initialized. // - // * available: The network link is up, and the interconnect is - // ready for use. + // * + // available: The network link is up, and the interconnect is ready for use. // - // * down: The network link is down. + // * + // down: The network link is down. // - // * deleting: The - // interconnect is being deleted. + // * deleting: The interconnect is being + // deleted. // - // * deleted: The interconnect is deleted. + // * deleted: The interconnect is deleted. // - // - // * unknown: The state of the interconnect is not available. + // * unknown: The state of the + // interconnect is not available. InterconnectState types.InterconnectState // Indicates whether jumbo frames (9001 MTU) are supported. diff --git a/service/directconnect/api_op_CreateLag.go b/service/directconnect/api_op_CreateLag.go index 2e83067ad52..2c80f2d3c1d 100644 --- a/service/directconnect/api_op_CreateLag.go +++ b/service/directconnect/api_op_CreateLag.go @@ -116,25 +116,25 @@ type CreateLagOutput struct { // The state of the LAG. The following are the possible values: // - // * requested: - // The initial state of a LAG. The LAG stays in the requested state until the - // Letter of Authorization (LOA) is available. + // * requested: The + // initial state of a LAG. The LAG stays in the requested state until the Letter of + // Authorization (LOA) is available. // - // * pending: The LAG has been - // approved and is being initialized. + // * pending: The LAG has been approved and is + // being initialized. // - // * available: The network link is - // established and the LAG is ready for use. + // * available: The network link is established and the LAG is + // ready for use. // - // * down: The network link is - // down. + // * down: The network link is down. // - // * deleting: The LAG is being deleted. - // - // * deleted: The LAG is + // * deleting: The LAG is being // deleted. // - // * unknown: The state of the LAG is not available. + // * deleted: The LAG is deleted. + // + // * unknown: The state of the LAG is not + // available. LagState types.LagState // The location of the LAG. diff --git a/service/directconnect/api_op_CreatePrivateVirtualInterface.go b/service/directconnect/api_op_CreatePrivateVirtualInterface.go index 35a39932dc7..05e7b5de3ce 100644 --- a/service/directconnect/api_op_CreatePrivateVirtualInterface.go +++ b/service/directconnect/api_op_CreatePrivateVirtualInterface.go @@ -127,40 +127,39 @@ type CreatePrivateVirtualInterfaceOutput struct { // The state of the virtual interface. The following are the possible values: // - // - // * confirming: The creation of the virtual interface is pending confirmation from + // * + // confirming: The creation of the virtual interface is pending confirmation from // the virtual interface owner. If the owner of the virtual interface is different // from the owner of the connection on which it is provisioned, then the virtual // interface will remain in this state until it is confirmed by the virtual // interface owner. // - // * verifying: This state only applies to public virtual + // * verifying: This state only applies to public virtual // interfaces. Each public virtual interface needs validation before the virtual // interface can be created. 
// - // * pending: A virtual interface is in this state - // from the time that it is created until the virtual interface is ready to forward + // * pending: A virtual interface is in this state from + // the time that it is created until the virtual interface is ready to forward // traffic. // - // * available: A virtual interface that is able to forward - // traffic. + // * available: A virtual interface that is able to forward traffic. // - // * down: A virtual interface that is BGP down. + // * + // down: A virtual interface that is BGP down. // - // * deleting: A - // virtual interface is in this state immediately after calling - // DeleteVirtualInterface until it can no longer forward traffic. + // * deleting: A virtual interface is + // in this state immediately after calling DeleteVirtualInterface until it can no + // longer forward traffic. // - // * deleted: A - // virtual interface that cannot forward traffic. + // * deleted: A virtual interface that cannot forward + // traffic. // - // * rejected: The virtual - // interface owner has declined creation of the virtual interface. If a virtual - // interface in the Confirming state is deleted by the virtual interface owner, the - // virtual interface enters the Rejected state. + // * rejected: The virtual interface owner has declined creation of the + // virtual interface. If a virtual interface in the Confirming state is deleted by + // the virtual interface owner, the virtual interface enters the Rejected state. // - // * unknown: The state of the - // virtual interface is not available. + // * + // unknown: The state of the virtual interface is not available. VirtualInterfaceState types.VirtualInterfaceState // The type of virtual interface. The possible values are private and public. diff --git a/service/directconnect/api_op_CreatePublicVirtualInterface.go b/service/directconnect/api_op_CreatePublicVirtualInterface.go index ec994d85a63..8921ae37509 100644 --- a/service/directconnect/api_op_CreatePublicVirtualInterface.go +++ b/service/directconnect/api_op_CreatePublicVirtualInterface.go @@ -120,40 +120,39 @@ type CreatePublicVirtualInterfaceOutput struct { // The state of the virtual interface. The following are the possible values: // - // - // * confirming: The creation of the virtual interface is pending confirmation from + // * + // confirming: The creation of the virtual interface is pending confirmation from // the virtual interface owner. If the owner of the virtual interface is different // from the owner of the connection on which it is provisioned, then the virtual // interface will remain in this state until it is confirmed by the virtual // interface owner. // - // * verifying: This state only applies to public virtual + // * verifying: This state only applies to public virtual // interfaces. Each public virtual interface needs validation before the virtual // interface can be created. // - // * pending: A virtual interface is in this state - // from the time that it is created until the virtual interface is ready to forward + // * pending: A virtual interface is in this state from + // the time that it is created until the virtual interface is ready to forward // traffic. // - // * available: A virtual interface that is able to forward - // traffic. + // * available: A virtual interface that is able to forward traffic. // - // * down: A virtual interface that is BGP down. + // * + // down: A virtual interface that is BGP down. 
// - // * deleting: A - // virtual interface is in this state immediately after calling - // DeleteVirtualInterface until it can no longer forward traffic. + // * deleting: A virtual interface is + // in this state immediately after calling DeleteVirtualInterface until it can no + // longer forward traffic. // - // * deleted: A - // virtual interface that cannot forward traffic. + // * deleted: A virtual interface that cannot forward + // traffic. // - // * rejected: The virtual - // interface owner has declined creation of the virtual interface. If a virtual - // interface in the Confirming state is deleted by the virtual interface owner, the - // virtual interface enters the Rejected state. + // * rejected: The virtual interface owner has declined creation of the + // virtual interface. If a virtual interface in the Confirming state is deleted by + // the virtual interface owner, the virtual interface enters the Rejected state. // - // * unknown: The state of the - // virtual interface is not available. + // * + // unknown: The state of the virtual interface is not available. VirtualInterfaceState types.VirtualInterfaceState // The type of virtual interface. The possible values are private and public. diff --git a/service/directconnect/api_op_DeleteConnection.go b/service/directconnect/api_op_DeleteConnection.go index 6faceddffca..e912ea826d2 100644 --- a/service/directconnect/api_op_DeleteConnection.go +++ b/service/directconnect/api_op_DeleteConnection.go @@ -59,35 +59,34 @@ type DeleteConnectionOutput struct { // The state of the connection. The following are the possible values: // - // * - // ordering: The initial state of a hosted connection provisioned on an - // interconnect. The connection stays in the ordering state until the owner of the - // hosted connection confirms or declines the connection order. + // * ordering: + // The initial state of a hosted connection provisioned on an interconnect. The + // connection stays in the ordering state until the owner of the hosted connection + // confirms or declines the connection order. // - // * requested: - // The initial state of a standard connection. The connection stays in the - // requested state until the Letter of Authorization (LOA) is sent to the - // customer. + // * requested: The initial state of a + // standard connection. The connection stays in the requested state until the + // Letter of Authorization (LOA) is sent to the customer. // - // * pending: The connection has been approved and is being - // initialized. + // * pending: The + // connection has been approved and is being initialized. // - // * available: The network link is up and the connection is - // ready for use. + // * available: The network + // link is up and the connection is ready for use. // - // * down: The network link is down. + // * down: The network link is + // down. // - // * deleting: The - // connection is being deleted. + // * deleting: The connection is being deleted. // - // * deleted: The connection has been deleted. + // * deleted: The connection + // has been deleted. // + // * rejected: A hosted connection in the ordering state enters + // the rejected state if it is deleted by the customer. // - // * rejected: A hosted connection in the ordering state enters the rejected state - // if it is deleted by the customer. - // - // * unknown: The state of the connection is - // not available. + // * unknown: The state of + // the connection is not available. 
ConnectionState types.ConnectionState // Indicates whether the connection supports a secondary BGP peer in the same diff --git a/service/directconnect/api_op_DeleteInterconnect.go b/service/directconnect/api_op_DeleteInterconnect.go index c5fe7bb5f04..393b0b6aeab 100644 --- a/service/directconnect/api_op_DeleteInterconnect.go +++ b/service/directconnect/api_op_DeleteInterconnect.go @@ -40,26 +40,26 @@ type DeleteInterconnectOutput struct { // The state of the interconnect. The following are the possible values: // - // * + // * // requested: The initial state of an interconnect. The interconnect stays in the // requested state until the Letter of Authorization (LOA) is sent to the // customer. // - // * pending: The interconnect is approved, and is being - // initialized. + // * pending: The interconnect is approved, and is being initialized. // - // * available: The network link is up, and the interconnect is - // ready for use. + // * + // available: The network link is up, and the interconnect is ready for use. // - // * down: The network link is down. + // * + // down: The network link is down. // - // * deleting: The - // interconnect is being deleted. + // * deleting: The interconnect is being + // deleted. // - // * deleted: The interconnect is deleted. + // * deleted: The interconnect is deleted. // - // - // * unknown: The state of the interconnect is not available. + // * unknown: The state of the + // interconnect is not available. InterconnectState types.InterconnectState // Metadata pertaining to the operation's result. diff --git a/service/directconnect/api_op_DeleteLag.go b/service/directconnect/api_op_DeleteLag.go index 0dda486c5b6..0d020ed600e 100644 --- a/service/directconnect/api_op_DeleteLag.go +++ b/service/directconnect/api_op_DeleteLag.go @@ -70,25 +70,25 @@ type DeleteLagOutput struct { // The state of the LAG. The following are the possible values: // - // * requested: - // The initial state of a LAG. The LAG stays in the requested state until the - // Letter of Authorization (LOA) is available. + // * requested: The + // initial state of a LAG. The LAG stays in the requested state until the Letter of + // Authorization (LOA) is available. // - // * pending: The LAG has been - // approved and is being initialized. + // * pending: The LAG has been approved and is + // being initialized. // - // * available: The network link is - // established and the LAG is ready for use. + // * available: The network link is established and the LAG is + // ready for use. // - // * down: The network link is - // down. + // * down: The network link is down. // - // * deleting: The LAG is being deleted. - // - // * deleted: The LAG is + // * deleting: The LAG is being // deleted. // - // * unknown: The state of the LAG is not available. + // * deleted: The LAG is deleted. + // + // * unknown: The state of the LAG is not + // available. LagState types.LagState // The location of the LAG. diff --git a/service/directconnect/api_op_DeleteVirtualInterface.go b/service/directconnect/api_op_DeleteVirtualInterface.go index c9ccafadb58..81ff0a48f9a 100644 --- a/service/directconnect/api_op_DeleteVirtualInterface.go +++ b/service/directconnect/api_op_DeleteVirtualInterface.go @@ -39,40 +39,39 @@ type DeleteVirtualInterfaceOutput struct { // The state of the virtual interface. 
The following are the possible values: // - // - // * confirming: The creation of the virtual interface is pending confirmation from + // * + // confirming: The creation of the virtual interface is pending confirmation from // the virtual interface owner. If the owner of the virtual interface is different // from the owner of the connection on which it is provisioned, then the virtual // interface will remain in this state until it is confirmed by the virtual // interface owner. // - // * verifying: This state only applies to public virtual + // * verifying: This state only applies to public virtual // interfaces. Each public virtual interface needs validation before the virtual // interface can be created. // - // * pending: A virtual interface is in this state - // from the time that it is created until the virtual interface is ready to forward + // * pending: A virtual interface is in this state from + // the time that it is created until the virtual interface is ready to forward // traffic. // - // * available: A virtual interface that is able to forward - // traffic. + // * available: A virtual interface that is able to forward traffic. // - // * down: A virtual interface that is BGP down. + // * + // down: A virtual interface that is BGP down. // - // * deleting: A - // virtual interface is in this state immediately after calling - // DeleteVirtualInterface until it can no longer forward traffic. + // * deleting: A virtual interface is + // in this state immediately after calling DeleteVirtualInterface until it can no + // longer forward traffic. // - // * deleted: A - // virtual interface that cannot forward traffic. + // * deleted: A virtual interface that cannot forward + // traffic. // - // * rejected: The virtual - // interface owner has declined creation of the virtual interface. If a virtual - // interface in the Confirming state is deleted by the virtual interface owner, the - // virtual interface enters the Rejected state. + // * rejected: The virtual interface owner has declined creation of the + // virtual interface. If a virtual interface in the Confirming state is deleted by + // the virtual interface owner, the virtual interface enters the Rejected state. // - // * unknown: The state of the - // virtual interface is not available. + // * + // unknown: The state of the virtual interface is not available. VirtualInterfaceState types.VirtualInterfaceState // Metadata pertaining to the operation's result. diff --git a/service/directconnect/api_op_DisassociateConnectionFromLag.go b/service/directconnect/api_op_DisassociateConnectionFromLag.go index 64918b679f0..03f4f747043 100644 --- a/service/directconnect/api_op_DisassociateConnectionFromLag.go +++ b/service/directconnect/api_op_DisassociateConnectionFromLag.go @@ -70,35 +70,34 @@ type DisassociateConnectionFromLagOutput struct { // The state of the connection. The following are the possible values: // - // * - // ordering: The initial state of a hosted connection provisioned on an - // interconnect. The connection stays in the ordering state until the owner of the - // hosted connection confirms or declines the connection order. + // * ordering: + // The initial state of a hosted connection provisioned on an interconnect. The + // connection stays in the ordering state until the owner of the hosted connection + // confirms or declines the connection order. // - // * requested: - // The initial state of a standard connection. 
The connection stays in the - // requested state until the Letter of Authorization (LOA) is sent to the - // customer. + // * requested: The initial state of a + // standard connection. The connection stays in the requested state until the + // Letter of Authorization (LOA) is sent to the customer. // - // * pending: The connection has been approved and is being - // initialized. + // * pending: The + // connection has been approved and is being initialized. // - // * available: The network link is up and the connection is - // ready for use. + // * available: The network + // link is up and the connection is ready for use. // - // * down: The network link is down. + // * down: The network link is + // down. // - // * deleting: The - // connection is being deleted. + // * deleting: The connection is being deleted. // - // * deleted: The connection has been deleted. + // * deleted: The connection + // has been deleted. // + // * rejected: A hosted connection in the ordering state enters + // the rejected state if it is deleted by the customer. // - // * rejected: A hosted connection in the ordering state enters the rejected state - // if it is deleted by the customer. - // - // * unknown: The state of the connection is - // not available. + // * unknown: The state of + // the connection is not available. ConnectionState types.ConnectionState // Indicates whether the connection supports a secondary BGP peer in the same diff --git a/service/directconnect/api_op_UpdateLag.go b/service/directconnect/api_op_UpdateLag.go index 8c0d641c13a..6dfb1062874 100644 --- a/service/directconnect/api_op_UpdateLag.go +++ b/service/directconnect/api_op_UpdateLag.go @@ -14,18 +14,18 @@ import ( // Updates the attributes of the specified link aggregation group (LAG). You can // update the following attributes: // -// * The name of the LAG. +// * The name of the LAG. // -// * The value -// for the minimum number of connections that must be operational for the LAG -// itself to be operational. +// * The value for the +// minimum number of connections that must be operational for the LAG itself to be +// operational. // -// When you create a LAG, the default value for the -// minimum number of operational connections is zero (0). If you update this value -// and the number of operational connections falls below the specified value, the -// LAG automatically goes down to avoid over-utilization of the remaining -// connections. Adjust this value with care, as it could force the LAG down if it -// is set higher than the current number of operational connections. +// When you create a LAG, the default value for the minimum number of +// operational connections is zero (0). If you update this value and the number of +// operational connections falls below the specified value, the LAG automatically +// goes down to avoid over-utilization of the remaining connections. Adjust this +// value with care, as it could force the LAG down if it is set higher than the +// current number of operational connections. func (c *Client) UpdateLag(ctx context.Context, params *UpdateLagInput, optFns ...func(*Options)) (*UpdateLagOutput, error) { if params == nil { params = &UpdateLagInput{} @@ -90,25 +90,25 @@ type UpdateLagOutput struct { // The state of the LAG. The following are the possible values: // - // * requested: - // The initial state of a LAG. The LAG stays in the requested state until the - // Letter of Authorization (LOA) is available. + // * requested: The + // initial state of a LAG. 
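Aside, not part of the patch: the UpdateLag documentation just above carries the one behavioural warning in this file that matters to callers: setting the minimum number of operational connections higher than the current count forces the LAG down. A cautious caller might clamp the requested value first; this helper is purely illustrative and is not part of the SDK:

package main

import "fmt"

// safeMinimumLinks returns a value suitable for UpdateLag's minimum-links
// setting: never higher than the number of currently operational connections,
// so the update itself cannot push the LAG into the down state described above.
func safeMinimumLinks(requested, operational int32) int32 {
    switch {
    case requested < 0:
        return 0
    case requested > operational:
        return operational
    default:
        return requested
    }
}

func main() {
    // 3 operational connections, caller asks for a minimum of 4:
    fmt.Println(safeMinimumLinks(4, 3)) // 3
}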
The LAG stays in the requested state until the Letter of + // Authorization (LOA) is available. // - // * pending: The LAG has been - // approved and is being initialized. + // * pending: The LAG has been approved and is + // being initialized. // - // * available: The network link is - // established and the LAG is ready for use. + // * available: The network link is established and the LAG is + // ready for use. // - // * down: The network link is - // down. + // * down: The network link is down. // - // * deleting: The LAG is being deleted. - // - // * deleted: The LAG is + // * deleting: The LAG is being // deleted. // - // * unknown: The state of the LAG is not available. + // * deleted: The LAG is deleted. + // + // * unknown: The state of the LAG is not + // available. LagState types.LagState // The location of the LAG. diff --git a/service/directconnect/api_op_UpdateVirtualInterfaceAttributes.go b/service/directconnect/api_op_UpdateVirtualInterfaceAttributes.go index 89af4cb8e72..9997a532a6a 100644 --- a/service/directconnect/api_op_UpdateVirtualInterfaceAttributes.go +++ b/service/directconnect/api_op_UpdateVirtualInterfaceAttributes.go @@ -121,40 +121,39 @@ type UpdateVirtualInterfaceAttributesOutput struct { // The state of the virtual interface. The following are the possible values: // - // - // * confirming: The creation of the virtual interface is pending confirmation from + // * + // confirming: The creation of the virtual interface is pending confirmation from // the virtual interface owner. If the owner of the virtual interface is different // from the owner of the connection on which it is provisioned, then the virtual // interface will remain in this state until it is confirmed by the virtual // interface owner. // - // * verifying: This state only applies to public virtual + // * verifying: This state only applies to public virtual // interfaces. Each public virtual interface needs validation before the virtual // interface can be created. // - // * pending: A virtual interface is in this state - // from the time that it is created until the virtual interface is ready to forward + // * pending: A virtual interface is in this state from + // the time that it is created until the virtual interface is ready to forward // traffic. // - // * available: A virtual interface that is able to forward - // traffic. + // * available: A virtual interface that is able to forward traffic. // - // * down: A virtual interface that is BGP down. + // * + // down: A virtual interface that is BGP down. // - // * deleting: A - // virtual interface is in this state immediately after calling - // DeleteVirtualInterface until it can no longer forward traffic. + // * deleting: A virtual interface is + // in this state immediately after calling DeleteVirtualInterface until it can no + // longer forward traffic. // - // * deleted: A - // virtual interface that cannot forward traffic. + // * deleted: A virtual interface that cannot forward + // traffic. // - // * rejected: The virtual - // interface owner has declined creation of the virtual interface. If a virtual - // interface in the Confirming state is deleted by the virtual interface owner, the - // virtual interface enters the Rejected state. + // * rejected: The virtual interface owner has declined creation of the + // virtual interface. If a virtual interface in the Confirming state is deleted by + // the virtual interface owner, the virtual interface enters the Rejected state. 
// - // * unknown: The state of the - // virtual interface is not available. + // * + // unknown: The state of the virtual interface is not available. VirtualInterfaceState types.VirtualInterfaceState // The type of virtual interface. The possible values are private and public. diff --git a/service/directconnect/types/types.go b/service/directconnect/types/types.go index 8616ffdb3c8..55b894792ea 100644 --- a/service/directconnect/types/types.go +++ b/service/directconnect/types/types.go @@ -48,34 +48,33 @@ type BGPPeer struct { // The state of the BGP peer. The following are the possible values: // - // * - // verifying: The BGP peering addresses or ASN require validation before the BGP - // peer can be created. This state applies only to public virtual interfaces. + // * verifying: + // The BGP peering addresses or ASN require validation before the BGP peer can be + // created. This state applies only to public virtual interfaces. // + // * pending: The + // BGP peer is created, and remains in this state until it is ready to be + // established. // - // * pending: The BGP peer is created, and remains in this state until it is ready - // to be established. + // * available: The BGP peer is ready to be established. // - // * available: The BGP peer is ready to be established. + // * deleting: + // The BGP peer is being deleted. // - // - // * deleting: The BGP peer is being deleted. - // - // * deleted: The BGP peer is - // deleted and cannot be established. + // * deleted: The BGP peer is deleted and cannot be + // established. BgpPeerState BGPPeerState // The status of the BGP peer. The following are the possible values: // - // * up: - // The BGP peer is established. This state does not indicate the state of the - // routing function. Ensure that you are receiving routes over the BGP session. - // + // * up: The + // BGP peer is established. This state does not indicate the state of the routing + // function. Ensure that you are receiving routes over the BGP session. // - // * down: The BGP peer is down. + // * down: + // The BGP peer is down. // - // * unknown: The BGP peer status is not - // available. + // * unknown: The BGP peer status is not available. BgpStatus BGPStatus // The IP address assigned to the customer interface. @@ -102,35 +101,34 @@ type Connection struct { // The state of the connection. The following are the possible values: // - // * - // ordering: The initial state of a hosted connection provisioned on an - // interconnect. The connection stays in the ordering state until the owner of the - // hosted connection confirms or declines the connection order. + // * ordering: + // The initial state of a hosted connection provisioned on an interconnect. The + // connection stays in the ordering state until the owner of the hosted connection + // confirms or declines the connection order. // - // * requested: - // The initial state of a standard connection. The connection stays in the - // requested state until the Letter of Authorization (LOA) is sent to the - // customer. + // * requested: The initial state of a + // standard connection. The connection stays in the requested state until the + // Letter of Authorization (LOA) is sent to the customer. // - // * pending: The connection has been approved and is being - // initialized. + // * pending: The + // connection has been approved and is being initialized. // - // * available: The network link is up and the connection is - // ready for use. - // - // * down: The network link is down. 
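Aside, not part of the patch: the rewrapped BGPPeer documentation above makes a point that is easy to miss when reading the struct alone: a bgpStatus of "up" only means the session is established, not that routes are being received. A small summary helper, assuming the <Type><Value> constant naming (the helper itself is invented for illustration):

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go-v2/service/directconnect/types"
)

// describePeer summarises a BGP peer using the states documented above. An
// "available" peer with status "up" has an established session, but as the
// docs note that alone does not prove routes are flowing.
func describePeer(p types.BGPPeer) string {
    switch {
    case p.BgpPeerState != types.BGPPeerStateAvailable:
        return fmt.Sprintf("peer not usable yet (state %s)", p.BgpPeerState)
    case p.BgpStatus == types.BGPStatusUp:
        return "session established; verify routes are actually being received"
    case p.BgpStatus == types.BGPStatusDown:
        return "session down"
    default:
        return "status unknown"
    }
}

func main() {
    fmt.Println(describePeer(types.BGPPeer{
        BgpPeerState: types.BGPPeerStateAvailable,
        BgpStatus:    types.BGPStatusUp,
    }))
}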
+ // * available: The network + // link is up and the connection is ready for use. // - // * deleting: The - // connection is being deleted. + // * down: The network link is + // down. // - // * deleted: The connection has been deleted. + // * deleting: The connection is being deleted. // + // * deleted: The connection + // has been deleted. // - // * rejected: A hosted connection in the ordering state enters the rejected state - // if it is deleted by the customer. + // * rejected: A hosted connection in the ordering state enters + // the rejected state if it is deleted by the customer. // - // * unknown: The state of the connection is - // not available. + // * unknown: The state of + // the connection is not available. ConnectionState ConnectionState // Indicates whether the connection supports a secondary BGP peer in the same @@ -185,17 +183,17 @@ type DirectConnectGateway struct { // The state of the Direct Connect gateway. The following are the possible // values: // - // * pending: The initial state after calling + // * pending: The initial state after calling // CreateDirectConnectGateway. // - // * available: The Direct Connect gateway is - // ready for use. + // * available: The Direct Connect gateway is ready + // for use. // - // * deleting: The initial state after calling + // * deleting: The initial state after calling // DeleteDirectConnectGateway. // - // * deleted: The Direct Connect gateway is - // deleted and cannot pass traffic. + // * deleted: The Direct Connect gateway is deleted + // and cannot pass traffic. DirectConnectGatewayState DirectConnectGatewayState // The ID of the AWS account that owns the Direct Connect gateway. @@ -220,21 +218,21 @@ type DirectConnectGatewayAssociation struct { // The state of the association. The following are the possible values: // - // * + // * // associating: The initial state after calling // CreateDirectConnectGatewayAssociation. // - // * associated: The Direct Connect - // gateway and virtual private gateway or transit gateway are successfully - // associated and ready to pass traffic. + // * associated: The Direct Connect gateway + // and virtual private gateway or transit gateway are successfully associated and + // ready to pass traffic. // - // * disassociating: The initial state - // after calling DeleteDirectConnectGatewayAssociation. + // * disassociating: The initial state after calling + // DeleteDirectConnectGatewayAssociation. // - // * disassociated: The - // virtual private gateway or transit gateway is disassociated from the Direct - // Connect gateway. Traffic flow between the Direct Connect gateway and virtual - // private gateway or transit gateway is stopped. + // * disassociated: The virtual private + // gateway or transit gateway is disassociated from the Direct Connect gateway. + // Traffic flow between the Direct Connect gateway and virtual private gateway or + // transit gateway is stopped. AssociationState DirectConnectGatewayAssociationState // The ID of the Direct Connect gateway. @@ -278,16 +276,16 @@ type DirectConnectGatewayAssociationProposal struct { // The state of the proposal. The following are possible values: // - // * accepted: - // The proposal has been accepted. The Direct Connect gateway association is - // available to use in this state. + // * accepted: The + // proposal has been accepted. The Direct Connect gateway association is available + // to use in this state. // - // * deleted: The proposal has been deleted by - // the owner that made the proposal. 
The Direct Connect gateway association cannot - // be used in this state. + // * deleted: The proposal has been deleted by the owner + // that made the proposal. The Direct Connect gateway association cannot be used in + // this state. // - // * requested: The proposal has been requested. The - // Direct Connect gateway association cannot be used in this state. + // * requested: The proposal has been requested. The Direct Connect + // gateway association cannot be used in this state. ProposalState DirectConnectGatewayAssociationProposalState // The Amazon VPC prefixes to advertise to the Direct Connect gateway. @@ -300,19 +298,19 @@ type DirectConnectGatewayAttachment struct { // The state of the attachment. The following are the possible values: // - // * + // * // attaching: The initial state after a virtual interface is created using the // Direct Connect gateway. // - // * attached: The Direct Connect gateway and virtual + // * attached: The Direct Connect gateway and virtual // interface are attached and ready to pass traffic. // - // * detaching: The initial + // * detaching: The initial // state after calling DeleteVirtualInterface. // - // * detached: The virtual - // interface is detached from the Direct Connect gateway. Traffic flow between the - // Direct Connect gateway and virtual interface is stopped. + // * detached: The virtual interface + // is detached from the Direct Connect gateway. Traffic flow between the Direct + // Connect gateway and virtual interface is stopped. AttachmentState DirectConnectGatewayAttachmentState // The type of attachment. @@ -358,26 +356,26 @@ type Interconnect struct { // The state of the interconnect. The following are the possible values: // - // * + // * // requested: The initial state of an interconnect. The interconnect stays in the // requested state until the Letter of Authorization (LOA) is sent to the // customer. // - // * pending: The interconnect is approved, and is being - // initialized. + // * pending: The interconnect is approved, and is being initialized. // - // * available: The network link is up, and the interconnect is - // ready for use. + // * + // available: The network link is up, and the interconnect is ready for use. // - // * down: The network link is down. + // * + // down: The network link is down. // - // * deleting: The - // interconnect is being deleted. - // - // * deleted: The interconnect is deleted. + // * deleting: The interconnect is being + // deleted. // + // * deleted: The interconnect is deleted. // - // * unknown: The state of the interconnect is not available. + // * unknown: The state of the + // interconnect is not available. InterconnectState InterconnectState // Indicates whether jumbo frames (9001 MTU) are supported. @@ -436,25 +434,25 @@ type Lag struct { // The state of the LAG. The following are the possible values: // - // * requested: - // The initial state of a LAG. The LAG stays in the requested state until the - // Letter of Authorization (LOA) is available. - // - // * pending: The LAG has been - // approved and is being initialized. + // * requested: The + // initial state of a LAG. The LAG stays in the requested state until the Letter of + // Authorization (LOA) is available. // - // * available: The network link is - // established and the LAG is ready for use. + // * pending: The LAG has been approved and is + // being initialized. // - // * down: The network link is - // down. + // * available: The network link is established and the LAG is + // ready for use. 
// - // * deleting: The LAG is being deleted. + // * down: The network link is down. // - // * deleted: The LAG is + // * deleting: The LAG is being // deleted. // - // * unknown: The state of the LAG is not available. + // * deleted: The LAG is deleted. + // + // * unknown: The state of the LAG is not + // available. LagState LagState // The location of the LAG. @@ -823,17 +821,17 @@ type VirtualGateway struct { // The state of the virtual private gateway. The following are the possible // values: // - // * pending: Initial state after creating the virtual private - // gateway. + // * pending: Initial state after creating the virtual private gateway. // - // * available: Ready for use by a private virtual interface. + // * + // available: Ready for use by a private virtual interface. // - // * - // deleting: Initial state after deleting the virtual private gateway. + // * deleting: Initial + // state after deleting the virtual private gateway. // - // * - // deleted: The virtual private gateway is deleted. The private virtual interface - // is unable to send traffic over this gateway. + // * deleted: The virtual + // private gateway is deleted. The private virtual interface is unable to send + // traffic over this gateway. VirtualGatewayState *string } @@ -912,40 +910,39 @@ type VirtualInterface struct { // The state of the virtual interface. The following are the possible values: // - // - // * confirming: The creation of the virtual interface is pending confirmation from + // * + // confirming: The creation of the virtual interface is pending confirmation from // the virtual interface owner. If the owner of the virtual interface is different // from the owner of the connection on which it is provisioned, then the virtual // interface will remain in this state until it is confirmed by the virtual // interface owner. // - // * verifying: This state only applies to public virtual + // * verifying: This state only applies to public virtual // interfaces. Each public virtual interface needs validation before the virtual // interface can be created. // - // * pending: A virtual interface is in this state - // from the time that it is created until the virtual interface is ready to forward + // * pending: A virtual interface is in this state from + // the time that it is created until the virtual interface is ready to forward // traffic. // - // * available: A virtual interface that is able to forward - // traffic. + // * available: A virtual interface that is able to forward traffic. // - // * down: A virtual interface that is BGP down. + // * + // down: A virtual interface that is BGP down. // - // * deleting: A - // virtual interface is in this state immediately after calling - // DeleteVirtualInterface until it can no longer forward traffic. + // * deleting: A virtual interface is + // in this state immediately after calling DeleteVirtualInterface until it can no + // longer forward traffic. // - // * deleted: A - // virtual interface that cannot forward traffic. + // * deleted: A virtual interface that cannot forward + // traffic. // - // * rejected: The virtual - // interface owner has declined creation of the virtual interface. If a virtual - // interface in the Confirming state is deleted by the virtual interface owner, the - // virtual interface enters the Rejected state. + // * rejected: The virtual interface owner has declined creation of the + // virtual interface. 
If a virtual interface in the Confirming state is deleted by + // the virtual interface owner, the virtual interface enters the Rejected state. // - // * unknown: The state of the - // virtual interface is not available. + // * + // unknown: The state of the virtual interface is not available. VirtualInterfaceState VirtualInterfaceState // The type of virtual interface. The possible values are private and public. diff --git a/service/directoryservice/api_op_AddIpRoutes.go b/service/directoryservice/api_op_AddIpRoutes.go index 4289b63c348..f4b5f84b77c 100644 --- a/service/directoryservice/api_op_AddIpRoutes.go +++ b/service/directoryservice/api_op_AddIpRoutes.go @@ -53,64 +53,63 @@ type AddIpRoutesInput struct { // that has the description: "AWS created security group for directory ID directory // controllers." Following are the new rules: Inbound: // - // * Type: Custom UDP - // Rule, Protocol: UDP, Range: 88, Source: 0.0.0.0/0 + // * Type: Custom UDP Rule, + // Protocol: UDP, Range: 88, Source: 0.0.0.0/0 // - // * Type: Custom UDP Rule, - // Protocol: UDP, Range: 123, Source: 0.0.0.0/0 + // * Type: Custom UDP Rule, Protocol: + // UDP, Range: 123, Source: 0.0.0.0/0 // - // * Type: Custom UDP Rule, - // Protocol: UDP, Range: 138, Source: 0.0.0.0/0 + // * Type: Custom UDP Rule, Protocol: UDP, + // Range: 138, Source: 0.0.0.0/0 // - // * Type: Custom UDP Rule, - // Protocol: UDP, Range: 389, Source: 0.0.0.0/0 + // * Type: Custom UDP Rule, Protocol: UDP, Range: + // 389, Source: 0.0.0.0/0 // - // * Type: Custom UDP Rule, - // Protocol: UDP, Range: 464, Source: 0.0.0.0/0 + // * Type: Custom UDP Rule, Protocol: UDP, Range: 464, + // Source: 0.0.0.0/0 // - // * Type: Custom UDP Rule, - // Protocol: UDP, Range: 445, Source: 0.0.0.0/0 + // * Type: Custom UDP Rule, Protocol: UDP, Range: 445, Source: + // 0.0.0.0/0 // - // * Type: Custom TCP Rule, - // Protocol: TCP, Range: 88, Source: 0.0.0.0/0 + // * Type: Custom TCP Rule, Protocol: TCP, Range: 88, Source: + // 0.0.0.0/0 // - // * Type: Custom TCP Rule, - // Protocol: TCP, Range: 135, Source: 0.0.0.0/0 + // * Type: Custom TCP Rule, Protocol: TCP, Range: 135, Source: + // 0.0.0.0/0 // - // * Type: Custom TCP Rule, - // Protocol: TCP, Range: 445, Source: 0.0.0.0/0 + // * Type: Custom TCP Rule, Protocol: TCP, Range: 445, Source: + // 0.0.0.0/0 // - // * Type: Custom TCP Rule, - // Protocol: TCP, Range: 464, Source: 0.0.0.0/0 + // * Type: Custom TCP Rule, Protocol: TCP, Range: 464, Source: + // 0.0.0.0/0 // - // * Type: Custom TCP Rule, - // Protocol: TCP, Range: 636, Source: 0.0.0.0/0 + // * Type: Custom TCP Rule, Protocol: TCP, Range: 636, Source: + // 0.0.0.0/0 // - // * Type: Custom TCP Rule, - // Protocol: TCP, Range: 1024-65535, Source: 0.0.0.0/0 + // * Type: Custom TCP Rule, Protocol: TCP, Range: 1024-65535, Source: + // 0.0.0.0/0 // - // * Type: Custom TCP - // Rule, Protocol: TCP, Range: 3268-33269, Source: 0.0.0.0/0 + // * Type: Custom TCP Rule, Protocol: TCP, Range: 3268-33269, Source: + // 0.0.0.0/0 // - // * Type: DNS - // (UDP), Protocol: UDP, Range: 53, Source: 0.0.0.0/0 + // * Type: DNS (UDP), Protocol: UDP, Range: 53, Source: 0.0.0.0/0 // - // * Type: DNS (TCP), - // Protocol: TCP, Range: 53, Source: 0.0.0.0/0 + // * + // Type: DNS (TCP), Protocol: TCP, Range: 53, Source: 0.0.0.0/0 // - // * Type: LDAP, Protocol: TCP, - // Range: 389, Source: 0.0.0.0/0 + // * Type: LDAP, + // Protocol: TCP, Range: 389, Source: 0.0.0.0/0 // - // * Type: All ICMP, Protocol: All, Range: N/A, - // Source: 0.0.0.0/0 + // * Type: All ICMP, Protocol: All, + 
// Range: N/A, Source: 0.0.0.0/0 // // Outbound: // - // * Type: All traffic, Protocol: All, Range: - // All, Destination: 0.0.0.0/0 + // * Type: All traffic, Protocol: All, + // Range: All, Destination: 0.0.0.0/0 // - // These security rules impact an internal network - // interface that is not exposed publicly. + // These security rules impact an internal + // network interface that is not exposed publicly. UpdateSecurityGroupForDirectoryControllers *bool } diff --git a/service/directoryservice/api_op_ResetUserPassword.go b/service/directoryservice/api_op_ResetUserPassword.go index 203621543fe..06d7b5f5e24 100644 --- a/service/directoryservice/api_op_ResetUserPassword.go +++ b/service/directoryservice/api_op_ResetUserPassword.go @@ -14,16 +14,15 @@ import ( // directory. You can reset the password for any user in your directory with the // following exceptions: // -// * For Simple AD, you cannot reset the password for -// any user that is a member of either the Domain Admins or Enterprise Admins group +// * For Simple AD, you cannot reset the password for any +// user that is a member of either the Domain Admins or Enterprise Admins group // except for the administrator user. // -// * For AWS Managed Microsoft AD, you can -// only reset the password for a user that is in an OU based off of the NetBIOS -// name that you typed when you created your directory. For example, you cannot -// reset the password for a user in the AWS Reserved OU. For more information about -// the OU structure for an AWS Managed Microsoft AD directory, see What Gets -// Created +// * For AWS Managed Microsoft AD, you can only +// reset the password for a user that is in an OU based off of the NetBIOS name +// that you typed when you created your directory. For example, you cannot reset +// the password for a user in the AWS Reserved OU. For more information about the +// OU structure for an AWS Managed Microsoft AD directory, see What Gets Created // (https://docs.aws.amazon.com/directoryservice/latest/admin-guide/ms_ad_getting_started_what_gets_created.html) // in the AWS Directory Service Administration Guide. func (c *Client) ResetUserPassword(ctx context.Context, params *ResetUserPasswordInput, optFns ...func(*Options)) (*ResetUserPasswordOutput, error) { diff --git a/service/directoryservice/types/enums.go b/service/directoryservice/types/enums.go index e6a849bd6d6..f69f3cafda9 100644 --- a/service/directoryservice/types/enums.go +++ b/service/directoryservice/types/enums.go @@ -6,12 +6,12 @@ type CertificateState string // Enum values for CertificateState const ( - CertificateStateRegistering CertificateState = "Registering" - CertificateStateRegistered CertificateState = "Registered" - CertificateStateRegister_failed CertificateState = "RegisterFailed" - CertificateStateDeregistering CertificateState = "Deregistering" - CertificateStateDeregistered CertificateState = "Deregistered" - CertificateStateDeregister_failed CertificateState = "DeregisterFailed" + CertificateStateRegistering CertificateState = "Registering" + CertificateStateRegistered CertificateState = "Registered" + CertificateStateRegisterFailed CertificateState = "RegisterFailed" + CertificateStateDeregistering CertificateState = "Deregistering" + CertificateStateDeregistered CertificateState = "Deregistered" + CertificateStateDeregisterFailed CertificateState = "DeregisterFailed" ) // Values returns all known values for CertificateState. 
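Aside, not part of the patch: beyond comment rewrapping, the directoryservice hunks rename generated enum constants from mixed underscore forms to plain CamelCase (CertificateStateRegister_failed becomes CertificateStateRegisterFailed, and so on); the underlying string values are unchanged, so only compile-time references need updating. A hypothetical caller written against the new names (the function is invented for illustration):

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go-v2/service/directoryservice/types"
)

// certificateNeedsAttention reports whether a certificate registration has
// failed in either direction. Before this change the constants were spelled
// CertificateStateRegister_failed / CertificateStateDeregister_failed; the
// string values ("RegisterFailed", "DeregisterFailed") are the same.
func certificateNeedsAttention(s types.CertificateState) bool {
    switch s {
    case types.CertificateStateRegisterFailed, types.CertificateStateDeregisterFailed:
        return true
    default:
        return false
    }
}

func main() {
    fmt.Println(certificateNeedsAttention(types.CertificateStateRegistered)) // false
}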
Note that this can be @@ -104,10 +104,10 @@ type DirectoryType string // Enum values for DirectoryType const ( - DirectoryTypeSimple_ad DirectoryType = "SimpleAD" - DirectoryTypeAd_connector DirectoryType = "ADConnector" - DirectoryTypeMicrosoft_ad DirectoryType = "MicrosoftAD" - DirectoryTypeShared_microsoft_ad DirectoryType = "SharedMicrosoftAD" + DirectoryTypeSimpleAd DirectoryType = "SimpleAD" + DirectoryTypeAdConnector DirectoryType = "ADConnector" + DirectoryTypeMicrosoftAd DirectoryType = "MicrosoftAD" + DirectoryTypeSharedMicrosoftAd DirectoryType = "SharedMicrosoftAD" ) // Values returns all known values for DirectoryType. Note that this can be @@ -154,12 +154,12 @@ type IpRouteStatusMsg string // Enum values for IpRouteStatusMsg const ( - IpRouteStatusMsgAdding IpRouteStatusMsg = "Adding" - IpRouteStatusMsgAdded IpRouteStatusMsg = "Added" - IpRouteStatusMsgRemoving IpRouteStatusMsg = "Removing" - IpRouteStatusMsgRemoved IpRouteStatusMsg = "Removed" - IpRouteStatusMsgAdd_failed IpRouteStatusMsg = "AddFailed" - IpRouteStatusMsgRemove_failed IpRouteStatusMsg = "RemoveFailed" + IpRouteStatusMsgAdding IpRouteStatusMsg = "Adding" + IpRouteStatusMsgAdded IpRouteStatusMsg = "Added" + IpRouteStatusMsgRemoving IpRouteStatusMsg = "Removing" + IpRouteStatusMsgRemoved IpRouteStatusMsg = "Removed" + IpRouteStatusMsgAddFailed IpRouteStatusMsg = "AddFailed" + IpRouteStatusMsgRemoveFailed IpRouteStatusMsg = "RemoveFailed" ) // Values returns all known values for IpRouteStatusMsg. Note that this can be @@ -180,10 +180,10 @@ type LDAPSStatus string // Enum values for LDAPSStatus const ( - LDAPSStatusEnabling LDAPSStatus = "Enabling" - LDAPSStatusEnabled LDAPSStatus = "Enabled" - LDAPSStatusEnable_failed LDAPSStatus = "EnableFailed" - LDAPSStatusDisabled LDAPSStatus = "Disabled" + LDAPSStatusEnabling LDAPSStatus = "Enabling" + LDAPSStatusEnabled LDAPSStatus = "Enabled" + LDAPSStatusEnableFailed LDAPSStatus = "EnableFailed" + LDAPSStatusDisabled LDAPSStatus = "Disabled" ) // Values returns all known values for LDAPSStatus. 
Note that this can be expanded @@ -276,15 +276,15 @@ type SchemaExtensionStatus string // Enum values for SchemaExtensionStatus const ( - SchemaExtensionStatusInitializing SchemaExtensionStatus = "Initializing" - SchemaExtensionStatusCreating_snapshot SchemaExtensionStatus = "CreatingSnapshot" - SchemaExtensionStatusUpdating_schema SchemaExtensionStatus = "UpdatingSchema" - SchemaExtensionStatusReplicating SchemaExtensionStatus = "Replicating" - SchemaExtensionStatusCancel_in_progress SchemaExtensionStatus = "CancelInProgress" - SchemaExtensionStatusRollback_in_progress SchemaExtensionStatus = "RollbackInProgress" - SchemaExtensionStatusCancelled SchemaExtensionStatus = "Cancelled" - SchemaExtensionStatusFailed SchemaExtensionStatus = "Failed" - SchemaExtensionStatusCompleted SchemaExtensionStatus = "Completed" + SchemaExtensionStatusInitializing SchemaExtensionStatus = "Initializing" + SchemaExtensionStatusCreatingSnapshot SchemaExtensionStatus = "CreatingSnapshot" + SchemaExtensionStatusUpdatingSchema SchemaExtensionStatus = "UpdatingSchema" + SchemaExtensionStatusReplicating SchemaExtensionStatus = "Replicating" + SchemaExtensionStatusCancelInProgress SchemaExtensionStatus = "CancelInProgress" + SchemaExtensionStatusRollbackInProgress SchemaExtensionStatus = "RollbackInProgress" + SchemaExtensionStatusCancelled SchemaExtensionStatus = "Cancelled" + SchemaExtensionStatusFailed SchemaExtensionStatus = "Failed" + SchemaExtensionStatusCompleted SchemaExtensionStatus = "Completed" ) // Values returns all known values for SchemaExtensionStatus. Note that this can be @@ -344,15 +344,15 @@ type ShareStatus string // Enum values for ShareStatus const ( - ShareStatusShared ShareStatus = "Shared" - ShareStatusPending_acceptance ShareStatus = "PendingAcceptance" - ShareStatusRejected ShareStatus = "Rejected" - ShareStatusRejecting ShareStatus = "Rejecting" - ShareStatusReject_failed ShareStatus = "RejectFailed" - ShareStatusSharing ShareStatus = "Sharing" - ShareStatusShare_failed ShareStatus = "ShareFailed" - ShareStatusDeleted ShareStatus = "Deleted" - ShareStatusDeleting ShareStatus = "Deleting" + ShareStatusShared ShareStatus = "Shared" + ShareStatusPendingAcceptance ShareStatus = "PendingAcceptance" + ShareStatusRejected ShareStatus = "Rejected" + ShareStatusRejecting ShareStatus = "Rejecting" + ShareStatusRejectFailed ShareStatus = "RejectFailed" + ShareStatusSharing ShareStatus = "Sharing" + ShareStatusShareFailed ShareStatus = "ShareFailed" + ShareStatusDeleted ShareStatus = "Deleted" + ShareStatusDeleting ShareStatus = "Deleting" ) // Values returns all known values for ShareStatus. Note that this can be expanded @@ -430,10 +430,10 @@ type TopicStatus string // Enum values for TopicStatus const ( - TopicStatusRegistered TopicStatus = "Registered" - TopicStatusTopic_not_found TopicStatus = "Topic not found" - TopicStatusFailed TopicStatus = "Failed" - TopicStatusDeleted TopicStatus = "Deleted" + TopicStatusRegistered TopicStatus = "Registered" + TopicStatusTopicNotFound TopicStatus = "Topic not found" + TopicStatusFailed TopicStatus = "Failed" + TopicStatusDeleted TopicStatus = "Deleted" ) // Values returns all known values for TopicStatus. 
Note that this can be expanded @@ -452,9 +452,9 @@ type TrustDirection string // Enum values for TrustDirection const ( - TrustDirectionOne_way_outgoing TrustDirection = "One-Way: Outgoing" - TrustDirectionOne_way_incoming TrustDirection = "One-Way: Incoming" - TrustDirectionTwo_way TrustDirection = "Two-Way" + TrustDirectionOneWayOutgoing TrustDirection = "One-Way: Outgoing" + TrustDirectionOneWayIncoming TrustDirection = "One-Way: Incoming" + TrustDirectionTwoWay TrustDirection = "Two-Way" ) // Values returns all known values for TrustDirection. Note that this can be @@ -472,17 +472,17 @@ type TrustState string // Enum values for TrustState const ( - TrustStateCreating TrustState = "Creating" - TrustStateCreated TrustState = "Created" - TrustStateVerifying TrustState = "Verifying" - TrustStateVerify_failed TrustState = "VerifyFailed" - TrustStateVerified TrustState = "Verified" - TrustStateUpdating TrustState = "Updating" - TrustStateUpdate_failed TrustState = "UpdateFailed" - TrustStateUpdated TrustState = "Updated" - TrustStateDeleting TrustState = "Deleting" - TrustStateDeleted TrustState = "Deleted" - TrustStateFailed TrustState = "Failed" + TrustStateCreating TrustState = "Creating" + TrustStateCreated TrustState = "Created" + TrustStateVerifying TrustState = "Verifying" + TrustStateVerifyFailed TrustState = "VerifyFailed" + TrustStateVerified TrustState = "Verified" + TrustStateUpdating TrustState = "Updating" + TrustStateUpdateFailed TrustState = "UpdateFailed" + TrustStateUpdated TrustState = "Updated" + TrustStateDeleting TrustState = "Deleting" + TrustStateDeleted TrustState = "Deleted" + TrustStateFailed TrustState = "Failed" ) // Values returns all known values for TrustState. Note that this can be expanded diff --git a/service/directoryservice/types/types.go b/service/directoryservice/types/types.go index d087e88294f..adfcfbb0ba2 100644 --- a/service/directoryservice/types/types.go +++ b/service/directoryservice/types/types.go @@ -100,13 +100,12 @@ type DirectoryConnectSettings struct { // The user name of an account in the on-premises directory that is used to connect // to the directory. This account must have the following permissions: // - // * Read + // * Read // users and groups // - // * Create computer objects + // * Create computer objects // - // * Join computers to the - // domain + // * Join computers to the domain // // This member is required. CustomerUserName *string diff --git a/service/dlm/types/enums.go b/service/dlm/types/enums.go index 93fa31723b5..774aebe99a6 100644 --- a/service/dlm/types/enums.go +++ b/service/dlm/types/enums.go @@ -42,7 +42,7 @@ type PolicyTypeValues string // Enum values for PolicyTypeValues const ( - PolicyTypeValuesEbs_snapshot_management PolicyTypeValues = "EBS_SNAPSHOT_MANAGEMENT" + PolicyTypeValuesEbsSnapshotManagement PolicyTypeValues = "EBS_SNAPSHOT_MANAGEMENT" ) // Values returns all known values for PolicyTypeValues. Note that this can be diff --git a/service/docdb/api_op_ApplyPendingMaintenanceAction.go b/service/docdb/api_op_ApplyPendingMaintenanceAction.go index 561ab82135a..09d8fb5ebc5 100644 --- a/service/docdb/api_op_ApplyPendingMaintenanceAction.go +++ b/service/docdb/api_op_ApplyPendingMaintenanceAction.go @@ -40,15 +40,14 @@ type ApplyPendingMaintenanceActionInput struct { // A value that specifies the type of opt-in request or undoes an opt-in request. // An opt-in request of type immediate can't be undone. Valid values: // - // * - // immediate - Apply the maintenance action immediately. 
+ // * immediate + // - Apply the maintenance action immediately. // - // * next-maintenance - - // Apply the maintenance action during the next maintenance window for the - // resource. + // * next-maintenance - Apply the + // maintenance action during the next maintenance window for the resource. // - // * undo-opt-in - Cancel any existing next-maintenance opt-in - // requests. + // * + // undo-opt-in - Cancel any existing next-maintenance opt-in requests. // // This member is required. OptInType *string diff --git a/service/docdb/api_op_CopyDBClusterParameterGroup.go b/service/docdb/api_op_CopyDBClusterParameterGroup.go index 8de79bc3bc1..3d798795c7a 100644 --- a/service/docdb/api_op_CopyDBClusterParameterGroup.go +++ b/service/docdb/api_op_CopyDBClusterParameterGroup.go @@ -33,16 +33,15 @@ type CopyDBClusterParameterGroupInput struct { // The identifier or Amazon Resource Name (ARN) for the source cluster parameter // group. Constraints: // - // * Must specify a valid cluster parameter group. + // * Must specify a valid cluster parameter group. // - // * - // If the source cluster parameter group is in the same AWS Region as the copy, - // specify a valid parameter group identifier; for example, - // my-db-cluster-param-group, or a valid ARN. + // * If the + // source cluster parameter group is in the same AWS Region as the copy, specify a + // valid parameter group identifier; for example, my-db-cluster-param-group, or a + // valid ARN. // - // * If the source parameter group - // is in a different AWS Region than the copy, specify a valid cluster parameter - // group ARN; for example, + // * If the source parameter group is in a different AWS Region than + // the copy, specify a valid cluster parameter group ARN; for example, // arn:aws:rds:us-east-1:123456789012:sample-cluster:sample-parameter-group. // // This member is required. @@ -55,19 +54,18 @@ type CopyDBClusterParameterGroupInput struct { // The identifier for the copied cluster parameter group. Constraints: // - // * - // Cannot be null, empty, or blank. + // * Cannot be + // null, empty, or blank. // - // * Must contain from 1 to 255 letters, - // numbers, or hyphens. + // * Must contain from 1 to 255 letters, numbers, or + // hyphens. // - // * The first character must be a letter. + // * The first character must be a letter. // - // * Cannot - // end with a hyphen or contain two consecutive hyphens. + // * Cannot end with a hyphen or + // contain two consecutive hyphens. // - // Example: - // my-cluster-param-group1 + // Example: my-cluster-param-group1 // // This member is required. TargetDBClusterParameterGroupIdentifier *string diff --git a/service/docdb/api_op_CopyDBClusterSnapshot.go b/service/docdb/api_op_CopyDBClusterSnapshot.go index ec9f8b5bc37..a16e899b49c 100644 --- a/service/docdb/api_op_CopyDBClusterSnapshot.go +++ b/service/docdb/api_op_CopyDBClusterSnapshot.go @@ -39,17 +39,17 @@ type CopyDBClusterSnapshotInput struct { // The identifier of the cluster snapshot to copy. This parameter is not case // sensitive. Constraints: // - // * Must specify a valid system snapshot in the - // available state. + // * Must specify a valid system snapshot in the available + // state. // - // * If the source snapshot is in the same AWS Region as the - // copy, specify a valid snapshot identifier. + // * If the source snapshot is in the same AWS Region as the copy, specify + // a valid snapshot identifier. 
// - // * If the source snapshot is in a - // different AWS Region than the copy, specify a valid cluster snapshot - // ARN. + // * If the source snapshot is in a different AWS + // Region than the copy, specify a valid cluster snapshot ARN. // - // Example: my-cluster-snapshot1 + // Example: + // my-cluster-snapshot1 // // This member is required. SourceDBClusterSnapshotIdentifier *string @@ -57,13 +57,13 @@ type CopyDBClusterSnapshotInput struct { // The identifier of the new cluster snapshot to create from the source cluster // snapshot. This parameter is not case sensitive. Constraints: // - // * Must contain + // * Must contain // from 1 to 63 letters, numbers, or hyphens. // - // * The first character must be a + // * The first character must be a // letter. // - // * Cannot end with a hyphen or contain two consecutive + // * Cannot end with a hyphen or contain two consecutive // hyphens. // // Example: my-cluster-snapshot2 @@ -103,10 +103,10 @@ type CopyDBClusterSnapshotInput struct { // source AWS Region that contains the cluster snapshot to be copied. The presigned // URL request must contain the following parameter values: // - // * SourceRegion - - // The ID of the region that contains the snapshot to be copied. + // * SourceRegion - The + // ID of the region that contains the snapshot to be copied. // - // * + // * // SourceDBClusterSnapshotIdentifier - The identifier for the the encrypted cluster // snapshot to be copied. This identifier must be in the Amazon Resource Name (ARN) // format for the source AWS Region. For example, if you are copying an encrypted @@ -114,7 +114,7 @@ type CopyDBClusterSnapshotInput struct { // SourceDBClusterSnapshotIdentifier looks something like the following: // arn:aws:rds:us-east-1:12345678012:sample-cluster:sample-cluster-snapshot. // - // * + // * // TargetDBClusterSnapshotIdentifier - The identifier for the new cluster snapshot // to be created. This parameter isn't case sensitive. PreSignedUrl *string diff --git a/service/docdb/api_op_CreateDBCluster.go b/service/docdb/api_op_CreateDBCluster.go index c40a191f6a9..9bcf94e0470 100644 --- a/service/docdb/api_op_CreateDBCluster.go +++ b/service/docdb/api_op_CreateDBCluster.go @@ -33,13 +33,13 @@ type CreateDBClusterInput struct { // The cluster identifier. This parameter is stored as a lowercase string. // Constraints: // - // * Must contain from 1 to 63 letters, numbers, or hyphens. + // * Must contain from 1 to 63 letters, numbers, or hyphens. // + // * The + // first character must be a letter. // - // * The first character must be a letter. - // - // * Cannot end with a hyphen or - // contain two consecutive hyphens. + // * Cannot end with a hyphen or contain two + // consecutive hyphens. // // Example: my-cluster // @@ -60,13 +60,13 @@ type CreateDBClusterInput struct { // The name of the master user for the cluster. Constraints: // - // * Must be from 1 - // to 63 letters or numbers. + // * Must be from 1 to + // 63 letters or numbers. // - // * The first character must be a letter. + // * The first character must be a letter. // - // * - // Cannot be a reserved word for the chosen database engine. + // * Cannot be a + // reserved word for the chosen database engine. // // This member is required. MasterUsername *string @@ -78,7 +78,7 @@ type CreateDBClusterInput struct { // The number of days for which automated backups are retained. You must specify a // minimum value of 1. Default: 1 Constraints: // - // * Must be a value from 1 to 35. + // * Must be a value from 1 to 35. 
BackupRetentionPeriod *int32 // The name of the cluster parameter group to associate with this cluster. @@ -114,7 +114,7 @@ type CreateDBClusterInput struct { // instead of the ARN for the AWS KMS encryption key. If an encryption key is not // specified in KmsKeyId: // - // * If the StorageEncrypted parameter is true, Amazon + // * If the StorageEncrypted parameter is true, Amazon // DocumentDB uses your default encryption key. // // AWS KMS creates the default @@ -133,15 +133,15 @@ type CreateDBClusterInput struct { // 30-minute window selected at random from an 8-hour block of time for each AWS // Region. Constraints: // - // * Must be in the format hh24:mi-hh24:mi. + // * Must be in the format hh24:mi-hh24:mi. // - // * Must - // be in Universal Coordinated Time (UTC). + // * Must be in + // Universal Coordinated Time (UTC). // - // * Must not conflict with the - // preferred maintenance window. + // * Must not conflict with the preferred + // maintenance window. // - // * Must be at least 30 minutes. + // * Must be at least 30 minutes. PreferredBackupWindow *string // The weekly time range during which system maintenance can occur, in Universal diff --git a/service/docdb/api_op_CreateDBClusterParameterGroup.go b/service/docdb/api_op_CreateDBClusterParameterGroup.go index 65599962db6..6e9d0655acd 100644 --- a/service/docdb/api_op_CreateDBClusterParameterGroup.go +++ b/service/docdb/api_op_CreateDBClusterParameterGroup.go @@ -46,11 +46,11 @@ type CreateDBClusterParameterGroupInput struct { // The name of the cluster parameter group. Constraints: // - // * Must not match the - // name of an existing DBClusterParameterGroup. + // * Must not match the name + // of an existing DBClusterParameterGroup. // - // This value is stored as a - // lowercase string. + // This value is stored as a lowercase + // string. // // This member is required. DBClusterParameterGroupName *string diff --git a/service/docdb/api_op_CreateDBClusterSnapshot.go b/service/docdb/api_op_CreateDBClusterSnapshot.go index b4b6b58e30e..d56af78ac99 100644 --- a/service/docdb/api_op_CreateDBClusterSnapshot.go +++ b/service/docdb/api_op_CreateDBClusterSnapshot.go @@ -33,7 +33,7 @@ type CreateDBClusterSnapshotInput struct { // The identifier of the cluster to create a snapshot for. This parameter is not // case sensitive. Constraints: // - // * Must match the identifier of an existing + // * Must match the identifier of an existing // DBCluster. // // Example: my-cluster @@ -44,13 +44,13 @@ type CreateDBClusterSnapshotInput struct { // The identifier of the cluster snapshot. This parameter is stored as a lowercase // string. Constraints: // - // * Must contain from 1 to 63 letters, numbers, or + // * Must contain from 1 to 63 letters, numbers, or // hyphens. // - // * The first character must be a letter. + // * The first character must be a letter. // - // * Cannot end with a - // hyphen or contain two consecutive hyphens. + // * Cannot end with a hyphen or + // contain two consecutive hyphens. // // Example: my-cluster-snapshot1 // diff --git a/service/docdb/api_op_CreateDBInstance.go b/service/docdb/api_op_CreateDBInstance.go index 41496d4fd7b..f34693a6df1 100644 --- a/service/docdb/api_op_CreateDBInstance.go +++ b/service/docdb/api_op_CreateDBInstance.go @@ -43,13 +43,13 @@ type CreateDBInstanceInput struct { // The instance identifier. This parameter is stored as a lowercase string. // Constraints: // - // * Must contain from 1 to 63 letters, numbers, or hyphens. 
+ // * Must contain from 1 to 63 letters, numbers, or hyphens. // + // * The + // first character must be a letter. // - // * The first character must be a letter. - // - // * Cannot end with a hyphen or - // contain two consecutive hyphens. + // * Cannot end with a hyphen or contain two + // consecutive hyphens. // // Example: mydbinstance // diff --git a/service/docdb/api_op_DeleteDBCluster.go b/service/docdb/api_op_DeleteDBCluster.go index cb21c1c91de..09933b241bd 100644 --- a/service/docdb/api_op_DeleteDBCluster.go +++ b/service/docdb/api_op_DeleteDBCluster.go @@ -35,7 +35,7 @@ type DeleteDBClusterInput struct { // The cluster identifier for the cluster to be deleted. This parameter isn't case // sensitive. Constraints: // - // * Must match an existing DBClusterIdentifier. + // * Must match an existing DBClusterIdentifier. // // This member is required. DBClusterIdentifier *string @@ -44,14 +44,13 @@ type DeleteDBClusterInput struct { // SkipFinalSnapshot is set to false. Specifying this parameter and also setting // the SkipFinalShapshot parameter to true results in an error. Constraints: // - // * + // * // Must be from 1 to 255 letters, numbers, or hyphens. // - // * The first character - // must be a letter. + // * The first character must + // be a letter. // - // * Cannot end with a hyphen or contain two consecutive - // hyphens. + // * Cannot end with a hyphen or contain two consecutive hyphens. FinalDBSnapshotIdentifier *string // Determines whether a final cluster snapshot is created before the cluster is diff --git a/service/docdb/api_op_DeleteDBClusterParameterGroup.go b/service/docdb/api_op_DeleteDBClusterParameterGroup.go index c7abc1dccca..9ede323fe91 100644 --- a/service/docdb/api_op_DeleteDBClusterParameterGroup.go +++ b/service/docdb/api_op_DeleteDBClusterParameterGroup.go @@ -32,13 +32,13 @@ type DeleteDBClusterParameterGroupInput struct { // The name of the cluster parameter group. Constraints: // - // * Must be the name of - // an existing cluster parameter group. + // * Must be the name of an + // existing cluster parameter group. // - // * You can't delete a default cluster + // * You can't delete a default cluster // parameter group. // - // * Cannot be associated with any clusters. + // * Cannot be associated with any clusters. // // This member is required. DBClusterParameterGroupName *string diff --git a/service/docdb/api_op_DeleteDBInstance.go b/service/docdb/api_op_DeleteDBInstance.go index 4c8a4688e9e..2a73b59f72c 100644 --- a/service/docdb/api_op_DeleteDBInstance.go +++ b/service/docdb/api_op_DeleteDBInstance.go @@ -33,7 +33,7 @@ type DeleteDBInstanceInput struct { // The instance identifier for the instance to be deleted. This parameter isn't // case sensitive. Constraints: // - // * Must match the name of an existing instance. + // * Must match the name of an existing instance. // // This member is required. DBInstanceIdentifier *string diff --git a/service/docdb/api_op_DescribeCertificates.go b/service/docdb/api_op_DescribeCertificates.go index 29e60e9a405..77464626711 100644 --- a/service/docdb/api_op_DescribeCertificates.go +++ b/service/docdb/api_op_DescribeCertificates.go @@ -35,8 +35,7 @@ type DescribeCertificatesInput struct { // omitted, a list of up to MaxRecords certificates is returned. This parameter is // not case sensitive. Constraints // - // * Must match an existing - // CertificateIdentifier. + // * Must match an existing CertificateIdentifier. CertificateIdentifier *string // This parameter is not currently supported. 
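// Illustrative sketch, assuming an already-configured *docdb.Client and the
// aws.String/aws.Int32 pointer helpers: a minimal DescribeCertificates call that
// respects the constraints documented in the surrounding hunks
// (CertificateIdentifier must match an existing certificate; MaxRecords, covered
// in the next hunk, must be between 20 and 100). The function and variable names
// are hypothetical; only the operation and input shapes follow the generated code
// in this patch.
package docdbexample

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/docdb"
)

// describeCertificates fetches metadata for a single, existing certificate.
// Credential loading and client construction are out of scope for this sketch.
func describeCertificates(ctx context.Context, svc *docdb.Client, certificateID string) (*docdb.DescribeCertificatesOutput, error) {
	return svc.DescribeCertificates(ctx, &docdb.DescribeCertificatesInput{
		// Must match an existing CertificateIdentifier.
		CertificateIdentifier: aws.String(certificateID),
		// Minimum: 20, Maximum: 100.
		MaxRecords: aws.Int32(20),
	})
}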
@@ -52,9 +51,9 @@ type DescribeCertificatesInput struct { // included in the response so that the remaining results can be retrieved. // Default: 100 Constraints: // - // * Minimum: 20 + // * Minimum: 20 // - // * Maximum: 100 + // * Maximum: 100 MaxRecords *int32 } diff --git a/service/docdb/api_op_DescribeDBClusterParameterGroups.go b/service/docdb/api_op_DescribeDBClusterParameterGroups.go index 1b95a8fb40c..91c9741762a 100644 --- a/service/docdb/api_op_DescribeDBClusterParameterGroups.go +++ b/service/docdb/api_op_DescribeDBClusterParameterGroups.go @@ -35,7 +35,7 @@ type DescribeDBClusterParameterGroupsInput struct { // The name of a specific cluster parameter group to return details for. // Constraints: // - // * If provided, must match the name of an existing + // * If provided, must match the name of an existing // DBClusterParameterGroup. DBClusterParameterGroupName *string diff --git a/service/docdb/api_op_DescribeDBClusterParameters.go b/service/docdb/api_op_DescribeDBClusterParameters.go index cf7318d557b..0f18d665a76 100644 --- a/service/docdb/api_op_DescribeDBClusterParameters.go +++ b/service/docdb/api_op_DescribeDBClusterParameters.go @@ -33,7 +33,7 @@ type DescribeDBClusterParametersInput struct { // The name of a specific cluster parameter group to return parameter details for. // Constraints: // - // * If provided, must match the name of an existing + // * If provided, must match the name of an existing // DBClusterParameterGroup. // // This member is required. diff --git a/service/docdb/api_op_DescribeDBClusterSnapshots.go b/service/docdb/api_op_DescribeDBClusterSnapshots.go index 82bf426adcc..7df4f52859c 100644 --- a/service/docdb/api_op_DescribeDBClusterSnapshots.go +++ b/service/docdb/api_op_DescribeDBClusterSnapshots.go @@ -35,7 +35,7 @@ type DescribeDBClusterSnapshotsInput struct { // parameter can't be used with the DBClusterSnapshotIdentifier parameter. This // parameter is not case sensitive. Constraints: // - // * If provided, must match the + // * If provided, must match the // identifier of an existing DBCluster. DBClusterIdentifier *string @@ -43,11 +43,11 @@ type DescribeDBClusterSnapshotsInput struct { // with the DBClusterIdentifier parameter. This value is stored as a lowercase // string. Constraints: // - // * If provided, must match the identifier of an - // existing DBClusterSnapshot. + // * If provided, must match the identifier of an existing + // DBClusterSnapshot. // - // * If this identifier is for an automated - // snapshot, the SnapshotType parameter must also be specified. + // * If this identifier is for an automated snapshot, the + // SnapshotType parameter must also be specified. DBClusterSnapshotIdentifier *string // This parameter is not currently supported. @@ -77,27 +77,27 @@ type DescribeDBClusterSnapshotsInput struct { // The type of cluster snapshots to be returned. You can specify one of the // following values: // - // * automated - Return all cluster snapshots that Amazon + // * automated - Return all cluster snapshots that Amazon // DocumentDB has automatically created for your AWS account. // - // * manual - - // Return all cluster snapshots that you have manually created for your AWS - // account. + // * manual - Return + // all cluster snapshots that you have manually created for your AWS account. // - // * shared - Return all manual cluster snapshots that have been - // shared to your AWS account. + // * + // shared - Return all manual cluster snapshots that have been shared to your AWS + // account. 
// - // * public - Return all cluster snapshots that - // have been marked as public. + // * public - Return all cluster snapshots that have been marked as + // public. // - // If you don't specify a SnapshotType value, then - // both automated and manual cluster snapshots are returned. You can include shared - // cluster snapshots with these results by setting the IncludeShared parameter to - // true. You can include public cluster snapshots with these results by setting the - // IncludePublic parameter to true. The IncludeShared and IncludePublic parameters - // don't apply for SnapshotType values of manual or automated. The IncludePublic - // parameter doesn't apply when SnapshotType is set to shared. The IncludeShared - // parameter doesn't apply when SnapshotType is set to public. + // If you don't specify a SnapshotType value, then both automated and + // manual cluster snapshots are returned. You can include shared cluster snapshots + // with these results by setting the IncludeShared parameter to true. You can + // include public cluster snapshots with these results by setting the IncludePublic + // parameter to true. The IncludeShared and IncludePublic parameters don't apply + // for SnapshotType values of manual or automated. The IncludePublic parameter + // doesn't apply when SnapshotType is set to shared. The IncludeShared parameter + // doesn't apply when SnapshotType is set to public. SnapshotType *string } diff --git a/service/docdb/api_op_DescribeDBClusters.go b/service/docdb/api_op_DescribeDBClusters.go index ecbad6ff43b..b38b74c61c3 100644 --- a/service/docdb/api_op_DescribeDBClusters.go +++ b/service/docdb/api_op_DescribeDBClusters.go @@ -39,14 +39,14 @@ type DescribeDBClustersInput struct { // information from only the specific cluster is returned. This parameter isn't // case sensitive. Constraints: // - // * If provided, must match an existing + // * If provided, must match an existing // DBClusterIdentifier. DBClusterIdentifier *string // A filter that specifies one or more clusters to describe. Supported filters: // - // - // * db-cluster-id - Accepts cluster identifiers and cluster Amazon Resource Names + // * + // db-cluster-id - Accepts cluster identifiers and cluster Amazon Resource Names // (ARNs). The results list only includes information about the clusters identified // by these ARNs. Filters []*types.Filter diff --git a/service/docdb/api_op_DescribeDBEngineVersions.go b/service/docdb/api_op_DescribeDBEngineVersions.go index e1fda3c4439..3f3458d8ffa 100644 --- a/service/docdb/api_op_DescribeDBEngineVersions.go +++ b/service/docdb/api_op_DescribeDBEngineVersions.go @@ -33,7 +33,7 @@ type DescribeDBEngineVersionsInput struct { // The name of a specific parameter group family to return details for. // Constraints: // - // * If provided, must match an existing DBParameterGroupFamily. + // * If provided, must match an existing DBParameterGroupFamily. DBParameterGroupFamily *string // Indicates that only the default version of the specified engine or engine and diff --git a/service/docdb/api_op_DescribeDBInstances.go b/service/docdb/api_op_DescribeDBInstances.go index 0971ff830a1..175552a5d80 100644 --- a/service/docdb/api_op_DescribeDBInstances.go +++ b/service/docdb/api_op_DescribeDBInstances.go @@ -35,18 +35,18 @@ type DescribeDBInstancesInput struct { // information from only the specific instance is returned. This parameter isn't // case sensitive. 
Constraints: // - // * If provided, must match the identifier of an + // * If provided, must match the identifier of an // existing DBInstance. DBInstanceIdentifier *string // A filter that specifies one or more instances to describe. Supported filters: // - // - // * db-cluster-id - Accepts cluster identifiers and cluster Amazon Resource Names + // * + // db-cluster-id - Accepts cluster identifiers and cluster Amazon Resource Names // (ARNs). The results list includes only the information about the instances that // are associated with the clusters that are identified by these ARNs. // - // * + // * // db-instance-id - Accepts instance identifiers and instance ARNs. The results // list includes only the information about the instances that are identified by // these ARNs. diff --git a/service/docdb/api_op_DescribeEvents.go b/service/docdb/api_op_DescribeEvents.go index 7e7099d9190..aaf23f64d7a 100644 --- a/service/docdb/api_op_DescribeEvents.go +++ b/service/docdb/api_op_DescribeEvents.go @@ -63,23 +63,23 @@ type DescribeEventsInput struct { // The identifier of the event source for which events are returned. If not // specified, then all sources are included in the response. Constraints: // - // * If + // * If // SourceIdentifier is provided, SourceType must also be provided. // - // * If the - // source type is DBInstance, a DBInstanceIdentifier must be provided. + // * If the source + // type is DBInstance, a DBInstanceIdentifier must be provided. // - // * If - // the source type is DBSecurityGroup, a DBSecurityGroupName must be provided. + // * If the source + // type is DBSecurityGroup, a DBSecurityGroupName must be provided. // + // * If the + // source type is DBParameterGroup, a DBParameterGroupName must be provided. // - // * If the source type is DBParameterGroup, a DBParameterGroupName must be - // provided. + // * If + // the source type is DBSnapshot, a DBSnapshotIdentifier must be provided. // - // * If the source type is DBSnapshot, a DBSnapshotIdentifier must - // be provided. - // - // * Cannot end with a hyphen or contain two consecutive hyphens. + // * + // Cannot end with a hyphen or contain two consecutive hyphens. SourceIdentifier *string // The event source to retrieve events for. If no value is specified, all events diff --git a/service/docdb/api_op_DescribePendingMaintenanceActions.go b/service/docdb/api_op_DescribePendingMaintenanceActions.go index 95abd76b5b2..94afd3fff2e 100644 --- a/service/docdb/api_op_DescribePendingMaintenanceActions.go +++ b/service/docdb/api_op_DescribePendingMaintenanceActions.go @@ -34,14 +34,13 @@ type DescribePendingMaintenanceActionsInput struct { // A filter that specifies one or more resources to return pending maintenance // actions for. Supported filters: // - // * db-cluster-id - Accepts cluster - // identifiers and cluster Amazon Resource Names (ARNs). The results list includes - // only pending maintenance actions for the clusters identified by these ARNs. + // * db-cluster-id - Accepts cluster identifiers + // and cluster Amazon Resource Names (ARNs). The results list includes only pending + // maintenance actions for the clusters identified by these ARNs. // - // - // * db-instance-id - Accepts instance identifiers and instance ARNs. The results - // list includes only pending maintenance actions for the DB instances identified - // by these ARNs. + // * db-instance-id + // - Accepts instance identifiers and instance ARNs. 
The results list includes only + // pending maintenance actions for the DB instances identified by these ARNs. Filters []*types.Filter // An optional pagination token provided by a previous request. If this parameter diff --git a/service/docdb/api_op_FailoverDBCluster.go b/service/docdb/api_op_FailoverDBCluster.go index e70055cff38..79a36ef5317 100644 --- a/service/docdb/api_op_FailoverDBCluster.go +++ b/service/docdb/api_op_FailoverDBCluster.go @@ -38,8 +38,7 @@ type FailoverDBClusterInput struct { // A cluster identifier to force a failover for. This parameter is not case // sensitive. Constraints: // - // * Must match the identifier of an existing - // DBCluster. + // * Must match the identifier of an existing DBCluster. DBClusterIdentifier *string // The name of the instance to promote to the primary instance. You must specify diff --git a/service/docdb/api_op_ModifyDBCluster.go b/service/docdb/api_op_ModifyDBCluster.go index d47927cd360..4224e71aa6e 100644 --- a/service/docdb/api_op_ModifyDBCluster.go +++ b/service/docdb/api_op_ModifyDBCluster.go @@ -35,7 +35,7 @@ type ModifyDBClusterInput struct { // The cluster identifier for the cluster that is being modified. This parameter is // not case sensitive. Constraints: // - // * Must match the identifier of an existing + // * Must match the identifier of an existing // DBCluster. // // This member is required. @@ -55,7 +55,7 @@ type ModifyDBClusterInput struct { // The number of days for which automated backups are retained. You must specify a // minimum value of 1. Default: 1 Constraints: // - // * Must be a value from 1 to 35. + // * Must be a value from 1 to 35. BackupRetentionPeriod *int32 // The configuration setting for the log types to be enabled for export to Amazon @@ -85,16 +85,15 @@ type ModifyDBClusterInput struct { // The new cluster identifier for the cluster when renaming a cluster. This value // is stored as a lowercase string. Constraints: // - // * Must contain from 1 to 63 + // * Must contain from 1 to 63 // letters, numbers, or hyphens. // - // * The first character must be a letter. + // * The first character must be a letter. // + // * Cannot + // end with a hyphen or contain two consecutive hyphens. // - // * Cannot end with a hyphen or contain two consecutive hyphens. - // - // Example: - // my-cluster2 + // Example: my-cluster2 NewDBClusterIdentifier *string // The port number on which the cluster accepts connections. Constraints: Must be a @@ -106,15 +105,15 @@ type ModifyDBClusterInput struct { // 30-minute window selected at random from an 8-hour block of time for each AWS // Region. Constraints: // - // * Must be in the format hh24:mi-hh24:mi. + // * Must be in the format hh24:mi-hh24:mi. // - // * Must - // be in Universal Coordinated Time (UTC). + // * Must be in + // Universal Coordinated Time (UTC). // - // * Must not conflict with the - // preferred maintenance window. + // * Must not conflict with the preferred + // maintenance window. // - // * Must be at least 30 minutes. + // * Must be at least 30 minutes. PreferredBackupWindow *string // The weekly time range during which system maintenance can occur, in Universal diff --git a/service/docdb/api_op_ModifyDBClusterParameterGroup.go b/service/docdb/api_op_ModifyDBClusterParameterGroup.go index a82c661f600..0d4bc7fa90f 100644 --- a/service/docdb/api_op_ModifyDBClusterParameterGroup.go +++ b/service/docdb/api_op_ModifyDBClusterParameterGroup.go @@ -57,13 +57,13 @@ type ModifyDBClusterParameterGroupOutput struct { // The name of a cluster parameter group. 
Constraints: // - // * Must be from 1 to 255 + // * Must be from 1 to 255 // letters or numbers. // - // * The first character must be a letter. + // * The first character must be a letter. // - // * Cannot - // end with a hyphen or contain two consecutive hyphens. + // * Cannot end with + // a hyphen or contain two consecutive hyphens. // // This value is stored as a // lowercase string. diff --git a/service/docdb/api_op_ModifyDBInstance.go b/service/docdb/api_op_ModifyDBInstance.go index 60245046c32..a3d97a1a100 100644 --- a/service/docdb/api_op_ModifyDBInstance.go +++ b/service/docdb/api_op_ModifyDBInstance.go @@ -35,7 +35,7 @@ type ModifyDBInstanceInput struct { // The instance identifier. This value is stored as a lowercase string. // Constraints: // - // * Must match the identifier of an existing DBInstance. + // * Must match the identifier of an existing DBInstance. // // This member is required. DBInstanceIdentifier *string @@ -72,13 +72,13 @@ type ModifyDBInstanceInput struct { // set Apply Immediately to false. This value is stored as a lowercase string. // Constraints: // - // * Must contain from 1 to 63 letters, numbers, or hyphens. + // * Must contain from 1 to 63 letters, numbers, or hyphens. // + // * The + // first character must be a letter. // - // * The first character must be a letter. - // - // * Cannot end with a hyphen or - // contain two consecutive hyphens. + // * Cannot end with a hyphen or contain two + // consecutive hyphens. // // Example: mydbinstance NewDBInstanceIdentifier *string diff --git a/service/docdb/api_op_RebootDBInstance.go b/service/docdb/api_op_RebootDBInstance.go index bbd8e4090df..1575b821e92 100644 --- a/service/docdb/api_op_RebootDBInstance.go +++ b/service/docdb/api_op_RebootDBInstance.go @@ -38,7 +38,7 @@ type RebootDBInstanceInput struct { // The instance identifier. This parameter is stored as a lowercase string. // Constraints: // - // * Must match the identifier of an existing DBInstance. + // * Must match the identifier of an existing DBInstance. // // This member is required. DBInstanceIdentifier *string diff --git a/service/docdb/api_op_ResetDBClusterParameterGroup.go b/service/docdb/api_op_ResetDBClusterParameterGroup.go index 1eff79d5877..77378abdf3d 100644 --- a/service/docdb/api_op_ResetDBClusterParameterGroup.go +++ b/service/docdb/api_op_ResetDBClusterParameterGroup.go @@ -57,13 +57,13 @@ type ResetDBClusterParameterGroupOutput struct { // The name of a cluster parameter group. Constraints: // - // * Must be from 1 to 255 + // * Must be from 1 to 255 // letters or numbers. // - // * The first character must be a letter. + // * The first character must be a letter. // - // * Cannot - // end with a hyphen or contain two consecutive hyphens. + // * Cannot end with + // a hyphen or contain two consecutive hyphens. // // This value is stored as a // lowercase string. diff --git a/service/docdb/api_op_RestoreDBClusterFromSnapshot.go b/service/docdb/api_op_RestoreDBClusterFromSnapshot.go index 1c317be73c0..ce3ef130561 100644 --- a/service/docdb/api_op_RestoreDBClusterFromSnapshot.go +++ b/service/docdb/api_op_RestoreDBClusterFromSnapshot.go @@ -38,16 +38,15 @@ type RestoreDBClusterFromSnapshotInput struct { // The name of the cluster to create from the snapshot or cluster snapshot. This // parameter isn't case sensitive. Constraints: // - // * Must contain from 1 to 63 + // * Must contain from 1 to 63 // letters, numbers, or hyphens. // - // * The first character must be a letter. + // * The first character must be a letter. 
// + // * Cannot + // end with a hyphen or contain two consecutive hyphens. // - // * Cannot end with a hyphen or contain two consecutive hyphens. - // - // Example: - // my-snapshot-id + // Example: my-snapshot-id // // This member is required. DBClusterIdentifier *string @@ -62,8 +61,8 @@ type RestoreDBClusterFromSnapshotInput struct { // either the name or the Amazon Resource Name (ARN) to specify a cluster snapshot. // However, you can use only the ARN to specify a snapshot. Constraints: // - // * - // Must match the identifier of an existing snapshot. + // * Must + // match the identifier of an existing snapshot. // // This member is required. SnapshotIdentifier *string @@ -97,13 +96,13 @@ type RestoreDBClusterFromSnapshotInput struct { // AWS KMS encryption key. If you do not specify a value for the KmsKeyId // parameter, then the following occurs: // - // * If the snapshot or cluster snapshot - // in SnapshotIdentifier is encrypted, then the restored cluster is encrypted using - // the AWS KMS key that was used to encrypt the snapshot or the cluster snapshot. - // + // * If the snapshot or cluster snapshot in + // SnapshotIdentifier is encrypted, then the restored cluster is encrypted using + // the AWS KMS key that was used to encrypt the snapshot or the cluster + // snapshot. // - // * If the snapshot or the cluster snapshot in SnapshotIdentifier is not - // encrypted, then the restored DB cluster is not encrypted. + // * If the snapshot or the cluster snapshot in SnapshotIdentifier is + // not encrypted, then the restored DB cluster is not encrypted. KmsKeyId *string // The port number on which the new cluster accepts connections. Constraints: Must diff --git a/service/docdb/api_op_RestoreDBClusterToPointInTime.go b/service/docdb/api_op_RestoreDBClusterToPointInTime.go index a56005a6134..f56d242698f 100644 --- a/service/docdb/api_op_RestoreDBClusterToPointInTime.go +++ b/service/docdb/api_op_RestoreDBClusterToPointInTime.go @@ -37,21 +37,21 @@ type RestoreDBClusterToPointInTimeInput struct { // The name of the new cluster to be created. Constraints: // - // * Must contain from - // 1 to 63 letters, numbers, or hyphens. + // * Must contain from 1 + // to 63 letters, numbers, or hyphens. // - // * The first character must be a - // letter. + // * The first character must be a letter. // - // * Cannot end with a hyphen or contain two consecutive hyphens. + // * + // Cannot end with a hyphen or contain two consecutive hyphens. // // This member is required. DBClusterIdentifier *string // The identifier of the source cluster from which to restore. Constraints: // - // * - // Must match the identifier of an existing DBCluster. + // * Must + // match the identifier of an existing DBCluster. // // This member is required. SourceDBClusterIdentifier *string @@ -80,15 +80,15 @@ type RestoreDBClusterToPointInTimeInput struct { // by the KmsKeyId parameter. If you do not specify a value for the KmsKeyId // parameter, then the following occurs: // - // * If the cluster is encrypted, then - // the restored cluster is encrypted using the AWS KMS key that was used to encrypt - // the source cluster. + // * If the cluster is encrypted, then the + // restored cluster is encrypted using the AWS KMS key that was used to encrypt the + // source cluster. // - // * If the cluster is not encrypted, then the restored - // cluster is not encrypted. + // * If the cluster is not encrypted, then the restored cluster is + // not encrypted. 
// - // If DBClusterIdentifier refers to a cluster that is - // not encrypted, then the restore request is rejected. + // If DBClusterIdentifier refers to a cluster that is not + // encrypted, then the restore request is rejected. KmsKeyId *string // The port number on which the new cluster accepts connections. Constraints: Must @@ -98,16 +98,16 @@ type RestoreDBClusterToPointInTimeInput struct { // The date and time to restore the cluster to. Valid values: A time in Universal // Coordinated Time (UTC) format. Constraints: // - // * Must be before the latest + // * Must be before the latest // restorable time for the instance. // - // * Must be specified if the + // * Must be specified if the // UseLatestRestorableTime parameter is not provided. // - // * Cannot be specified if - // the UseLatestRestorableTime parameter is true. + // * Cannot be specified if the + // UseLatestRestorableTime parameter is true. // - // * Cannot be specified if the + // * Cannot be specified if the // RestoreType parameter is copy-on-write. // // Example: 2015-03-07T23:45:00Z diff --git a/service/docdb/types/types.go b/service/docdb/types/types.go index 0cf581447e8..9a9af676bb1 100644 --- a/service/docdb/types/types.go +++ b/service/docdb/types/types.go @@ -217,16 +217,15 @@ type DBClusterRole struct { // Describes the state of association between the IAM role and the cluster. The // Status property returns one of the following values: // - // * ACTIVE - The IAM - // role ARN is associated with the cluster and can be used to access other AWS - // services on your behalf. + // * ACTIVE - The IAM role + // ARN is associated with the cluster and can be used to access other AWS services + // on your behalf. // - // * PENDING - The IAM role ARN is being associated - // with the DB cluster. + // * PENDING - The IAM role ARN is being associated with the DB + // cluster. // - // * INVALID - The IAM role ARN is associated with the - // cluster, but the cluster cannot assume the IAM role to access other AWS services - // on your behalf. + // * INVALID - The IAM role ARN is associated with the cluster, but the + // cluster cannot assume the IAM role to access other AWS services on your behalf. Status *string } diff --git a/service/dynamodb/api_op_BatchGetItem.go b/service/dynamodb/api_op_BatchGetItem.go index 3c6a5e1da1d..47286ed86f9 100644 --- a/service/dynamodb/api_op_BatchGetItem.go +++ b/service/dynamodb/api_op_BatchGetItem.go @@ -74,58 +74,57 @@ type BatchGetItemInput struct { // per BatchGetItem request. Each element in the map of items to retrieve consists // of the following: // - // * ConsistentRead - If true, a strongly consistent read is + // * ConsistentRead - If true, a strongly consistent read is // used; if false (the default), an eventually consistent read is used. // - // * + // * // ExpressionAttributeNames - One or more substitution tokens for attribute names // in the ProjectionExpression parameter. The following are some use cases for // using ExpressionAttributeNames: // - // * To access an attribute whose name - // conflicts with a DynamoDB reserved word. + // * To access an attribute whose name conflicts + // with a DynamoDB reserved word. // - // * To create a placeholder for - // repeating occurrences of an attribute name in an expression. + // * To create a placeholder for repeating + // occurrences of an attribute name in an expression. // - // * To - // prevent special characters in an attribute name from being misinterpreted in an - // expression. 
+ // * To prevent special + // characters in an attribute name from being misinterpreted in an expression. // - // Use the # character in an expression to dereference an - // attribute name. For example, consider the following attribute name: + // Use + // the # character in an expression to dereference an attribute name. For example, + // consider the following attribute name: // - // * - // Percentile + // * Percentile // - // The name of this attribute conflicts with a reserved word, so it - // cannot be used directly in an expression. (For the complete list of reserved - // words, see Reserved Words + // The name of this attribute + // conflicts with a reserved word, so it cannot be used directly in an expression. + // (For the complete list of reserved words, see Reserved Words // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) // in the Amazon DynamoDB Developer Guide). To work around this, you could specify // the following for ExpressionAttributeNames: // - // * {"#P":"Percentile"} + // * {"#P":"Percentile"} // + // You could + // then use this substitution in an expression, as in this example: // - // You could then use this substitution in an expression, as in this example: + // * #P = + // :val // - // - // * #P = :val - // - // Tokens that begin with the : character are expression attribute - // values, which are placeholders for the actual value at runtime. For more - // information about expression attribute names, see Accessing Item Attributes + // Tokens that begin with the : character are expression attribute values, + // which are placeholders for the actual value at runtime. For more information + // about expression attribute names, see Accessing Item Attributes // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) // in the Amazon DynamoDB Developer Guide. // - // * Keys - An array of primary key + // * Keys - An array of primary key // attribute values that define specific items in the table. For each primary key, // you must provide all of the key attributes. For example, with a simple primary // key, you only need to provide the partition key value. For a composite key, you // must provide both the partition key value and the sort key value. // - // * + // * // ProjectionExpression - A string that identifies one or more attributes to // retrieve from the table. These attributes can include scalars, sets, or elements // of a JSON document. The attributes in the expression must be separated by @@ -135,8 +134,8 @@ type BatchGetItemInput struct { // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) // in the Amazon DynamoDB Developer Guide. // - // * AttributesToGet - This is a - // legacy parameter. Use ProjectionExpression instead. For more information, see + // * AttributesToGet - This is a legacy + // parameter. Use ProjectionExpression instead. For more information, see // AttributesToGet // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html) // in the Amazon DynamoDB Developer Guide. 
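// Illustrative sketch, assuming an already-configured *dynamodb.Client and the
// pointer-style shapes shown in this patch (map[string]*types.KeysAndAttributes,
// []map[string]*types.AttributeValue); later SDK releases changed these shapes.
// It exercises the RequestItems fields documented above: ExpressionAttributeNames
// maps the placeholder #P to the reserved word Percentile, which
// ProjectionExpression then references. ReturnConsumedCapacity is documented in
// the following hunk; its constant name assumes the enum renaming applied in this
// patch. The function, table, and attribute names are hypothetical.
package dynamodbexample

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

// batchGetPercentiles reads the Percentile attribute for the given primary keys.
// The caller builds the key maps; constructing AttributeValue literals is out of
// scope for this sketch.
func batchGetPercentiles(ctx context.Context, svc *dynamodb.Client, table string, keys []map[string]*types.AttributeValue) (*dynamodb.BatchGetItemOutput, error) {
	return svc.BatchGetItem(ctx, &dynamodb.BatchGetItemInput{
		RequestItems: map[string]*types.KeysAndAttributes{
			table: {
				// Primary-key values identifying the items to read.
				Keys: keys,
				// Strongly consistent reads instead of the eventually consistent default.
				ConsistentRead: aws.Bool(true),
				// #P dereferences the reserved word "Percentile", as described above.
				ProjectionExpression:     aws.String("#P"),
				ExpressionAttributeNames: map[string]*string{"#P": aws.String("Percentile")},
			},
		},
		// Request the aggregate consumed capacity for the whole operation.
		ReturnConsumedCapacity: types.ReturnConsumedCapacityTotal,
	})
}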
@@ -147,17 +146,17 @@ type BatchGetItemInput struct { // Determines the level of detail about provisioned throughput consumption that is // returned in the response: // - // * INDEXES - The response includes the aggregate + // * INDEXES - The response includes the aggregate // ConsumedCapacity for the operation, together with ConsumedCapacity for each // table and secondary index that was accessed. Note that some operations, such as // GetItem and BatchGetItem, do not access any indexes at all. In these cases, - // specifying INDEXES will only return ConsumedCapacity information for table(s). - // + // specifying INDEXES will only return ConsumedCapacity information for + // table(s). // - // * TOTAL - The response includes only the aggregate ConsumedCapacity for the - // operation. + // * TOTAL - The response includes only the aggregate ConsumedCapacity + // for the operation. // - // * NONE - No ConsumedCapacity details are included in the + // * NONE - No ConsumedCapacity details are included in the // response. ReturnConsumedCapacity types.ReturnConsumedCapacity } @@ -168,10 +167,10 @@ type BatchGetItemOutput struct { // The read capacity units consumed by the entire BatchGetItem operation. Each // element consists of: // - // * TableName - The table that consumed the provisioned + // * TableName - The table that consumed the provisioned // throughput. // - // * CapacityUnits - The total number of capacity units consumed. + // * CapacityUnits - The total number of capacity units consumed. ConsumedCapacity []*types.ConsumedCapacity // A map of table name to a list of items. Each object in Responses consists of a @@ -185,20 +184,20 @@ type BatchGetItemOutput struct { // For more information, see RequestItems in the Request Parameters section. Each // element consists of: // - // * Keys - An array of primary key attribute values that + // * Keys - An array of primary key attribute values that // define specific items in the table. // - // * ProjectionExpression - One or more + // * ProjectionExpression - One or more // attributes to be retrieved from the table or index. By default, all attributes // are returned. If a requested attribute is not found, it does not appear in the // result. // - // * ConsistentRead - The consistency of a read operation. If set to - // true, then a strongly consistent read is used; otherwise, an eventually - // consistent read is used. + // * ConsistentRead - The consistency of a read operation. If set to true, + // then a strongly consistent read is used; otherwise, an eventually consistent + // read is used. // - // If there are no unprocessed keys remaining, the - // response contains an empty UnprocessedKeys map. + // If there are no unprocessed keys remaining, the response contains + // an empty UnprocessedKeys map. UnprocessedKeys map[string]*types.KeysAndAttributes // Metadata pertaining to the operation's result. diff --git a/service/dynamodb/api_op_BatchWriteItem.go b/service/dynamodb/api_op_BatchWriteItem.go index 98b4fffe4de..4732bac7e34 100644 --- a/service/dynamodb/api_op_BatchWriteItem.go +++ b/service/dynamodb/api_op_BatchWriteItem.go @@ -52,27 +52,27 @@ import ( // one or more of the following is true, DynamoDB rejects the entire batch write // operation: // -// * One or more tables specified in the BatchWriteItem request -// does not exist. +// * One or more tables specified in the BatchWriteItem request does +// not exist. 
// -// * Primary key attributes specified on an item in the -// request do not match those in the corresponding table's primary key schema. +// * Primary key attributes specified on an item in the request do not +// match those in the corresponding table's primary key schema. // +// * You try to +// perform multiple operations on the same item in the same BatchWriteItem request. +// For example, you cannot put and delete the same item in the same BatchWriteItem +// request. // -// * You try to perform multiple operations on the same item in the same -// BatchWriteItem request. For example, you cannot put and delete the same item in -// the same BatchWriteItem request. +// * Your request contains at least two items with identical hash and +// range keys (which essentially is two put operations). // -// * Your request contains at least two items -// with identical hash and range keys (which essentially is two put operations). +// * There are more than 25 +// requests in the batch. // +// * Any individual item in a batch exceeds 400 KB. // -// * There are more than 25 requests in the batch. -// -// * Any individual item in a -// batch exceeds 400 KB. -// -// * The total request size exceeds 16 MB. +// * The +// total request size exceeds 16 MB. func (c *Client) BatchWriteItem(ctx context.Context, params *BatchWriteItemInput, optFns ...func(*Options)) (*BatchWriteItemOutput, error) { if params == nil { params = &BatchWriteItemInput{} @@ -95,28 +95,28 @@ type BatchWriteItemInput struct { // performed (DeleteRequest or PutRequest). Each element in the map consists of the // following: // - // * DeleteRequest - Perform a DeleteItem operation on the - // specified item. The item to be deleted is identified by a Key subelement: - // - // - // * Key - A map of primary key attribute values that uniquely identify the item. - // Each entry in this map consists of an attribute name and an attribute value. For - // each primary key, you must provide all of the key attributes. For example, with - // a simple primary key, you only need to provide a value for the partition key. - // For a composite primary key, you must provide values for both the partition key - // and the sort key. - // - // * PutRequest - Perform a PutItem operation on the - // specified item. The item to be put is identified by an Item subelement: - // - // - // * Item - A map of attributes and their values. Each entry in this map consists - // of an attribute name and an attribute value. Attribute values must not be null; - // string and binary type attributes must have lengths greater than zero; and set - // type attributes must not be empty. Requests that contain empty values are - // rejected with a ValidationException exception. If you specify any attributes - // that are part of an index key, then the data types for those attributes must - // match those of the schema in the table's attribute definition. + // * DeleteRequest - Perform a DeleteItem operation on the specified + // item. The item to be deleted is identified by a Key subelement: + // + // * Key - A map + // of primary key attribute values that uniquely identify the item. Each entry in + // this map consists of an attribute name and an attribute value. For each primary + // key, you must provide all of the key attributes. For example, with a simple + // primary key, you only need to provide a value for the partition key. For a + // composite primary key, you must provide values for both the partition key and + // the sort key. 
+ // + // * PutRequest - Perform a PutItem operation on the specified item. + // The item to be put is identified by an Item subelement: + // + // * Item - A map of + // attributes and their values. Each entry in this map consists of an attribute + // name and an attribute value. Attribute values must not be null; string and + // binary type attributes must have lengths greater than zero; and set type + // attributes must not be empty. Requests that contain empty values are rejected + // with a ValidationException exception. If you specify any attributes that are + // part of an index key, then the data types for those attributes must match those + // of the schema in the table's attribute definition. // // This member is required. RequestItems map[string][]*types.WriteRequest @@ -124,17 +124,17 @@ type BatchWriteItemInput struct { // Determines the level of detail about provisioned throughput consumption that is // returned in the response: // - // * INDEXES - The response includes the aggregate + // * INDEXES - The response includes the aggregate // ConsumedCapacity for the operation, together with ConsumedCapacity for each // table and secondary index that was accessed. Note that some operations, such as // GetItem and BatchGetItem, do not access any indexes at all. In these cases, - // specifying INDEXES will only return ConsumedCapacity information for table(s). - // + // specifying INDEXES will only return ConsumedCapacity information for + // table(s). // - // * TOTAL - The response includes only the aggregate ConsumedCapacity for the - // operation. + // * TOTAL - The response includes only the aggregate ConsumedCapacity + // for the operation. // - // * NONE - No ConsumedCapacity details are included in the + // * NONE - No ConsumedCapacity details are included in the // response. ReturnConsumedCapacity types.ReturnConsumedCapacity @@ -151,10 +151,10 @@ type BatchWriteItemOutput struct { // The capacity units consumed by the entire BatchWriteItem operation. Each element // consists of: // - // * TableName - The table that consumed the provisioned + // * TableName - The table that consumed the provisioned // throughput. // - // * CapacityUnits - The total number of capacity units consumed. + // * CapacityUnits - The total number of capacity units consumed. ConsumedCapacity []*types.ConsumedCapacity // A list of tables that were processed by BatchWriteItem and, for each table, @@ -162,10 +162,10 @@ type BatchWriteItemOutput struct { // DeleteItem or PutItem operations. Each entry consists of the following // subelements: // - // * ItemCollectionKey - The partition key value of the item + // * ItemCollectionKey - The partition key value of the item // collection. This is the same as the partition key value of the item. // - // * + // * // SizeEstimateRangeGB - An estimate of item collection size, expressed in GB. This // is a two-element array containing a lower bound and an upper bound for the // estimate. The estimate includes the size of all the items in the table, plus the @@ -182,29 +182,29 @@ type BatchWriteItemOutput struct { // UnprocessedItems entry consists of a table name and, for that table, a list of // operations to perform (DeleteRequest or PutRequest). // - // * DeleteRequest - - // Perform a DeleteItem operation on the specified item. The item to be deleted is + // * DeleteRequest - Perform + // a DeleteItem operation on the specified item. 
The item to be deleted is // identified by a Key subelement: // - // * Key - A map of primary key attribute - // values that uniquely identify the item. Each entry in this map consists of an - // attribute name and an attribute value. - // - // * PutRequest - Perform a PutItem - // operation on the specified item. The item to be put is identified by an Item - // subelement: - // - // * Item - A map of attributes and their values. Each entry - // in this map consists of an attribute name and an attribute value. Attribute - // values must not be null; string and binary type attributes must have lengths - // greater than zero; and set type attributes must not be empty. Requests that - // contain empty values will be rejected with a ValidationException exception. If - // you specify any attributes that are part of an index key, then the data types - // for those attributes must match those of the schema in the table's attribute - // definition. - // - // If there are no unprocessed items remaining, the response contains - // an empty UnprocessedItems map. + // * Key - A map of primary key attribute values + // that uniquely identify the item. Each entry in this map consists of an attribute + // name and an attribute value. + // + // * PutRequest - Perform a PutItem operation on the + // specified item. The item to be put is identified by an Item subelement: + // + // * Item + // - A map of attributes and their values. Each entry in this map consists of an + // attribute name and an attribute value. Attribute values must not be null; string + // and binary type attributes must have lengths greater than zero; and set type + // attributes must not be empty. Requests that contain empty values will be + // rejected with a ValidationException exception. If you specify any attributes + // that are part of an index key, then the data types for those attributes must + // match those of the schema in the table's attribute definition. + // + // If there are no + // unprocessed items remaining, the response contains an empty UnprocessedItems + // map. UnprocessedItems map[string][]*types.WriteRequest // Metadata pertaining to the operation's result. diff --git a/service/dynamodb/api_op_CreateBackup.go b/service/dynamodb/api_op_CreateBackup.go index 60217420f1f..14b09809313 100644 --- a/service/dynamodb/api_op_CreateBackup.go +++ b/service/dynamodb/api_op_CreateBackup.go @@ -26,15 +26,14 @@ import ( // does not support causal consistency. Along with data, the following are also // included on the backups: // -// * Global secondary indexes (GSIs) +// * Global secondary indexes (GSIs) // -// * Local -// secondary indexes (LSIs) +// * Local secondary +// indexes (LSIs) // -// * Streams +// * Streams // -// * Provisioned read and write -// capacity +// * Provisioned read and write capacity func (c *Client) CreateBackup(ctx context.Context, params *CreateBackupInput, optFns ...func(*Options)) (*CreateBackupOutput, error) { if params == nil { params = &CreateBackupInput{} diff --git a/service/dynamodb/api_op_CreateGlobalTable.go b/service/dynamodb/api_op_CreateGlobalTable.go index 3d8d23749ed..95df9e0e4f3 100644 --- a/service/dynamodb/api_op_CreateGlobalTable.go +++ b/service/dynamodb/api_op_CreateGlobalTable.go @@ -18,44 +18,44 @@ import ( // of global tables. If you want to add a new replica table to a global table, each // of the following conditions must be true: // -// * The table must have the same +// * The table must have the same // primary key as all of the other replicas. 
// -// * The table must have the same -// name as all of the other replicas. +// * The table must have the same name +// as all of the other replicas. // -// * The table must have DynamoDB Streams -// enabled, with the stream containing both the new and the old images of the -// item. +// * The table must have DynamoDB Streams enabled, +// with the stream containing both the new and the old images of the item. // -// * None of the replica tables in the global table can contain any -// data. +// * None +// of the replica tables in the global table can contain any data. // -// If global secondary indexes are specified, then the following conditions -// must also be met: +// If global +// secondary indexes are specified, then the following conditions must also be +// met: // -// * The global secondary indexes must have the same name. +// * The global secondary indexes must have the same name. // +// * The global +// secondary indexes must have the same hash key and sort key (if present). // -// * The global secondary indexes must have the same hash key and sort key (if -// present). +// If +// local secondary indexes are specified, then the following conditions must also +// be met: // -// If local secondary indexes are specified, then the following -// conditions must also be met: +// * The local secondary indexes must have the same name. // -// * The local secondary indexes must have the -// same name. +// * The local +// secondary indexes must have the same hash key and sort key (if present). // -// * The local secondary indexes must have the same hash key and -// sort key (if present). -// -// Write capacity settings should be set consistently -// across your replica tables and secondary indexes. DynamoDB strongly recommends -// enabling auto scaling to manage the write capacity settings for all of your -// global tables replicas and indexes. If you prefer to manage write capacity -// settings manually, you should provision equal replicated write capacity units to -// your replica tables. You should also provision equal replicated write capacity -// units to matching secondary indexes across your global table. +// Write +// capacity settings should be set consistently across your replica tables and +// secondary indexes. DynamoDB strongly recommends enabling auto scaling to manage +// the write capacity settings for all of your global tables replicas and indexes. +// If you prefer to manage write capacity settings manually, you should provision +// equal replicated write capacity units to your replica tables. You should also +// provision equal replicated write capacity units to matching secondary indexes +// across your global table. func (c *Client) CreateGlobalTable(ctx context.Context, params *CreateGlobalTableInput, optFns ...func(*Options)) (*CreateGlobalTableOutput, error) { if params == nil { params = &CreateGlobalTableInput{} diff --git a/service/dynamodb/api_op_CreateTable.go b/service/dynamodb/api_op_CreateTable.go index 373ea5bac49..b2c14a37581 100644 --- a/service/dynamodb/api_op_CreateTable.go +++ b/service/dynamodb/api_op_CreateTable.go @@ -53,28 +53,27 @@ type CreateTableInput struct { // in the Amazon DynamoDB Developer Guide. Each KeySchemaElement in the array is // composed of: // - // * AttributeName - The name of this key attribute. - // - // * - // KeyType - The role that the key attribute will assume: - // - // * HASH - - // partition key - // - // * RANGE - sort key - // - // The partition key of an item is also - // known as its hash attribute. 
The term "hash attribute" derives from the DynamoDB - // usage of an internal hash function to evenly distribute data items across - // partitions, based on their partition key values. The sort key of an item is also - // known as its range attribute. The term "range attribute" derives from the way - // DynamoDB stores items with the same partition key physically close together, in - // sorted order by the sort key value. For a simple primary key (partition key), - // you must provide exactly one element with a KeyType of HASH. For a composite - // primary key (partition key and sort key), you must provide exactly two elements, - // in this order: The first element must have a KeyType of HASH, and the second - // element must have a KeyType of RANGE. For more information, see Working with - // Tables + // * AttributeName - The name of this key attribute. + // + // * KeyType - The + // role that the key attribute will assume: + // + // * HASH - partition key + // + // * RANGE - sort + // key + // + // The partition key of an item is also known as its hash attribute. The term + // "hash attribute" derives from the DynamoDB usage of an internal hash function to + // evenly distribute data items across partitions, based on their partition key + // values. The sort key of an item is also known as its range attribute. The term + // "range attribute" derives from the way DynamoDB stores items with the same + // partition key physically close together, in sorted order by the sort key value. + // For a simple primary key (partition key), you must provide exactly one element + // with a KeyType of HASH. For a composite primary key (partition key and sort + // key), you must provide exactly two elements, in this order: The first element + // must have a KeyType of HASH, and the second element must have a KeyType of + // RANGE. For more information, see Working with Tables // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#WorkingWithTables.primary.key) // in the Amazon DynamoDB Developer Guide. // @@ -89,13 +88,13 @@ type CreateTableInput struct { // Controls how you are charged for read and write throughput and how you manage // capacity. This setting can be changed later. // - // * PROVISIONED - We recommend - // using PROVISIONED for predictable workloads. PROVISIONED sets the billing mode - // to Provisioned Mode + // * PROVISIONED - We recommend using + // PROVISIONED for predictable workloads. PROVISIONED sets the billing mode to + // Provisioned Mode // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.ProvisionedThroughput.Manual). // - // - // * PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable + // * + // PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable // workloads. PAY_PER_REQUEST sets the billing mode to On-Demand Mode // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.OnDemand). BillingMode types.BillingMode @@ -103,39 +102,38 @@ type CreateTableInput struct { // One or more global secondary indexes (the maximum is 20) to be created on the // table. Each global secondary index in the array includes the following: // - // * + // * // IndexName - The name of the global secondary index. Must be unique only for this // table. // - // * KeySchema - Specifies the key schema for the global secondary + // * KeySchema - Specifies the key schema for the global secondary // index. 
// - // * Projection - Specifies attributes that are copied (projected) from - // the table into the index. These are in addition to the primary key attributes - // and index key attributes, which are automatically projected. Each attribute + // * Projection - Specifies attributes that are copied (projected) from the + // table into the index. These are in addition to the primary key attributes and + // index key attributes, which are automatically projected. Each attribute // specification is composed of: // - // * ProjectionType - One of the - // following: + // * ProjectionType - One of the following: // - // * KEYS_ONLY - Only the index and primary keys are - // projected into the index. + // * + // KEYS_ONLY - Only the index and primary keys are projected into the index. // - // * INCLUDE - Only the specified table - // attributes are projected into the index. The list of projected attributes is in - // NonKeyAttributes. + // * + // INCLUDE - Only the specified table attributes are projected into the index. The + // list of projected attributes is in NonKeyAttributes. // - // * ALL - All of the table attributes are projected - // into the index. + // * ALL - All of the table + // attributes are projected into the index. // - // * NonKeyAttributes - A list of one or more non-key - // attribute names that are projected into the secondary index. The total count of - // attributes provided in NonKeyAttributes, summed across all of the secondary - // indexes, must not exceed 100. If you project the same attribute into two - // different indexes, this counts as two distinct attributes when determining the - // total. + // * NonKeyAttributes - A list of one or + // more non-key attribute names that are projected into the secondary index. The + // total count of attributes provided in NonKeyAttributes, summed across all of the + // secondary indexes, must not exceed 100. If you project the same attribute into + // two different indexes, this counts as two distinct attributes when determining + // the total. // - // * ProvisionedThroughput - The provisioned throughput settings for + // * ProvisionedThroughput - The provisioned throughput settings for // the global secondary index, consisting of read and write capacity units. GlobalSecondaryIndexes []*types.GlobalSecondaryIndex @@ -145,37 +143,36 @@ type CreateTableInput struct { // index is unconstrained. Each local secondary index in the array includes the // following: // - // * IndexName - The name of the local secondary index. Must be - // unique only for this table. + // * IndexName - The name of the local secondary index. Must be unique + // only for this table. // - // * KeySchema - Specifies the key schema for the - // local secondary index. The key schema must begin with the same partition key as - // the table. + // * KeySchema - Specifies the key schema for the local + // secondary index. The key schema must begin with the same partition key as the + // table. // - // * Projection - Specifies attributes that are copied (projected) - // from the table into the index. These are in addition to the primary key - // attributes and index key attributes, which are automatically projected. Each - // attribute specification is composed of: + // * Projection - Specifies attributes that are copied (projected) from the + // table into the index. These are in addition to the primary key attributes and + // index key attributes, which are automatically projected. 
Each attribute + // specification is composed of: // - // * ProjectionType - One of the - // following: + // * ProjectionType - One of the following: // - // * KEYS_ONLY - Only the index and primary keys are - // projected into the index. + // * + // KEYS_ONLY - Only the index and primary keys are projected into the index. // - // * INCLUDE - Only the specified table - // attributes are projected into the index. The list of projected attributes is in - // NonKeyAttributes. + // * + // INCLUDE - Only the specified table attributes are projected into the index. The + // list of projected attributes is in NonKeyAttributes. // - // * ALL - All of the table attributes are projected - // into the index. + // * ALL - All of the table + // attributes are projected into the index. // - // * NonKeyAttributes - A list of one or more non-key - // attribute names that are projected into the secondary index. The total count of - // attributes provided in NonKeyAttributes, summed across all of the secondary - // indexes, must not exceed 100. If you project the same attribute into two - // different indexes, this counts as two distinct attributes when determining the - // total. + // * NonKeyAttributes - A list of one or + // more non-key attribute names that are projected into the secondary index. The + // total count of attributes provided in NonKeyAttributes, summed across all of the + // secondary indexes, must not exceed 100. If you project the same attribute into + // two different indexes, this counts as two distinct attributes when determining + // the total. LocalSecondaryIndexes []*types.LocalSecondaryIndex // Represents the provisioned throughput settings for a specified table or index. @@ -193,26 +190,26 @@ type CreateTableInput struct { // The settings for DynamoDB Streams on the table. These settings consist of: // - // - // * StreamEnabled - Indicates whether DynamoDB Streams is to be enabled (true) or + // * + // StreamEnabled - Indicates whether DynamoDB Streams is to be enabled (true) or // disabled (false). // - // * StreamViewType - When an item in the table is modified, + // * StreamViewType - When an item in the table is modified, // StreamViewType determines what information is written to the table's stream. // Valid values for StreamViewType are: // - // * KEYS_ONLY - Only the key - // attributes of the modified item are written to the stream. + // * KEYS_ONLY - Only the key attributes of + // the modified item are written to the stream. // - // * NEW_IMAGE - // - The entire item, as it appears after it was modified, is written to the - // stream. + // * NEW_IMAGE - The entire item, as + // it appears after it was modified, is written to the stream. // - // * OLD_IMAGE - The entire item, as it appeared before it was - // modified, is written to the stream. + // * OLD_IMAGE - The + // entire item, as it appeared before it was modified, is written to the stream. // - // * NEW_AND_OLD_IMAGES - Both the new - // and the old item images of the item are written to the stream. + // * + // NEW_AND_OLD_IMAGES - Both the new and the old item images of the item are + // written to the stream. StreamSpecification *types.StreamSpecification // A list of key-value pairs to label the table. 
For more information, see Tagging diff --git a/service/dynamodb/api_op_DeleteItem.go b/service/dynamodb/api_op_DeleteItem.go index c810c67fcd2..9e7831b6aeb 100644 --- a/service/dynamodb/api_op_DeleteItem.go +++ b/service/dynamodb/api_op_DeleteItem.go @@ -55,15 +55,15 @@ type DeleteItemInput struct { // A condition that must be satisfied in order for a conditional DeleteItem to // succeed. An expression can contain any of the following: // - // * Functions: + // * Functions: // attribute_exists | attribute_not_exists | attribute_type | contains | // begins_with | size These function names are case-sensitive. // - // * Comparison + // * Comparison // operators: = | <> | < | > | <= | >= | BETWEEN | IN // - // * Logical operators: AND - // | OR | NOT + // * Logical operators: AND | + // OR | NOT // // For more information about condition expressions, see Condition // Expressions @@ -86,36 +86,36 @@ type DeleteItemInput struct { // One or more substitution tokens for attribute names in an expression. The // following are some use cases for using ExpressionAttributeNames: // - // * To - // access an attribute whose name conflicts with a DynamoDB reserved word. + // * To access an + // attribute whose name conflicts with a DynamoDB reserved word. // - // * - // To create a placeholder for repeating occurrences of an attribute name in an - // expression. + // * To create a + // placeholder for repeating occurrences of an attribute name in an expression. // - // * To prevent special characters in an attribute name from being - // misinterpreted in an expression. + // * + // To prevent special characters in an attribute name from being misinterpreted in + // an expression. // - // Use the # character in an expression to - // dereference an attribute name. For example, consider the following attribute - // name: + // Use the # character in an expression to dereference an attribute + // name. For example, consider the following attribute name: // - // * Percentile + // * Percentile // - // The name of this attribute conflicts with a reserved - // word, so it cannot be used directly in an expression. (For the complete list of - // reserved words, see Reserved Words + // The + // name of this attribute conflicts with a reserved word, so it cannot be used + // directly in an expression. (For the complete list of reserved words, see + // Reserved Words // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) // in the Amazon DynamoDB Developer Guide). To work around this, you could specify // the following for ExpressionAttributeNames: // - // * {"#P":"Percentile"} + // * {"#P":"Percentile"} // - // You - // could then use this substitution in an expression, as in this example: + // You could + // then use this substitution in an expression, as in this example: // - // * #P - // = :val + // * #P = + // :val // // Tokens that begin with the : character are expression attribute values, // which are placeholders for the actual value at runtime. For more information on @@ -140,17 +140,17 @@ type DeleteItemInput struct { // Determines the level of detail about provisioned throughput consumption that is // returned in the response: // - // * INDEXES - The response includes the aggregate + // * INDEXES - The response includes the aggregate // ConsumedCapacity for the operation, together with ConsumedCapacity for each // table and secondary index that was accessed. Note that some operations, such as // GetItem and BatchGetItem, do not access any indexes at all. 
In these cases, - // specifying INDEXES will only return ConsumedCapacity information for table(s). + // specifying INDEXES will only return ConsumedCapacity information for + // table(s). // + // * TOTAL - The response includes only the aggregate ConsumedCapacity + // for the operation. // - // * TOTAL - The response includes only the aggregate ConsumedCapacity for the - // operation. - // - // * NONE - No ConsumedCapacity details are included in the + // * NONE - No ConsumedCapacity details are included in the // response. ReturnConsumedCapacity types.ReturnConsumedCapacity @@ -163,11 +163,11 @@ type DeleteItemInput struct { // Use ReturnValues if you want to get the item attributes as they appeared before // they were deleted. For DeleteItem, the valid values are: // - // * NONE - If + // * NONE - If // ReturnValues is not specified, or if its value is NONE, then nothing is // returned. (This setting is the default for ReturnValues.) // - // * ALL_OLD - The + // * ALL_OLD - The // content of the old item is returned. // // The ReturnValues parameter is used by @@ -199,11 +199,11 @@ type DeleteItemOutput struct { // any local secondary indexes, this information is not returned in the response. // Each ItemCollectionMetrics element consists of: // - // * ItemCollectionKey - The + // * ItemCollectionKey - The // partition key value of the item collection. This is the same as the partition // key value of the item itself. // - // * SizeEstimateRangeGB - An estimate of item + // * SizeEstimateRangeGB - An estimate of item // collection size, in gigabytes. This value is a two-element array containing a // lower bound and an upper bound for the estimate. The estimate includes the size // of all the items in the table, plus the size of all attributes projected into diff --git a/service/dynamodb/api_op_DescribeContributorInsights.go b/service/dynamodb/api_op_DescribeContributorInsights.go index 4248c954f6c..2f853efb6be 100644 --- a/service/dynamodb/api_op_DescribeContributorInsights.go +++ b/service/dynamodb/api_op_DescribeContributorInsights.go @@ -51,19 +51,19 @@ type DescribeContributorInsightsOutput struct { // Returns information about the last failure that encountered. The most common // exceptions for a FAILED status are: // - // * LimitExceededException - Per-account + // * LimitExceededException - Per-account // Amazon CloudWatch Contributor Insights rule limit reached. Please disable // Contributor Insights for other tables/indexes OR disable Contributor Insights // rules before retrying. // - // * AccessDeniedException - Amazon CloudWatch - // Contributor Insights rules cannot be modified due to insufficient permissions. + // * AccessDeniedException - Amazon CloudWatch Contributor + // Insights rules cannot be modified due to insufficient permissions. // - // - // * AccessDeniedException - Failed to create service-linked role for Contributor + // * + // AccessDeniedException - Failed to create service-linked role for Contributor // Insights due to insufficient permissions. // - // * InternalServerError - Failed to + // * InternalServerError - Failed to // create Amazon CloudWatch Contributor Insights rules. Please retry request. 
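// Illustrative sketch (not part of the generated client): how a caller might surface
// the FailureException documented above when Contributor Insights ends up in a FAILED
// state. Written against the released aws-sdk-go-v2 surface, which may differ slightly
// from this preview snapshot; the table name is hypothetical.
package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

func reportInsightsFailure(ctx context.Context, client *dynamodb.Client) error {
	out, err := client.DescribeContributorInsights(ctx, &dynamodb.DescribeContributorInsightsInput{
		TableName: aws.String("Thread"), // hypothetical table name
	})
	if err != nil {
		return err
	}
	// Only a FAILED status carries a FailureException (e.g. LimitExceededException,
	// AccessDeniedException, InternalServerError, as listed above).
	if out.ContributorInsightsStatus == types.ContributorInsightsStatusFailed && out.FailureException != nil {
		fmt.Printf("contributor insights failed: %s: %s\n",
			aws.ToString(out.FailureException.ExceptionName),
			aws.ToString(out.FailureException.ExceptionDescription))
	}
	return nil
}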
FailureException *types.FailureException diff --git a/service/dynamodb/api_op_DescribeLimits.go b/service/dynamodb/api_op_DescribeLimits.go index bcee8ad53c3..994893d802e 100644 --- a/service/dynamodb/api_op_DescribeLimits.go +++ b/service/dynamodb/api_op_DescribeLimits.go @@ -26,45 +26,44 @@ import ( // you have enough time to apply for an increase before you hit a quota. For // example, you could use one of the AWS SDKs to do the following: // -// * Call +// * Call // DescribeLimits for a particular Region to obtain your current account quotas on // provisioned capacity there. // -// * Create a variable to hold the aggregate read +// * Create a variable to hold the aggregate read // capacity units provisioned for all your tables in that Region, and one to hold // the aggregate write capacity units. Zero them both. // -// * Call ListTables to -// obtain a list of all your DynamoDB tables. +// * Call ListTables to obtain +// a list of all your DynamoDB tables. // -// * For each table name listed by -// ListTables, do the following: +// * For each table name listed by ListTables, +// do the following: // -// * Call DescribeTable with the table -// name. +// * Call DescribeTable with the table name. // -// * Use the data returned by DescribeTable to add the read capacity -// units and write capacity units provisioned for the table itself to your -// variables. +// * Use the data +// returned by DescribeTable to add the read capacity units and write capacity +// units provisioned for the table itself to your variables. // -// * If the table has one or more global secondary indexes -// (GSIs), loop over these GSIs and add their provisioned capacity values to your -// variables as well. +// * If the table has +// one or more global secondary indexes (GSIs), loop over these GSIs and add their +// provisioned capacity values to your variables as well. // -// * Report the account quotas for that Region returned by -// DescribeLimits, along with the total current provisioned capacity levels you -// have calculated. +// * Report the account +// quotas for that Region returned by DescribeLimits, along with the total current +// provisioned capacity levels you have calculated. // -// This will let you see whether you are getting close to your -// account-level quotas. The per-table quotas apply only when you are creating a -// new table. They restrict the sum of the provisioned capacity of the new table -// itself and all its global secondary indexes. For existing tables and their GSIs, -// DynamoDB doesn't let you increase provisioned capacity extremely rapidly, but -// the only quota that applies is that the aggregate provisioned capacity over all -// your tables and GSIs cannot exceed either of the per-account quotas. -// DescribeLimits should only be called periodically. You can expect throttling -// errors if you call it more than once in a minute. The DescribeLimits Request -// element has no content. +// This will let you see whether +// you are getting close to your account-level quotas. The per-table quotas apply +// only when you are creating a new table. They restrict the sum of the provisioned +// capacity of the new table itself and all its global secondary indexes. For +// existing tables and their GSIs, DynamoDB doesn't let you increase provisioned +// capacity extremely rapidly, but the only quota that applies is that the +// aggregate provisioned capacity over all your tables and GSIs cannot exceed +// either of the per-account quotas. 
DescribeLimits should only be called +// periodically. You can expect throttling errors if you call it more than once in +// a minute. The DescribeLimits Request element has no content. func (c *Client) DescribeLimits(ctx context.Context, params *DescribeLimitsInput, optFns ...func(*Options)) (*DescribeLimitsOutput, error) { if params == nil { params = &DescribeLimitsInput{} diff --git a/service/dynamodb/api_op_GetItem.go b/service/dynamodb/api_op_GetItem.go index 9c6362cf74a..f4fb9768909 100644 --- a/service/dynamodb/api_op_GetItem.go +++ b/service/dynamodb/api_op_GetItem.go @@ -64,36 +64,36 @@ type GetItemInput struct { // One or more substitution tokens for attribute names in an expression. The // following are some use cases for using ExpressionAttributeNames: // - // * To - // access an attribute whose name conflicts with a DynamoDB reserved word. + // * To access an + // attribute whose name conflicts with a DynamoDB reserved word. // - // * - // To create a placeholder for repeating occurrences of an attribute name in an - // expression. + // * To create a + // placeholder for repeating occurrences of an attribute name in an expression. // - // * To prevent special characters in an attribute name from being - // misinterpreted in an expression. + // * + // To prevent special characters in an attribute name from being misinterpreted in + // an expression. // - // Use the # character in an expression to - // dereference an attribute name. For example, consider the following attribute - // name: + // Use the # character in an expression to dereference an attribute + // name. For example, consider the following attribute name: // - // * Percentile + // * Percentile // - // The name of this attribute conflicts with a reserved - // word, so it cannot be used directly in an expression. (For the complete list of - // reserved words, see Reserved Words + // The + // name of this attribute conflicts with a reserved word, so it cannot be used + // directly in an expression. (For the complete list of reserved words, see + // Reserved Words // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) // in the Amazon DynamoDB Developer Guide). To work around this, you could specify // the following for ExpressionAttributeNames: // - // * {"#P":"Percentile"} + // * {"#P":"Percentile"} // - // You - // could then use this substitution in an expression, as in this example: + // You could + // then use this substitution in an expression, as in this example: // - // * #P - // = :val + // * #P = + // :val // // Tokens that begin with the : character are expression attribute values, // which are placeholders for the actual value at runtime. For more information on @@ -115,17 +115,17 @@ type GetItemInput struct { // Determines the level of detail about provisioned throughput consumption that is // returned in the response: // - // * INDEXES - The response includes the aggregate + // * INDEXES - The response includes the aggregate // ConsumedCapacity for the operation, together with ConsumedCapacity for each // table and secondary index that was accessed. Note that some operations, such as // GetItem and BatchGetItem, do not access any indexes at all. In these cases, - // specifying INDEXES will only return ConsumedCapacity information for table(s). + // specifying INDEXES will only return ConsumedCapacity information for + // table(s). // + // * TOTAL - The response includes only the aggregate ConsumedCapacity + // for the operation. 
// - // * TOTAL - The response includes only the aggregate ConsumedCapacity for the - // operation. - // - // * NONE - No ConsumedCapacity details are included in the + // * NONE - No ConsumedCapacity details are included in the // response. ReturnConsumedCapacity types.ReturnConsumedCapacity } diff --git a/service/dynamodb/api_op_ListBackups.go b/service/dynamodb/api_op_ListBackups.go index ec223c3cd40..b1ce784a31d 100644 --- a/service/dynamodb/api_op_ListBackups.go +++ b/service/dynamodb/api_op_ListBackups.go @@ -39,13 +39,13 @@ type ListBackupsInput struct { // The backups from the table specified by BackupType are listed. Where BackupType // can be: // - // * USER - On-demand backup created by you. + // * USER - On-demand backup created by you. // - // * SYSTEM - On-demand - // backup automatically created by DynamoDB. + // * SYSTEM - On-demand backup + // automatically created by DynamoDB. // - // * ALL - All types of on-demand - // backups (USER and SYSTEM). + // * ALL - All types of on-demand backups (USER + // and SYSTEM). BackupType types.BackupTypeFilter // LastEvaluatedBackupArn is the Amazon Resource Name (ARN) of the backup last diff --git a/service/dynamodb/api_op_PutItem.go b/service/dynamodb/api_op_PutItem.go index f655075c536..16e71f03a6c 100644 --- a/service/dynamodb/api_op_PutItem.go +++ b/service/dynamodb/api_op_PutItem.go @@ -21,39 +21,39 @@ import ( // information on how to call the PutItem API using the AWS SDK in specific // languages, see the following: // -// * PutItem in the AWS Command Line Interface +// * PutItem in the AWS Command Line Interface // (http://docs.aws.amazon.com/goto/aws-cli/dynamodb-2012-08-10/PutItem) // -// * -// PutItem in the AWS SDK for .NET +// * PutItem +// in the AWS SDK for .NET // (http://docs.aws.amazon.com/goto/DotNetSDKV3/dynamodb-2012-08-10/PutItem) // -// * +// * // PutItem in the AWS SDK for C++ // (http://docs.aws.amazon.com/goto/SdkForCpp/dynamodb-2012-08-10/PutItem) // -// * +// * // PutItem in the AWS SDK for Go // (http://docs.aws.amazon.com/goto/SdkForGoV1/dynamodb-2012-08-10/PutItem) // -// * +// * // PutItem in the AWS SDK for Java // (http://docs.aws.amazon.com/goto/SdkForJava/dynamodb-2012-08-10/PutItem) // -// * +// * // PutItem in the AWS SDK for JavaScript // (http://docs.aws.amazon.com/goto/AWSJavaScriptSDK/dynamodb-2012-08-10/PutItem) // -// -// * PutItem in the AWS SDK for PHP V3 +// * +// PutItem in the AWS SDK for PHP V3 // (http://docs.aws.amazon.com/goto/SdkForPHPV3/dynamodb-2012-08-10/PutItem) // -// * +// * // PutItem in the AWS SDK for Python // (http://docs.aws.amazon.com/goto/boto3/dynamodb-2012-08-10/PutItem) // -// * -// PutItem in the AWS SDK for Ruby V2 +// * PutItem +// in the AWS SDK for Ruby V2 // (http://docs.aws.amazon.com/goto/SdkForRubyV2/dynamodb-2012-08-10/PutItem) // // When @@ -115,15 +115,15 @@ type PutItemInput struct { // A condition that must be satisfied in order for a conditional PutItem operation // to succeed. An expression can contain any of the following: // - // * Functions: + // * Functions: // attribute_exists | attribute_not_exists | attribute_type | contains | // begins_with | size These function names are case-sensitive. 
// - // * Comparison + // * Comparison // operators: = | <> | < | > | <= | >= | BETWEEN | IN // - // * Logical operators: AND - // | OR | NOT + // * Logical operators: AND | + // OR | NOT // // For more information on condition expressions, see Condition // Expressions @@ -146,36 +146,36 @@ type PutItemInput struct { // One or more substitution tokens for attribute names in an expression. The // following are some use cases for using ExpressionAttributeNames: // - // * To - // access an attribute whose name conflicts with a DynamoDB reserved word. + // * To access an + // attribute whose name conflicts with a DynamoDB reserved word. // - // * - // To create a placeholder for repeating occurrences of an attribute name in an - // expression. + // * To create a + // placeholder for repeating occurrences of an attribute name in an expression. // - // * To prevent special characters in an attribute name from being - // misinterpreted in an expression. + // * + // To prevent special characters in an attribute name from being misinterpreted in + // an expression. // - // Use the # character in an expression to - // dereference an attribute name. For example, consider the following attribute - // name: + // Use the # character in an expression to dereference an attribute + // name. For example, consider the following attribute name: // - // * Percentile + // * Percentile // - // The name of this attribute conflicts with a reserved - // word, so it cannot be used directly in an expression. (For the complete list of - // reserved words, see Reserved Words + // The + // name of this attribute conflicts with a reserved word, so it cannot be used + // directly in an expression. (For the complete list of reserved words, see + // Reserved Words // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) // in the Amazon DynamoDB Developer Guide). To work around this, you could specify // the following for ExpressionAttributeNames: // - // * {"#P":"Percentile"} + // * {"#P":"Percentile"} // - // You - // could then use this substitution in an expression, as in this example: + // You could + // then use this substitution in an expression, as in this example: // - // * #P - // = :val + // * #P = + // :val // // Tokens that begin with the : character are expression attribute values, // which are placeholders for the actual value at runtime. For more information on @@ -200,17 +200,17 @@ type PutItemInput struct { // Determines the level of detail about provisioned throughput consumption that is // returned in the response: // - // * INDEXES - The response includes the aggregate + // * INDEXES - The response includes the aggregate // ConsumedCapacity for the operation, together with ConsumedCapacity for each // table and secondary index that was accessed. Note that some operations, such as // GetItem and BatchGetItem, do not access any indexes at all. In these cases, - // specifying INDEXES will only return ConsumedCapacity information for table(s). - // + // specifying INDEXES will only return ConsumedCapacity information for + // table(s). // - // * TOTAL - The response includes only the aggregate ConsumedCapacity for the - // operation. + // * TOTAL - The response includes only the aggregate ConsumedCapacity + // for the operation. // - // * NONE - No ConsumedCapacity details are included in the + // * NONE - No ConsumedCapacity details are included in the // response. 
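// Illustrative sketch (not part of the generated client): a conditional PutItem that
// exercises the ConditionExpression and ReturnConsumedCapacity options documented
// above. Written against the released aws-sdk-go-v2 surface, which may differ from
// this preview snapshot (AttributeValue is a union type there); the table and
// attribute names are hypothetical.
package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

func putIfAbsent(ctx context.Context, client *dynamodb.Client) error {
	out, err := client.PutItem(ctx, &dynamodb.PutItemInput{
		TableName: aws.String("Thread"), // hypothetical table name
		Item: map[string]types.AttributeValue{
			"Id":    &types.AttributeValueMemberS{Value: "thread-123"},
			"Title": &types.AttributeValueMemberS{Value: "Hello"},
		},
		// Reject the write instead of silently overwriting an existing item.
		ConditionExpression: aws.String("attribute_not_exists(Id)"),
		// TOTAL returns only the aggregate ConsumedCapacity, per the list above.
		ReturnConsumedCapacity: types.ReturnConsumedCapacityTotal,
	})
	if err != nil {
		return err
	}
	if out.ConsumedCapacity != nil {
		fmt.Printf("consumed %v capacity units\n", aws.ToFloat64(out.ConsumedCapacity.CapacityUnits))
	}
	return nil
}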
ReturnConsumedCapacity types.ReturnConsumedCapacity @@ -221,18 +221,19 @@ type PutItemInput struct { ReturnItemCollectionMetrics types.ReturnItemCollectionMetrics // Use ReturnValues if you want to get the item attributes as they appeared before - // they were updated with the PutItem request. For PutItem, the valid values are: - // + // they were updated with the PutItem request. For PutItem, the valid values + // are: // - // * NONE - If ReturnValues is not specified, or if its value is NONE, then nothing - // is returned. (This setting is the default for ReturnValues.) + // * NONE - If ReturnValues is not specified, or if its value is NONE, then + // nothing is returned. (This setting is the default for ReturnValues.) // - // * ALL_OLD - If - // PutItem overwrote an attribute name-value pair, then the content of the old item - // is returned. + // * ALL_OLD + // - If PutItem overwrote an attribute name-value pair, then the content of the old + // item is returned. // - // The ReturnValues parameter is used by several DynamoDB operations; - // however, PutItem does not recognize any values other than NONE or ALL_OLD. + // The ReturnValues parameter is used by several DynamoDB + // operations; however, PutItem does not recognize any values other than NONE or + // ALL_OLD. ReturnValues types.ReturnValue } @@ -259,11 +260,11 @@ type PutItemOutput struct { // any local secondary indexes, this information is not returned in the response. // Each ItemCollectionMetrics element consists of: // - // * ItemCollectionKey - The + // * ItemCollectionKey - The // partition key value of the item collection. This is the same as the partition // key value of the item itself. // - // * SizeEstimateRangeGB - An estimate of item + // * SizeEstimateRangeGB - An estimate of item // collection size, in gigabytes. This value is a two-element array containing a // lower bound and an upper bound for the estimate. The estimate includes the size // of all the items in the table, plus the size of all attributes projected into diff --git a/service/dynamodb/api_op_Query.go b/service/dynamodb/api_op_Query.go index 915f84be16e..8878e065164 100644 --- a/service/dynamodb/api_op_Query.go +++ b/service/dynamodb/api_op_Query.go @@ -99,36 +99,36 @@ type QueryInput struct { // One or more substitution tokens for attribute names in an expression. The // following are some use cases for using ExpressionAttributeNames: // - // * To - // access an attribute whose name conflicts with a DynamoDB reserved word. + // * To access an + // attribute whose name conflicts with a DynamoDB reserved word. // - // * - // To create a placeholder for repeating occurrences of an attribute name in an - // expression. + // * To create a + // placeholder for repeating occurrences of an attribute name in an expression. // - // * To prevent special characters in an attribute name from being - // misinterpreted in an expression. + // * + // To prevent special characters in an attribute name from being misinterpreted in + // an expression. // - // Use the # character in an expression to - // dereference an attribute name. For example, consider the following attribute - // name: + // Use the # character in an expression to dereference an attribute + // name. For example, consider the following attribute name: // - // * Percentile + // * Percentile // - // The name of this attribute conflicts with a reserved - // word, so it cannot be used directly in an expression. 
(For the complete list of - // reserved words, see Reserved Words + // The + // name of this attribute conflicts with a reserved word, so it cannot be used + // directly in an expression. (For the complete list of reserved words, see + // Reserved Words // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) // in the Amazon DynamoDB Developer Guide). To work around this, you could specify // the following for ExpressionAttributeNames: // - // * {"#P":"Percentile"} + // * {"#P":"Percentile"} // - // You - // could then use this substitution in an expression, as in this example: + // You could + // then use this substitution in an expression, as in this example: // - // * #P - // = :val + // * #P = + // :val // // Tokens that begin with the : character are expression attribute values, // which are placeholders for the actual value at runtime. For more information on @@ -190,49 +190,50 @@ type QueryInput struct { // // :sortkeyval Valid comparisons for the sort key condition are as follows: // + // * + // sortKeyName=:sortkeyval - true if the sort key value is equal to :sortkeyval. // - // * sortKeyName=:sortkeyval - true if the sort key value is equal to + // * + // sortKeyName<:sortkeyval - true if the sort key value is less than // :sortkeyval. // - // * sortKeyName<:sortkeyval - true if the sort key value is less - // than :sortkeyval. - // - // * sortKeyName<=:sortkeyval - true if the sort key value - // is less than or equal to :sortkeyval. + // * sortKeyName<=:sortkeyval - true if the sort key value is less + // than or equal to :sortkeyval. // - // * sortKeyName>:sortkeyval - true if - // the sort key value is greater than :sortkeyval. + // * sortKeyName>:sortkeyval - true if the sort key + // value is greater than :sortkeyval. // - // * sortKeyName>= :sortkeyval - // - true if the sort key value is greater than or equal to :sortkeyval. + // * sortKeyName>= :sortkeyval - true if the + // sort key value is greater than or equal to :sortkeyval. // - // * + // * // sortKeyNameBETWEEN:sortkeyval1AND:sortkeyval2 - true if the sort key value is - // greater than or equal to :sortkeyval1, and less than or equal to :sortkeyval2. - // + // greater than or equal to :sortkeyval1, and less than or equal to + // :sortkeyval2. // - // * begins_with (sortKeyName, :sortkeyval) - true if the sort key value begins - // with a particular operand. (You cannot use this function with a sort key that is - // of type Number.) Note that the function name begins_with is case-sensitive. + // * begins_with (sortKeyName, :sortkeyval) - true if the sort key + // value begins with a particular operand. (You cannot use this function with a + // sort key that is of type Number.) Note that the function name begins_with is + // case-sensitive. // - // Use - // the ExpressionAttributeValues parameter to replace tokens such as :partitionval - // and :sortval with actual values at runtime. You can optionally use the - // ExpressionAttributeNames parameter to replace the names of the partition key and - // sort key with placeholder tokens. This option might be necessary if an attribute - // name conflicts with a DynamoDB reserved word. For example, the following - // KeyConditionExpression parameter causes an error because Size is a reserved - // word: + // Use the ExpressionAttributeValues parameter to replace tokens + // such as :partitionval and :sortval with actual values at runtime. 
You can + // optionally use the ExpressionAttributeNames parameter to replace the names of + // the partition key and sort key with placeholder tokens. This option might be + // necessary if an attribute name conflicts with a DynamoDB reserved word. For + // example, the following KeyConditionExpression parameter causes an error because + // Size is a reserved word: // - // * Size = :myval + // * Size = :myval // - // To work around this, define a placeholder (such a - // #S) to represent the attribute name Size. KeyConditionExpression then is as - // follows: + // To work around this, define a + // placeholder (such a #S) to represent the attribute name Size. + // KeyConditionExpression then is as follows: // - // * #S = :myval + // * #S = :myval // - // For a list of reserved words, see Reserved Words + // For a list of + // reserved words, see Reserved Words // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) // in the Amazon DynamoDB Developer Guide. For more information on // ExpressionAttributeNames and ExpressionAttributeValues, see Using Placeholders @@ -279,17 +280,17 @@ type QueryInput struct { // Determines the level of detail about provisioned throughput consumption that is // returned in the response: // - // * INDEXES - The response includes the aggregate + // * INDEXES - The response includes the aggregate // ConsumedCapacity for the operation, together with ConsumedCapacity for each // table and secondary index that was accessed. Note that some operations, such as // GetItem and BatchGetItem, do not access any indexes at all. In these cases, - // specifying INDEXES will only return ConsumedCapacity information for table(s). - // + // specifying INDEXES will only return ConsumedCapacity information for + // table(s). // - // * TOTAL - The response includes only the aggregate ConsumedCapacity for the - // operation. + // * TOTAL - The response includes only the aggregate ConsumedCapacity + // for the operation. // - // * NONE - No ConsumedCapacity details are included in the + // * NONE - No ConsumedCapacity details are included in the // response. ReturnConsumedCapacity types.ReturnConsumedCapacity @@ -309,42 +310,41 @@ type QueryInput struct { // attributes, specific item attributes, the count of matching items, or in the // case of an index, some or all of the attributes projected into the index. // - // * + // * // ALL_ATTRIBUTES - Returns all of the item attributes from the specified table or // index. If you query a local secondary index, then for each matching item in the // index, DynamoDB fetches the entire item from the parent table. If the index is // configured to project all item attributes, then all of the data can be obtained // from the local secondary index, and no fetching is required. // - // * + // * // ALL_PROJECTED_ATTRIBUTES - Allowed only when querying an index. Retrieves all // attributes that have been projected into the index. If the index is configured // to project all attributes, this return value is equivalent to specifying // ALL_ATTRIBUTES. // - // * COUNT - Returns the number of matching items, rather than - // the matching items themselves. - // - // * SPECIFIC_ATTRIBUTES - Returns only the - // attributes listed in AttributesToGet. This return value is equivalent to - // specifying AttributesToGet without specifying any value for Select. 
If you query - // or scan a local secondary index and request only attributes that are projected - // into that index, the operation will read only the index and not the table. If - // any of the requested attributes are not projected into the local secondary - // index, DynamoDB fetches each of these attributes from the parent table. This - // extra fetching incurs additional throughput cost and latency. If you query or - // scan a global secondary index, you can only request attributes that are - // projected into the index. Global secondary index queries cannot fetch attributes - // from the parent table. - // - // If neither Select nor AttributesToGet are specified, - // DynamoDB defaults to ALL_ATTRIBUTES when accessing a table, and - // ALL_PROJECTED_ATTRIBUTES when accessing an index. You cannot use both Select and - // AttributesToGet together in a single request, unless the value for Select is - // SPECIFIC_ATTRIBUTES. (This usage is equivalent to specifying AttributesToGet - // without any value for Select.) If you use the ProjectionExpression parameter, - // then the value for Select can only be SPECIFIC_ATTRIBUTES. Any other value for - // Select will return an error. + // * COUNT - Returns the number of matching items, rather than the + // matching items themselves. + // + // * SPECIFIC_ATTRIBUTES - Returns only the attributes + // listed in AttributesToGet. This return value is equivalent to specifying + // AttributesToGet without specifying any value for Select. If you query or scan a + // local secondary index and request only attributes that are projected into that + // index, the operation will read only the index and not the table. If any of the + // requested attributes are not projected into the local secondary index, DynamoDB + // fetches each of these attributes from the parent table. This extra fetching + // incurs additional throughput cost and latency. If you query or scan a global + // secondary index, you can only request attributes that are projected into the + // index. Global secondary index queries cannot fetch attributes from the parent + // table. + // + // If neither Select nor AttributesToGet are specified, DynamoDB defaults + // to ALL_ATTRIBUTES when accessing a table, and ALL_PROJECTED_ATTRIBUTES when + // accessing an index. You cannot use both Select and AttributesToGet together in a + // single request, unless the value for Select is SPECIFIC_ATTRIBUTES. (This usage + // is equivalent to specifying AttributesToGet without any value for Select.) If + // you use the ProjectionExpression parameter, then the value for Select can only + // be SPECIFIC_ATTRIBUTES. Any other value for Select will return an error. Select types.Select } diff --git a/service/dynamodb/api_op_RestoreTableFromBackup.go b/service/dynamodb/api_op_RestoreTableFromBackup.go index f770c3fe81d..85f07378454 100644 --- a/service/dynamodb/api_op_RestoreTableFromBackup.go +++ b/service/dynamodb/api_op_RestoreTableFromBackup.go @@ -16,19 +16,19 @@ import ( // RestoreTableFromBackup at a maximum rate of 10 times per second. 
You must // manually set up the following on the restored table: // -// * Auto scaling -// policies +// * Auto scaling policies // -// * IAM policies +// * +// IAM policies // -// * Amazon CloudWatch metrics and alarms +// * Amazon CloudWatch metrics and alarms // -// * -// Tags +// * Tags // -// * Stream settings +// * Stream +// settings // -// * Time to Live (TTL) settings +// * Time to Live (TTL) settings func (c *Client) RestoreTableFromBackup(ctx context.Context, params *RestoreTableFromBackupInput, optFns ...func(*Options)) (*RestoreTableFromBackupOutput, error) { if params == nil { params = &RestoreTableFromBackupInput{} diff --git a/service/dynamodb/api_op_RestoreTableToPointInTime.go b/service/dynamodb/api_op_RestoreTableToPointInTime.go index 0ff9d256202..e858d2744d2 100644 --- a/service/dynamodb/api_op_RestoreTableToPointInTime.go +++ b/service/dynamodb/api_op_RestoreTableToPointInTime.go @@ -21,35 +21,35 @@ import ( // new table. Along with data, the following are also included on the new restored // table using point in time recovery: // -// * Global secondary indexes (GSIs) +// * Global secondary indexes (GSIs) // +// * Local +// secondary indexes (LSIs) // -// * Local secondary indexes (LSIs) +// * Provisioned read and write capacity // -// * Provisioned read and write capacity +// * Encryption +// settings All these settings come from the current settings of the source table +// at the time of restore. // +// You must manually set up the following on the restored +// table: // -// * Encryption settings All these settings come from the current settings of the -// source table at the time of restore. +// * Auto scaling policies // -// You must manually set up the following on -// the restored table: +// * IAM policies // -// * Auto scaling policies +// * Amazon CloudWatch metrics and +// alarms // -// * IAM policies +// * Tags // -// * -// Amazon CloudWatch metrics and alarms +// * Stream settings // -// * Tags +// * Time to Live (TTL) settings // -// * Stream settings -// -// * -// Time to Live (TTL) settings -// -// * Point in time recovery settings +// * Point in +// time recovery settings func (c *Client) RestoreTableToPointInTime(ctx context.Context, params *RestoreTableToPointInTimeInput, optFns ...func(*Options)) (*RestoreTableToPointInTimeOutput, error) { if params == nil { params = &RestoreTableToPointInTimeInput{} diff --git a/service/dynamodb/api_op_Scan.go b/service/dynamodb/api_op_Scan.go index 650d0bc6c41..29ebce7e948 100644 --- a/service/dynamodb/api_op_Scan.go +++ b/service/dynamodb/api_op_Scan.go @@ -72,19 +72,19 @@ type ScanInput struct { // A Boolean value that determines the read consistency model during the scan: // - // - // * If ConsistentRead is false, then the data returned from Scan might not contain + // * + // If ConsistentRead is false, then the data returned from Scan might not contain // the results from other recently completed write operations (PutItem, UpdateItem, // or DeleteItem). // - // * If ConsistentRead is true, then all of the write - // operations that completed before the Scan began are guaranteed to be contained - // in the Scan response. + // * If ConsistentRead is true, then all of the write operations + // that completed before the Scan began are guaranteed to be contained in the Scan + // response. // - // The default setting for ConsistentRead is false. The - // ConsistentRead parameter is not supported on global secondary indexes. 
If you - // scan a global secondary index with ConsistentRead set to true, you will receive - // a ValidationException. + // The default setting for ConsistentRead is false. The ConsistentRead + // parameter is not supported on global secondary indexes. If you scan a global + // secondary index with ConsistentRead set to true, you will receive a + // ValidationException. ConsistentRead *bool // The primary key of the first item that this operation will evaluate. Use the @@ -98,36 +98,36 @@ type ScanInput struct { // One or more substitution tokens for attribute names in an expression. The // following are some use cases for using ExpressionAttributeNames: // - // * To - // access an attribute whose name conflicts with a DynamoDB reserved word. + // * To access an + // attribute whose name conflicts with a DynamoDB reserved word. // - // * - // To create a placeholder for repeating occurrences of an attribute name in an - // expression. + // * To create a + // placeholder for repeating occurrences of an attribute name in an expression. // - // * To prevent special characters in an attribute name from being - // misinterpreted in an expression. + // * + // To prevent special characters in an attribute name from being misinterpreted in + // an expression. // - // Use the # character in an expression to - // dereference an attribute name. For example, consider the following attribute - // name: + // Use the # character in an expression to dereference an attribute + // name. For example, consider the following attribute name: // - // * Percentile + // * Percentile // - // The name of this attribute conflicts with a reserved - // word, so it cannot be used directly in an expression. (For the complete list of - // reserved words, see Reserved Words + // The + // name of this attribute conflicts with a reserved word, so it cannot be used + // directly in an expression. (For the complete list of reserved words, see + // Reserved Words // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) // in the Amazon DynamoDB Developer Guide). To work around this, you could specify // the following for ExpressionAttributeNames: // - // * {"#P":"Percentile"} + // * {"#P":"Percentile"} // - // You - // could then use this substitution in an expression, as in this example: + // You could + // then use this substitution in an expression, as in this example: // - // * #P - // = :val + // * #P = + // :val // // Tokens that begin with the : character are expression attribute values, // which are placeholders for the actual value at runtime. For more information on @@ -189,17 +189,17 @@ type ScanInput struct { // Determines the level of detail about provisioned throughput consumption that is // returned in the response: // - // * INDEXES - The response includes the aggregate + // * INDEXES - The response includes the aggregate // ConsumedCapacity for the operation, together with ConsumedCapacity for each // table and secondary index that was accessed. Note that some operations, such as // GetItem and BatchGetItem, do not access any indexes at all. In these cases, - // specifying INDEXES will only return ConsumedCapacity information for table(s). - // + // specifying INDEXES will only return ConsumedCapacity information for + // table(s). // - // * TOTAL - The response includes only the aggregate ConsumedCapacity for the - // operation. + // * TOTAL - The response includes only the aggregate ConsumedCapacity + // for the operation. 
// - // * NONE - No ConsumedCapacity details are included in the + // * NONE - No ConsumedCapacity details are included in the // response. ReturnConsumedCapacity types.ReturnConsumedCapacity @@ -224,41 +224,41 @@ type ScanInput struct { // attributes, specific item attributes, the count of matching items, or in the // case of an index, some or all of the attributes projected into the index. // - // * + // * // ALL_ATTRIBUTES - Returns all of the item attributes from the specified table or // index. If you query a local secondary index, then for each matching item in the // index, DynamoDB fetches the entire item from the parent table. If the index is // configured to project all item attributes, then all of the data can be obtained // from the local secondary index, and no fetching is required. // - // * + // * // ALL_PROJECTED_ATTRIBUTES - Allowed only when querying an index. Retrieves all // attributes that have been projected into the index. If the index is configured // to project all attributes, this return value is equivalent to specifying // ALL_ATTRIBUTES. // - // * COUNT - Returns the number of matching items, rather than - // the matching items themselves. + // * COUNT - Returns the number of matching items, rather than the + // matching items themselves. // - // * SPECIFIC_ATTRIBUTES - Returns only the - // attributes listed in AttributesToGet. This return value is equivalent to - // specifying AttributesToGet without specifying any value for Select. If you query - // or scan a local secondary index and request only attributes that are projected - // into that index, the operation reads only the index and not the table. If any of - // the requested attributes are not projected into the local secondary index, - // DynamoDB fetches each of these attributes from the parent table. This extra - // fetching incurs additional throughput cost and latency. If you query or scan a - // global secondary index, you can only request attributes that are projected into - // the index. Global secondary index queries cannot fetch attributes from the - // parent table. + // * SPECIFIC_ATTRIBUTES - Returns only the attributes + // listed in AttributesToGet. This return value is equivalent to specifying + // AttributesToGet without specifying any value for Select. If you query or scan a + // local secondary index and request only attributes that are projected into that + // index, the operation reads only the index and not the table. If any of the + // requested attributes are not projected into the local secondary index, DynamoDB + // fetches each of these attributes from the parent table. This extra fetching + // incurs additional throughput cost and latency. If you query or scan a global + // secondary index, you can only request attributes that are projected into the + // index. Global secondary index queries cannot fetch attributes from the parent + // table. // - // If neither Select nor AttributesToGet are specified, DynamoDB - // defaults to ALL_ATTRIBUTES when accessing a table, and ALL_PROJECTED_ATTRIBUTES - // when accessing an index. You cannot use both Select and AttributesToGet together - // in a single request, unless the value for Select is SPECIFIC_ATTRIBUTES. (This - // usage is equivalent to specifying AttributesToGet without any value for Select.) - // If you use the ProjectionExpression parameter, then the value for Select can - // only be SPECIFIC_ATTRIBUTES. Any other value for Select will return an error. 
+ // If neither Select nor AttributesToGet are specified, DynamoDB defaults + // to ALL_ATTRIBUTES when accessing a table, and ALL_PROJECTED_ATTRIBUTES when + // accessing an index. You cannot use both Select and AttributesToGet together in a + // single request, unless the value for Select is SPECIFIC_ATTRIBUTES. (This usage + // is equivalent to specifying AttributesToGet without any value for Select.) If + // you use the ProjectionExpression parameter, then the value for Select can only + // be SPECIFIC_ATTRIBUTES. Any other value for Select will return an error. Select types.Select // For a parallel Scan request, TotalSegments represents the total number of diff --git a/service/dynamodb/api_op_TransactGetItems.go b/service/dynamodb/api_op_TransactGetItems.go index 60fa2baef9c..1f07634a9cd 100644 --- a/service/dynamodb/api_op_TransactGetItems.go +++ b/service/dynamodb/api_op_TransactGetItems.go @@ -20,17 +20,17 @@ import ( // the items in the transaction cannot exceed 4 MB. DynamoDB rejects the entire // TransactGetItems request if any of the following is true: // -// * A conflicting +// * A conflicting // operation is in the process of updating an item to be read. // -// * There is +// * There is // insufficient provisioned capacity for the transaction to be completed. // -// * -// There is a user error, such as an invalid data format. +// * There +// is a user error, such as an invalid data format. // -// * The aggregate size -// of the items in the transaction cannot exceed 4 MB. +// * The aggregate size of the +// items in the transaction cannot exceed 4 MB. func (c *Client) TransactGetItems(ctx context.Context, params *TransactGetItemsInput, optFns ...func(*Options)) (*TransactGetItemsOutput, error) { if params == nil { params = &TransactGetItemsInput{} diff --git a/service/dynamodb/api_op_TransactWriteItems.go b/service/dynamodb/api_op_TransactWriteItems.go index 7d6c3b20bc8..e01cda5cb42 100644 --- a/service/dynamodb/api_op_TransactWriteItems.go +++ b/service/dynamodb/api_op_TransactWriteItems.go @@ -20,55 +20,55 @@ import ( // are completed atomically so that either all of them succeed, or all of them // fail. They are defined by the following objects: // -// * Put — Initiates a -// PutItem operation to write a new item. This structure specifies the primary key -// of the item to be written, the name of the table to write it in, an optional -// condition expression that must be satisfied for the write to succeed, a list of -// the item's attributes, and a field indicating whether to retrieve the item's -// attributes if the condition is not met. -// -// * Update — Initiates an UpdateItem -// operation to update an existing item. This structure specifies the primary key -// of the item to be updated, the name of the table where it resides, an optional -// condition expression that must be satisfied for the update to succeed, an -// expression that defines one or more attributes to be updated, and a field -// indicating whether to retrieve the item's attributes if the condition is not -// met. +// * Put — Initiates a PutItem +// operation to write a new item. This structure specifies the primary key of the +// item to be written, the name of the table to write it in, an optional condition +// expression that must be satisfied for the write to succeed, a list of the item's +// attributes, and a field indicating whether to retrieve the item's attributes if +// the condition is not met. // -// * Delete — Initiates a DeleteItem operation to delete an existing -// item. 
This structure specifies the primary key of the item to be deleted, the -// name of the table where it resides, an optional condition expression that must -// be satisfied for the deletion to succeed, and a field indicating whether to +// * Update — Initiates an UpdateItem operation to +// update an existing item. This structure specifies the primary key of the item to +// be updated, the name of the table where it resides, an optional condition +// expression that must be satisfied for the update to succeed, an expression that +// defines one or more attributes to be updated, and a field indicating whether to // retrieve the item's attributes if the condition is not met. // -// * -// ConditionCheck — Applies a condition to an item that is not being modified by -// the transaction. This structure specifies the primary key of the item to be -// checked, the name of the table where it resides, a condition expression that -// must be satisfied for the transaction to succeed, and a field indicating whether -// to retrieve the item's attributes if the condition is not met. +// * Delete — +// Initiates a DeleteItem operation to delete an existing item. This structure +// specifies the primary key of the item to be deleted, the name of the table where +// it resides, an optional condition expression that must be satisfied for the +// deletion to succeed, and a field indicating whether to retrieve the item's +// attributes if the condition is not met. // -// DynamoDB rejects -// the entire TransactWriteItems request if any of the following is true: +// * ConditionCheck — Applies a condition +// to an item that is not being modified by the transaction. This structure +// specifies the primary key of the item to be checked, the name of the table where +// it resides, a condition expression that must be satisfied for the transaction to +// succeed, and a field indicating whether to retrieve the item's attributes if the +// condition is not met. // -// * A -// condition in one of the condition expressions is not met. +// DynamoDB rejects the entire TransactWriteItems request if +// any of the following is true: // -// * An ongoing -// operation is in the process of updating the same item. +// * A condition in one of the condition expressions +// is not met. // -// * There is -// insufficient provisioned capacity for the transaction to be completed. +// * An ongoing operation is in the process of updating the same +// item. // -// * An -// item size becomes too large (bigger than 400 KB), a local secondary index (LSI) -// becomes too large, or a similar validation error occurs because of changes made -// by the transaction. +// * There is insufficient provisioned capacity for the transaction to be +// completed. // -// * The aggregate size of the items in the transaction -// exceeds 4 MB. +// * An item size becomes too large (bigger than 400 KB), a local +// secondary index (LSI) becomes too large, or a similar validation error occurs +// because of changes made by the transaction. // -// * There is a user error, such as an invalid data format. +// * The aggregate size of the items +// in the transaction exceeds 4 MB. +// +// * There is a user error, such as an invalid +// data format. 
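A short sketch of how a caller might combine the Put and ConditionCheck objects described above in a single TransactWriteItems call. The Orders and Customers tables, their key attributes, and the pre-GA pointer-style attribute-value structs are illustrative assumptions, not part of this change.

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

// placeOrder writes a new Order item and, in the same transaction, checks a
// condition on the related Customer item; either both succeed or neither does.
func placeOrder(ctx context.Context, client *dynamodb.Client, orderID, customerID string) error {
	_, err := client.TransactWriteItems(ctx, &dynamodb.TransactWriteItemsInput{
		TransactItems: []*types.TransactWriteItem{
			{
				Put: &types.Put{
					TableName: aws.String("Orders"), // assumed table name
					// Attribute values use the pre-GA struct form assumed for this snapshot.
					Item:                map[string]*types.AttributeValue{"OrderId": {S: aws.String(orderID)}},
					ConditionExpression: aws.String("attribute_not_exists(OrderId)"),
				},
			},
			{
				ConditionCheck: &types.ConditionCheck{
					TableName:           aws.String("Customers"), // assumed table name
					Key:                 map[string]*types.AttributeValue{"CustomerId": {S: aws.String(customerID)}},
					ConditionExpression: aws.String("attribute_exists(CustomerId)"),
				},
			},
		},
	})
	return err
}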
func (c *Client) TransactWriteItems(ctx context.Context, params *TransactWriteItemsInput, optFns ...func(*Options)) (*TransactWriteItemsOutput, error) { if params == nil { params = &TransactWriteItemsInput{} @@ -114,17 +114,17 @@ type TransactWriteItemsInput struct { // Determines the level of detail about provisioned throughput consumption that is // returned in the response: // - // * INDEXES - The response includes the aggregate + // * INDEXES - The response includes the aggregate // ConsumedCapacity for the operation, together with ConsumedCapacity for each // table and secondary index that was accessed. Note that some operations, such as // GetItem and BatchGetItem, do not access any indexes at all. In these cases, - // specifying INDEXES will only return ConsumedCapacity information for table(s). - // + // specifying INDEXES will only return ConsumedCapacity information for + // table(s). // - // * TOTAL - The response includes only the aggregate ConsumedCapacity for the - // operation. + // * TOTAL - The response includes only the aggregate ConsumedCapacity + // for the operation. // - // * NONE - No ConsumedCapacity details are included in the + // * NONE - No ConsumedCapacity details are included in the // response. ReturnConsumedCapacity types.ReturnConsumedCapacity diff --git a/service/dynamodb/api_op_UpdateGlobalTable.go b/service/dynamodb/api_op_UpdateGlobalTable.go index d61a30f2159..054137d4d6c 100644 --- a/service/dynamodb/api_op_UpdateGlobalTable.go +++ b/service/dynamodb/api_op_UpdateGlobalTable.go @@ -20,15 +20,14 @@ import ( // separate requests for adding or removing replicas. If global secondary indexes // are specified, then the following conditions must also be met: // -// * The global +// * The global // secondary indexes must have the same name. // -// * The global secondary indexes -// must have the same hash key and sort key (if present). +// * The global secondary indexes must +// have the same hash key and sort key (if present). // -// * The global -// secondary indexes must have the same provisioned and maximum write capacity -// units. +// * The global secondary +// indexes must have the same provisioned and maximum write capacity units. func (c *Client) UpdateGlobalTable(ctx context.Context, params *UpdateGlobalTableInput, optFns ...func(*Options)) (*UpdateGlobalTableOutput, error) { if params == nil { params = &UpdateGlobalTableInput{} diff --git a/service/dynamodb/api_op_UpdateGlobalTableSettings.go b/service/dynamodb/api_op_UpdateGlobalTableSettings.go index 3b01313d380..4e0233c503c 100644 --- a/service/dynamodb/api_op_UpdateGlobalTableSettings.go +++ b/service/dynamodb/api_op_UpdateGlobalTableSettings.go @@ -37,13 +37,13 @@ type UpdateGlobalTableSettingsInput struct { // The billing mode of the global table. If GlobalTableBillingMode is not // specified, the global table defaults to PROVISIONED capacity billing mode. // - // - // * PROVISIONED - We recommend using PROVISIONED for predictable workloads. + // * + // PROVISIONED - We recommend using PROVISIONED for predictable workloads. // PROVISIONED sets the billing mode to Provisioned Mode // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.ProvisionedThroughput.Manual). // - // - // * PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable + // * + // PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable // workloads. 
PAY_PER_REQUEST sets the billing mode to On-Demand Mode // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.OnDemand). GlobalTableBillingMode types.BillingMode diff --git a/service/dynamodb/api_op_UpdateItem.go b/service/dynamodb/api_op_UpdateItem.go index 0036e3894c4..eb6e878fa28 100644 --- a/service/dynamodb/api_op_UpdateItem.go +++ b/service/dynamodb/api_op_UpdateItem.go @@ -59,18 +59,17 @@ type UpdateItemInput struct { // A condition that must be satisfied in order for a conditional update to succeed. // An expression can contain any of the following: // - // * Functions: - // attribute_exists | attribute_not_exists | attribute_type | contains | - // begins_with | size These function names are case-sensitive. + // * Functions: attribute_exists | + // attribute_not_exists | attribute_type | contains | begins_with | size These + // function names are case-sensitive. // - // * Comparison - // operators: = | <> | < | > | <= | >= | BETWEEN | IN + // * Comparison operators: = | <> | < | > | <= + // | >= | BETWEEN | IN // - // * Logical operators: AND - // | OR | NOT + // * Logical operators: AND | OR | NOT // - // For more information about condition expressions, see Specifying - // Conditions + // For more information + // about condition expressions, see Specifying Conditions // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) // in the Amazon DynamoDB Developer Guide. ConditionExpression *string @@ -90,36 +89,36 @@ type UpdateItemInput struct { // One or more substitution tokens for attribute names in an expression. The // following are some use cases for using ExpressionAttributeNames: // - // * To - // access an attribute whose name conflicts with a DynamoDB reserved word. + // * To access an + // attribute whose name conflicts with a DynamoDB reserved word. // - // * - // To create a placeholder for repeating occurrences of an attribute name in an - // expression. + // * To create a + // placeholder for repeating occurrences of an attribute name in an expression. // - // * To prevent special characters in an attribute name from being - // misinterpreted in an expression. + // * + // To prevent special characters in an attribute name from being misinterpreted in + // an expression. // - // Use the # character in an expression to - // dereference an attribute name. For example, consider the following attribute - // name: + // Use the # character in an expression to dereference an attribute + // name. For example, consider the following attribute name: // - // * Percentile + // * Percentile // - // The name of this attribute conflicts with a reserved - // word, so it cannot be used directly in an expression. (For the complete list of - // reserved words, see Reserved Words + // The + // name of this attribute conflicts with a reserved word, so it cannot be used + // directly in an expression. (For the complete list of reserved words, see + // Reserved Words // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) // in the Amazon DynamoDB Developer Guide.) 
To work around this, you could specify // the following for ExpressionAttributeNames: // - // * {"#P":"Percentile"} + // * {"#P":"Percentile"} // - // You - // could then use this substitution in an expression, as in this example: + // You could + // then use this substitution in an expression, as in this example: // - // * #P - // = :val + // * #P = + // :val // // Tokens that begin with the : character are expression attribute values, // which are placeholders for the actual value at runtime. For more information @@ -144,17 +143,17 @@ type UpdateItemInput struct { // Determines the level of detail about provisioned throughput consumption that is // returned in the response: // - // * INDEXES - The response includes the aggregate + // * INDEXES - The response includes the aggregate // ConsumedCapacity for the operation, together with ConsumedCapacity for each // table and secondary index that was accessed. Note that some operations, such as // GetItem and BatchGetItem, do not access any indexes at all. In these cases, - // specifying INDEXES will only return ConsumedCapacity information for table(s). + // specifying INDEXES will only return ConsumedCapacity information for + // table(s). // + // * TOTAL - The response includes only the aggregate ConsumedCapacity + // for the operation. // - // * TOTAL - The response includes only the aggregate ConsumedCapacity for the - // operation. - // - // * NONE - No ConsumedCapacity details are included in the + // * NONE - No ConsumedCapacity details are included in the // response. ReturnConsumedCapacity types.ReturnConsumedCapacity @@ -167,22 +166,22 @@ type UpdateItemInput struct { // Use ReturnValues if you want to get the item attributes as they appear before or // after they are updated. For UpdateItem, the valid values are: // - // * NONE - If + // * NONE - If // ReturnValues is not specified, or if its value is NONE, then nothing is // returned. (This setting is the default for ReturnValues.) // - // * ALL_OLD - - // Returns all of the attributes of the item, as they appeared before the - // UpdateItem operation. - // - // * UPDATED_OLD - Returns only the updated attributes, - // as they appeared before the UpdateItem operation. + // * ALL_OLD - Returns + // all of the attributes of the item, as they appeared before the UpdateItem + // operation. // - // * ALL_NEW - Returns all - // of the attributes of the item, as they appear after the UpdateItem operation. + // * UPDATED_OLD - Returns only the updated attributes, as they + // appeared before the UpdateItem operation. // + // * ALL_NEW - Returns all of the + // attributes of the item, as they appear after the UpdateItem operation. // - // * UPDATED_NEW - Returns only the updated attributes, as they appear after the + // * + // UPDATED_NEW - Returns only the updated attributes, as they appear after the // UpdateItem operation. // // There is no additional cost associated with requesting a @@ -195,59 +194,58 @@ type UpdateItemInput struct { // be performed on them, and new values for them. The following action values are // available for UpdateExpression. // - // * SET - Adds one or more attributes and - // values to an item. If any of these attributes already exist, they are replaced - // by the new values. You can also use SET to add or subtract from an attribute - // that is of type Number. 
For example: SET myNum = myNum + :valSET supports the - // following functions: - // - // * if_not_exists (path, operand) - if the item does - // not contain an attribute at the specified path, then if_not_exists evaluates to - // operand; otherwise, it evaluates to path. You can use this function to avoid - // overwriting an attribute that may already be present in the item. - // - // * - // list_append (operand, operand) - evaluates to a list with a new element added to - // it. You can append the new element to the start or the end of the list by - // reversing the order of the operands. - // - // These function names are - // case-sensitive. - // - // * REMOVE - Removes one or more attributes from an item. - // - // - // * ADD - Adds the specified value to the item, if the attribute does not already - // exist. If the attribute does exist, then the behavior of ADD depends on the data - // type of the attribute: - // - // * If the existing attribute is a number, and if - // Value is also a number, then Value is mathematically added to the existing - // attribute. If Value is a negative number, then it is subtracted from the - // existing attribute. If you use ADD to increment or decrement a number value for - // an item that doesn't exist before the update, DynamoDB uses 0 as the initial - // value. Similarly, if you use ADD for an existing item to increment or decrement - // an attribute value that doesn't exist before the update, DynamoDB uses 0 as the - // initial value. For example, suppose that the item you want to update doesn't - // have an attribute named itemcount, but you decide to ADD the number 3 to this - // attribute anyway. DynamoDB will create the itemcount attribute, set its initial - // value to 0, and finally add 3 to it. The result will be a new itemcount - // attribute in the item, with a value of 3. - // - // * If the existing data type - // is a set and if Value is also a set, then Value is added to the existing set. - // For example, if the attribute value is the set [1,2], and the ADD action - // specified [3], then the final attribute value is [1,2,3]. An error occurs if an - // ADD action is specified for a set attribute and the attribute type specified - // does not match the existing set type. Both sets must have the same primitive - // data type. For example, if the existing data type is a set of strings, the Value - // must also be a set of strings. - // - // The ADD action only supports Number and set - // data types. In addition, ADD can only be used on top-level attributes, not - // nested attributes. - // - // * DELETE - Deletes an element from a set. If a set of + // * SET - Adds one or more attributes and values + // to an item. If any of these attributes already exist, they are replaced by the + // new values. You can also use SET to add or subtract from an attribute that is of + // type Number. For example: SET myNum = myNum + :valSET supports the following + // functions: + // + // * if_not_exists (path, operand) - if the item does not contain an + // attribute at the specified path, then if_not_exists evaluates to operand; + // otherwise, it evaluates to path. You can use this function to avoid overwriting + // an attribute that may already be present in the item. + // + // * list_append (operand, + // operand) - evaluates to a list with a new element added to it. You can append + // the new element to the start or the end of the list by reversing the order of + // the operands. + // + // These function names are case-sensitive. 
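A sketch of an UpdateItem request exercising the pieces documented above: a SET action with if_not_exists, the #P placeholder for the reserved word Percentile, expression attribute values, and ReturnValues set to UPDATED_NEW. The Stats table, its Id key, and the pointer-style maps are illustrative assumptions.

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

// bumpPercentile increments the Percentile attribute, starting from 0 when the
// attribute does not exist yet, and returns only the updated attribute.
func bumpPercentile(ctx context.Context, client *dynamodb.Client, id string) (*dynamodb.UpdateItemOutput, error) {
	return client.UpdateItem(ctx, &dynamodb.UpdateItemInput{
		TableName: aws.String("Stats"), // assumed table name
		Key: map[string]*types.AttributeValue{
			"Id": {S: aws.String(id)}, // pre-GA attribute-value struct form assumed
		},
		// #P works around the reserved-word conflict described above;
		// if_not_exists supplies :zero when the attribute is missing.
		UpdateExpression:         aws.String("SET #P = if_not_exists(#P, :zero) + :inc"),
		ExpressionAttributeNames: map[string]*string{"#P": aws.String("Percentile")},
		ExpressionAttributeValues: map[string]*types.AttributeValue{
			":zero": {N: aws.String("0")},
			":inc":  {N: aws.String("1")},
		},
		ReturnValues: types.ReturnValueUpdatedNew,
	})
}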
+ // + // * REMOVE - Removes one + // or more attributes from an item. + // + // * ADD - Adds the specified value to the item, + // if the attribute does not already exist. If the attribute does exist, then the + // behavior of ADD depends on the data type of the attribute: + // + // * If the existing + // attribute is a number, and if Value is also a number, then Value is + // mathematically added to the existing attribute. If Value is a negative number, + // then it is subtracted from the existing attribute. If you use ADD to increment + // or decrement a number value for an item that doesn't exist before the update, + // DynamoDB uses 0 as the initial value. Similarly, if you use ADD for an existing + // item to increment or decrement an attribute value that doesn't exist before the + // update, DynamoDB uses 0 as the initial value. For example, suppose that the item + // you want to update doesn't have an attribute named itemcount, but you decide to + // ADD the number 3 to this attribute anyway. DynamoDB will create the itemcount + // attribute, set its initial value to 0, and finally add 3 to it. The result will + // be a new itemcount attribute in the item, with a value of 3. + // + // * If the existing + // data type is a set and if Value is also a set, then Value is added to the + // existing set. For example, if the attribute value is the set [1,2], and the ADD + // action specified [3], then the final attribute value is [1,2,3]. An error occurs + // if an ADD action is specified for a set attribute and the attribute type + // specified does not match the existing set type. Both sets must have the same + // primitive data type. For example, if the existing data type is a set of strings, + // the Value must also be a set of strings. + // + // The ADD action only supports Number + // and set data types. In addition, ADD can only be used on top-level attributes, + // not nested attributes. + // + // * DELETE - Deletes an element from a set. If a set of // values is specified, then those values are subtracted from the old set. For // example, if the attribute value was the set [a,b,c] and the DELETE action // specifies [a,c], then the final attribute value is [b]. Specifying an empty set @@ -287,11 +285,11 @@ type UpdateItemOutput struct { // any local secondary indexes, this information is not returned in the response. // Each ItemCollectionMetrics element consists of: // - // * ItemCollectionKey - The + // * ItemCollectionKey - The // partition key value of the item collection. This is the same as the partition // key value of the item itself. // - // * SizeEstimateRangeGB - An estimate of item + // * SizeEstimateRangeGB - An estimate of item // collection size, in gigabytes. This value is a two-element array containing a // lower bound and an upper bound for the estimate. The estimate includes the size // of all the items in the table, plus the size of all attributes projected into diff --git a/service/dynamodb/api_op_UpdateTable.go b/service/dynamodb/api_op_UpdateTable.go index 125473f9e65..1eb522bc46f 100644 --- a/service/dynamodb/api_op_UpdateTable.go +++ b/service/dynamodb/api_op_UpdateTable.go @@ -15,23 +15,22 @@ import ( // DynamoDB Streams settings for a given table. You can only perform one of the // following operations at once: // -// * Modify the provisioned throughput settings -// of the table. +// * Modify the provisioned throughput settings of +// the table. // -// * Enable or disable DynamoDB Streams on the table. 
+// * Enable or disable DynamoDB Streams on the table. // -// * -// Remove a global secondary index from the table. +// * Remove a +// global secondary index from the table. // -// * Create a new global -// secondary index on the table. After the index begins backfilling, you can use -// UpdateTable to perform other operations. +// * Create a new global secondary index on +// the table. After the index begins backfilling, you can use UpdateTable to +// perform other operations. // -// UpdateTable is an asynchronous -// operation; while it is executing, the table status changes from ACTIVE to -// UPDATING. While it is UPDATING, you cannot issue another UpdateTable request. -// When the table returns to the ACTIVE state, the UpdateTable operation is -// complete. +// UpdateTable is an asynchronous operation; while it is +// executing, the table status changes from ACTIVE to UPDATING. While it is +// UPDATING, you cannot issue another UpdateTable request. When the table returns +// to the ACTIVE state, the UpdateTable operation is complete. func (c *Client) UpdateTable(ctx context.Context, params *UpdateTableInput, optFns ...func(*Options)) (*UpdateTableOutput, error) { if params == nil { params = &UpdateTableInput{} @@ -66,13 +65,13 @@ type UpdateTableInput struct { // are estimated based on the consumed read and write capacity of your table and // global secondary indexes over the past 30 minutes. // - // * PROVISIONED - We - // recommend using PROVISIONED for predictable workloads. PROVISIONED sets the - // billing mode to Provisioned Mode + // * PROVISIONED - We recommend + // using PROVISIONED for predictable workloads. PROVISIONED sets the billing mode + // to Provisioned Mode // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.ProvisionedThroughput.Manual). // - // - // * PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable + // * + // PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable // workloads. PAY_PER_REQUEST sets the billing mode to On-Demand Mode // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.OnDemand). BillingMode types.BillingMode @@ -80,18 +79,18 @@ type UpdateTableInput struct { // An array of one or more global secondary indexes for the table. For each index // in the array, you can request one action: // - // * Create - add a new global - // secondary index to the table. + // * Create - add a new global secondary + // index to the table. // - // * Update - modify the provisioned throughput - // settings of an existing global secondary index. + // * Update - modify the provisioned throughput settings of an + // existing global secondary index. // - // * Delete - remove a global - // secondary index from the table. + // * Delete - remove a global secondary index + // from the table. // - // You can create or delete only one global - // secondary index per UpdateTable operation. For more information, see Managing - // Global Secondary Indexes + // You can create or delete only one global secondary index per + // UpdateTable operation. For more information, see Managing Global Secondary + // Indexes // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GSI.OnlineOps.html) // in the Amazon DynamoDB Developer Guide. 
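A minimal sketch of the UpdateTable flow described above, switching a table to on-demand billing with the renamed BillingModePayPerRequest constant; the caller would then wait for the table to return to ACTIVE before issuing another UpdateTable.

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

// switchToOnDemand sets the table's billing mode to PAY_PER_REQUEST. The table
// status moves from ACTIVE to UPDATING while the change is applied.
func switchToOnDemand(ctx context.Context, client *dynamodb.Client, table string) error {
	_, err := client.UpdateTable(ctx, &dynamodb.UpdateTableInput{
		TableName:   aws.String(table),
		BillingMode: types.BillingModePayPerRequest,
	})
	return err
}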
GlobalSecondaryIndexUpdates []*types.GlobalSecondaryIndexUpdate diff --git a/service/dynamodb/types/enums.go b/service/dynamodb/types/enums.go index dcb9d81c807..a5a7a66d0ca 100644 --- a/service/dynamodb/types/enums.go +++ b/service/dynamodb/types/enums.go @@ -46,9 +46,9 @@ type BackupType string // Enum values for BackupType const ( - BackupTypeUser BackupType = "USER" - BackupTypeSystem BackupType = "SYSTEM" - BackupTypeAws_backup BackupType = "AWS_BACKUP" + BackupTypeUser BackupType = "USER" + BackupTypeSystem BackupType = "SYSTEM" + BackupTypeAwsBackup BackupType = "AWS_BACKUP" ) // Values returns all known values for BackupType. Note that this can be expanded @@ -66,10 +66,10 @@ type BackupTypeFilter string // Enum values for BackupTypeFilter const ( - BackupTypeFilterUser BackupTypeFilter = "USER" - BackupTypeFilterSystem BackupTypeFilter = "SYSTEM" - BackupTypeFilterAws_backup BackupTypeFilter = "AWS_BACKUP" - BackupTypeFilterAll BackupTypeFilter = "ALL" + BackupTypeFilterUser BackupTypeFilter = "USER" + BackupTypeFilterSystem BackupTypeFilter = "SYSTEM" + BackupTypeFilterAwsBackup BackupTypeFilter = "AWS_BACKUP" + BackupTypeFilterAll BackupTypeFilter = "ALL" ) // Values returns all known values for BackupTypeFilter. Note that this can be @@ -88,8 +88,8 @@ type BillingMode string // Enum values for BillingMode const ( - BillingModeProvisioned BillingMode = "PROVISIONED" - BillingModePay_per_request BillingMode = "PAY_PER_REQUEST" + BillingModeProvisioned BillingMode = "PROVISIONED" + BillingModePayPerRequest BillingMode = "PAY_PER_REQUEST" ) // Values returns all known values for BillingMode. Note that this can be expanded @@ -106,19 +106,19 @@ type ComparisonOperator string // Enum values for ComparisonOperator const ( - ComparisonOperatorEq ComparisonOperator = "EQ" - ComparisonOperatorNe ComparisonOperator = "NE" - ComparisonOperatorIn ComparisonOperator = "IN" - ComparisonOperatorLe ComparisonOperator = "LE" - ComparisonOperatorLt ComparisonOperator = "LT" - ComparisonOperatorGe ComparisonOperator = "GE" - ComparisonOperatorGt ComparisonOperator = "GT" - ComparisonOperatorBetween ComparisonOperator = "BETWEEN" - ComparisonOperatorNot_null ComparisonOperator = "NOT_NULL" - ComparisonOperatorNull ComparisonOperator = "NULL" - ComparisonOperatorContains ComparisonOperator = "CONTAINS" - ComparisonOperatorNot_contains ComparisonOperator = "NOT_CONTAINS" - ComparisonOperatorBegins_with ComparisonOperator = "BEGINS_WITH" + ComparisonOperatorEq ComparisonOperator = "EQ" + ComparisonOperatorNe ComparisonOperator = "NE" + ComparisonOperatorIn ComparisonOperator = "IN" + ComparisonOperatorLe ComparisonOperator = "LE" + ComparisonOperatorLt ComparisonOperator = "LT" + ComparisonOperatorGe ComparisonOperator = "GE" + ComparisonOperatorGt ComparisonOperator = "GT" + ComparisonOperatorBetween ComparisonOperator = "BETWEEN" + ComparisonOperatorNotNull ComparisonOperator = "NOT_NULL" + ComparisonOperatorNull ComparisonOperator = "NULL" + ComparisonOperatorContains ComparisonOperator = "CONTAINS" + ComparisonOperatorNotContains ComparisonOperator = "NOT_CONTAINS" + ComparisonOperatorBeginsWith ComparisonOperator = "BEGINS_WITH" ) // Values returns all known values for ComparisonOperator. 
Note that this can be @@ -304,9 +304,9 @@ type ProjectionType string // Enum values for ProjectionType const ( - ProjectionTypeAll ProjectionType = "ALL" - ProjectionTypeKeys_only ProjectionType = "KEYS_ONLY" - ProjectionTypeInclude ProjectionType = "INCLUDE" + ProjectionTypeAll ProjectionType = "ALL" + ProjectionTypeKeysOnly ProjectionType = "KEYS_ONLY" + ProjectionTypeInclude ProjectionType = "INCLUDE" ) // Values returns all known values for ProjectionType. Note that this can be @@ -324,12 +324,12 @@ type ReplicaStatus string // Enum values for ReplicaStatus const ( - ReplicaStatusCreating ReplicaStatus = "CREATING" - ReplicaStatusCreation_failed ReplicaStatus = "CREATION_FAILED" - ReplicaStatusUpdating ReplicaStatus = "UPDATING" - ReplicaStatusDeleting ReplicaStatus = "DELETING" - ReplicaStatusActive ReplicaStatus = "ACTIVE" - ReplicaStatusRegion_disabled ReplicaStatus = "REGION_DISABLED" + ReplicaStatusCreating ReplicaStatus = "CREATING" + ReplicaStatusCreationFailed ReplicaStatus = "CREATION_FAILED" + ReplicaStatusUpdating ReplicaStatus = "UPDATING" + ReplicaStatusDeleting ReplicaStatus = "DELETING" + ReplicaStatusActive ReplicaStatus = "ACTIVE" + ReplicaStatusRegionDisabled ReplicaStatus = "REGION_DISABLED" ) // Values returns all known values for ReplicaStatus. Note that this can be @@ -388,11 +388,11 @@ type ReturnValue string // Enum values for ReturnValue const ( - ReturnValueNone ReturnValue = "NONE" - ReturnValueAll_old ReturnValue = "ALL_OLD" - ReturnValueUpdated_old ReturnValue = "UPDATED_OLD" - ReturnValueAll_new ReturnValue = "ALL_NEW" - ReturnValueUpdated_new ReturnValue = "UPDATED_NEW" + ReturnValueNone ReturnValue = "NONE" + ReturnValueAllOld ReturnValue = "ALL_OLD" + ReturnValueUpdatedOld ReturnValue = "UPDATED_OLD" + ReturnValueAllNew ReturnValue = "ALL_NEW" + ReturnValueUpdatedNew ReturnValue = "UPDATED_NEW" ) // Values returns all known values for ReturnValue. Note that this can be expanded @@ -412,8 +412,8 @@ type ReturnValuesOnConditionCheckFailure string // Enum values for ReturnValuesOnConditionCheckFailure const ( - ReturnValuesOnConditionCheckFailureAll_old ReturnValuesOnConditionCheckFailure = "ALL_OLD" - ReturnValuesOnConditionCheckFailureNone ReturnValuesOnConditionCheckFailure = "NONE" + ReturnValuesOnConditionCheckFailureAllOld ReturnValuesOnConditionCheckFailure = "ALL_OLD" + ReturnValuesOnConditionCheckFailureNone ReturnValuesOnConditionCheckFailure = "NONE" ) // Values returns all known values for ReturnValuesOnConditionCheckFailure. Note @@ -451,10 +451,10 @@ type Select string // Enum values for Select const ( - SelectAll_attributes Select = "ALL_ATTRIBUTES" - SelectAll_projected_attributes Select = "ALL_PROJECTED_ATTRIBUTES" - SelectSpecific_attributes Select = "SPECIFIC_ATTRIBUTES" - SelectCount Select = "COUNT" + SelectAllAttributes Select = "ALL_ATTRIBUTES" + SelectAllProjectedAttributes Select = "ALL_PROJECTED_ATTRIBUTES" + SelectSpecificAttributes Select = "SPECIFIC_ATTRIBUTES" + SelectCount Select = "COUNT" ) // Values returns all known values for Select. 
Note that this can be expanded in @@ -515,10 +515,10 @@ type StreamViewType string // Enum values for StreamViewType const ( - StreamViewTypeNew_image StreamViewType = "NEW_IMAGE" - StreamViewTypeOld_image StreamViewType = "OLD_IMAGE" - StreamViewTypeNew_and_old_images StreamViewType = "NEW_AND_OLD_IMAGES" - StreamViewTypeKeys_only StreamViewType = "KEYS_ONLY" + StreamViewTypeNewImage StreamViewType = "NEW_IMAGE" + StreamViewTypeOldImage StreamViewType = "OLD_IMAGE" + StreamViewTypeNewAndOldImages StreamViewType = "NEW_AND_OLD_IMAGES" + StreamViewTypeKeysOnly StreamViewType = "KEYS_ONLY" ) // Values returns all known values for StreamViewType. Note that this can be @@ -537,13 +537,13 @@ type TableStatus string // Enum values for TableStatus const ( - TableStatusCreating TableStatus = "CREATING" - TableStatusUpdating TableStatus = "UPDATING" - TableStatusDeleting TableStatus = "DELETING" - TableStatusActive TableStatus = "ACTIVE" - TableStatusInaccessible_encryption_credentials TableStatus = "INACCESSIBLE_ENCRYPTION_CREDENTIALS" - TableStatusArchiving TableStatus = "ARCHIVING" - TableStatusArchived TableStatus = "ARCHIVED" + TableStatusCreating TableStatus = "CREATING" + TableStatusUpdating TableStatus = "UPDATING" + TableStatusDeleting TableStatus = "DELETING" + TableStatusActive TableStatus = "ACTIVE" + TableStatusInaccessibleEncryptionCredentials TableStatus = "INACCESSIBLE_ENCRYPTION_CREDENTIALS" + TableStatusArchiving TableStatus = "ARCHIVING" + TableStatusArchived TableStatus = "ARCHIVED" ) // Values returns all known values for TableStatus. Note that this can be expanded diff --git a/service/dynamodb/types/errors.go b/service/dynamodb/types/errors.go index e3d5cb0dd8e..f1e3a261789 100644 --- a/service/dynamodb/types/errors.go +++ b/service/dynamodb/types/errors.go @@ -447,154 +447,150 @@ func (e *TableNotFoundException) ErrorFault() smithy.ErrorFault { return smithy. // The entire transaction request was canceled. DynamoDB cancels a // TransactWriteItems request under the following circumstances: // -// * A condition -// in one of the condition expressions is not met. +// * A condition in +// one of the condition expressions is not met. // -// * A table in the +// * A table in the // TransactWriteItems request is in a different account or region. // -// * More than -// one action in the TransactWriteItems operation targets the same item. +// * More than one +// action in the TransactWriteItems operation targets the same item. // -// * -// There is insufficient provisioned capacity for the transaction to be -// completed. +// * There is +// insufficient provisioned capacity for the transaction to be completed. // -// * An item size becomes too large (larger than 400 KB), or a -// local secondary index (LSI) becomes too large, or a similar validation error -// occurs because of changes made by the transaction. +// * An +// item size becomes too large (larger than 400 KB), or a local secondary index +// (LSI) becomes too large, or a similar validation error occurs because of changes +// made by the transaction. // -// * There is a user error, -// such as an invalid data format. +// * There is a user error, such as an invalid data +// format. // -// DynamoDB cancels a TransactGetItems request -// under the following circumstances: +// DynamoDB cancels a TransactGetItems request under the following +// circumstances: // -// * There is an ongoing TransactGetItems -// operation that conflicts with a concurrent PutItem, UpdateItem, DeleteItem or -// TransactWriteItems request. 
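Since the substantive change in types/enums.go is the switch to CamelCase constant names, a small sketch of caller-side code written against the new spellings (for example, TableStatusInaccessibleEncryptionCredentials in place of the old underscore form):

import "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"

// tableIsUsable reports whether a table in the given status can serve reads and
// writes, using the renamed CamelCase TableStatus constants.
func tableIsUsable(status types.TableStatus) bool {
	switch status {
	case types.TableStatusActive, types.TableStatusUpdating:
		return true
	case types.TableStatusCreating, types.TableStatusDeleting,
		types.TableStatusArchiving, types.TableStatusArchived,
		types.TableStatusInaccessibleEncryptionCredentials:
		return false
	default:
		return false
	}
}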
In this case the TransactGetItems operation fails -// with a TransactionCanceledException. +// * There is an ongoing TransactGetItems operation that conflicts +// with a concurrent PutItem, UpdateItem, DeleteItem or TransactWriteItems request. +// In this case the TransactGetItems operation fails with a +// TransactionCanceledException. // -// * A table in the TransactGetItems -// request is in a different account or region. +// * A table in the TransactGetItems request is in a +// different account or region. // -// * There is insufficient -// provisioned capacity for the transaction to be completed. +// * There is insufficient provisioned capacity for +// the transaction to be completed. // -// * There is a user -// error, such as an invalid data format. +// * There is a user error, such as an invalid +// data format. // -// If using Java, DynamoDB lists the -// cancellation reasons on the CancellationReasons property. This property is not -// set for other languages. Transaction cancellation reasons are ordered in the -// order of requested items, if an item has no error it will have NONE code and -// Null message. Cancellation reason codes and possible error messages: +// If using Java, DynamoDB lists the cancellation reasons on the +// CancellationReasons property. This property is not set for other languages. +// Transaction cancellation reasons are ordered in the order of requested items, if +// an item has no error it will have NONE code and Null message. Cancellation +// reason codes and possible error messages: // -// * No -// Errors: +// * No Errors: // -// * Code: NONE +// * Code: NONE // -// * Message: null +// * +// Message: null // -// * Conditional Check -// Failed: +// * Conditional Check Failed: // -// * Code: ConditionalCheckFailed +// * Code: ConditionalCheckFailed // -// * Message: The -// conditional request failed. -// -// * Item Collection Size Limit Exceeded: +// * +// Message: The conditional request failed. // +// * Item Collection Size Limit +// Exceeded: // // * Code: ItemCollectionSizeLimitExceeded // -// * Message: Collection size +// * Message: Collection size // exceeded. // -// * Transaction Conflict: +// * Transaction Conflict: // -// * Code: TransactionConflict +// * Code: TransactionConflict // +// * Message: +// Transaction is ongoing for the item. // -// * Message: Transaction is ongoing for the item. -// -// * Provisioned Throughput -// Exceeded: +// * Provisioned Throughput Exceeded: // -// * Code: ProvisionedThroughputExceeded +// * +// Code: ProvisionedThroughputExceeded // -// * Messages: +// * Messages: // +// * The level of configured +// provisioned throughput for the table was exceeded. Consider increasing your +// provisioning level with the UpdateTable API. This Message is received when +// provisioned throughput is exceeded is on a provisioned DynamoDB table. // -// * The level of configured provisioned throughput for the table was exceeded. -// Consider increasing your provisioning level with the UpdateTable API. This -// Message is received when provisioned throughput is exceeded is on a provisioned -// DynamoDB table. -// -// * The level of configured provisioned throughput -// for one or more global secondary indexes of the table was exceeded. Consider -// increasing your provisioning level for the under-provisioned global secondary -// indexes with the UpdateTable API. This message is returned when provisioned -// throughput is exceeded is on a provisioned GSI. 
-// -// * Throttling Error: +// * The +// level of configured provisioned throughput for one or more global secondary +// indexes of the table was exceeded. Consider increasing your provisioning level +// for the under-provisioned global secondary indexes with the UpdateTable API. +// This message is returned when provisioned throughput is exceeded is on a +// provisioned GSI. // +// * Throttling Error: // // * Code: ThrottlingError // -// * Messages: +// * Messages: // -// * Throughput exceeds -// the current capacity of your table or index. DynamoDB is automatically scaling -// your table or index so please try again shortly. If exceptions persist, check if -// you have a hot key: +// * +// Throughput exceeds the current capacity of your table or index. DynamoDB is +// automatically scaling your table or index so please try again shortly. If +// exceptions persist, check if you have a hot key: // https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html. // This message is returned when writes get throttled on an On-Demand table as // DynamoDB is automatically scaling the table. // -// * Throughput exceeds -// the current capacity for one or more global secondary indexes. DynamoDB is -// automatically scaling your index so please try again shortly. This message is -// returned when when writes get throttled on an On-Demand GSI as DynamoDB is -// automatically scaling the GSI. +// * Throughput exceeds the current +// capacity for one or more global secondary indexes. DynamoDB is automatically +// scaling your index so please try again shortly. This message is returned when +// when writes get throttled on an On-Demand GSI as DynamoDB is automatically +// scaling the GSI. // -// * Validation Error: +// * Validation Error: // -// * Code: -// ValidationError +// * Code: ValidationError // -// * Messages: +// * Messages: // -// * One or more parameter values -// were invalid. +// * +// One or more parameter values were invalid. // -// * The update expression attempted to update the -// secondary index key beyond allowed size limits. +// * The update expression attempted to +// update the secondary index key beyond allowed size limits. // -// * The update +// * The update // expression attempted to update the secondary index key to unsupported type. // +// * +// An operand in the update expression has an incorrect data type. // -// * An operand in the update expression has an incorrect data type. -// -// * -// Item size to update has exceeded the maximum allowed size. +// * Item size to +// update has exceeded the maximum allowed size. // -// * Number -// overflow. Attempting to store a number with magnitude larger than supported -// range. +// * Number overflow. Attempting to +// store a number with magnitude larger than supported range. // -// * Type mismatch for attribute to update. +// * Type mismatch for +// attribute to update. // -// * -// Nesting Levels have exceeded supported limits. +// * Nesting Levels have exceeded supported limits. // -// * The document path -// provided in the update expression is invalid for update. +// * The +// document path provided in the update expression is invalid for update. // -// * The +// * The // provided expression refers to an attribute that does not exist in the item. 
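A sketch of how a caller might detect this cancellation error after a failed transaction, assuming the generated type satisfies the error interface in the same way as the other exception types in this file:

import (
	"errors"
	"log"

	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

// logIfCanceled reports whether err wraps a TransactionCanceledException and
// logs the cancellation details when it does.
func logIfCanceled(err error) bool {
	var tce *types.TransactionCanceledException
	if errors.As(err, &tce) {
		log.Printf("transaction canceled: %s", tce.Error())
		return true
	}
	return false
}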
type TransactionCanceledException struct { Message *string diff --git a/service/dynamodb/types/types.go b/service/dynamodb/types/types.go index dce9b0d25a2..795a32d4b42 100644 --- a/service/dynamodb/types/types.go +++ b/service/dynamodb/types/types.go @@ -18,8 +18,8 @@ type ArchivalSummary struct { // time format. ArchivalDateTime *time.Time - // The reason DynamoDB archived the table. Currently, the only possible value is: - // + // The reason DynamoDB archived the table. Currently, the only possible value + // is: // // * INACCESSIBLE_ENCRYPTION_CREDENTIALS - The table was archived due to the // table's AWS KMS key being inaccessible for more than seven days. An On-Demand @@ -37,13 +37,13 @@ type AttributeDefinition struct { // The data type for the attribute, where: // - // * S - the attribute is of type + // * S - the attribute is of type // String // - // * N - the attribute is of type Number + // * N - the attribute is of type Number // - // * B - the attribute is of - // type Binary + // * B - the attribute is of type + // Binary // // This member is required. AttributeType ScalarAttributeType @@ -111,63 +111,62 @@ type AttributeValueUpdate struct { // ADD. The behavior depends on whether the specified primary key already exists in // the table. If an item with the specified Key is found in the table: // - // * PUT - + // * PUT - // Adds the specified attribute to the item. If the attribute already exists, it is // replaced by the new value. // - // * DELETE - If no value is specified, the - // attribute and its value are removed from the item. The data type of the - // specified value must match the existing value's data type. If a set of values is - // specified, then those values are subtracted from the old set. For example, if - // the attribute value was the set [a,b,c] and the DELETE action specified [a,c], - // then the final attribute value would be [b]. Specifying an empty set is an - // error. - // - // * ADD - If the attribute does not already exist, then the attribute - // and its values are added to the item. If the attribute does exist, then the - // behavior of ADD depends on the data type of the attribute: - // - // * If the - // existing attribute is a number, and if Value is also a number, then the Value is - // mathematically added to the existing attribute. If Value is a negative number, - // then it is subtracted from the existing attribute. If you use ADD to increment - // or decrement a number value for an item that doesn't exist before the update, - // DynamoDB uses 0 as the initial value. In addition, if you use ADD to update an - // existing item, and intend to increment or decrement an attribute value which - // does not yet exist, DynamoDB uses 0 as the initial value. For example, suppose - // that the item you want to update does not yet have an attribute named itemcount, - // but you decide to ADD the number 3 to this attribute anyway, even though it - // currently does not exist. DynamoDB will create the itemcount attribute, set its - // initial value to 0, and finally add 3 to it. The result will be a new itemcount - // attribute in the item, with a value of 3. - // - // * If the existing data type - // is a set, and if the Value is also a set, then the Value is added to the - // existing set. (This is a set operation, not mathematical addition.) For example, - // if the attribute value was the set [1,2], and the ADD action specified [3], then - // the final attribute value would be [1,2,3]. 
An error occurs if an Add action is - // specified for a set attribute and the attribute type specified does not match - // the existing set type. Both sets must have the same primitive data type. For - // example, if the existing data type is a set of strings, the Value must also be a - // set of strings. The same holds true for number sets and binary sets. - // - // This - // action is only valid for an existing attribute whose data type is number or is a - // set. Do not use ADD for any other data types. - // - // If no item with the specified Key - // is found: - // - // * PUT - DynamoDB creates a new item with the specified primary - // key, and then adds the attribute. - // - // * DELETE - Nothing happens; there is no - // attribute to delete. - // - // * ADD - DynamoDB creates an item with the supplied - // primary key and number (or set of numbers) for the attribute value. The only - // data types allowed are number and number set; no other data types can be - // specified. + // * DELETE - If no value is specified, the attribute + // and its value are removed from the item. The data type of the specified value + // must match the existing value's data type. If a set of values is specified, then + // those values are subtracted from the old set. For example, if the attribute + // value was the set [a,b,c] and the DELETE action specified [a,c], then the final + // attribute value would be [b]. Specifying an empty set is an error. + // + // * ADD - If + // the attribute does not already exist, then the attribute and its values are + // added to the item. If the attribute does exist, then the behavior of ADD depends + // on the data type of the attribute: + // + // * If the existing attribute is a number, and + // if Value is also a number, then the Value is mathematically added to the + // existing attribute. If Value is a negative number, then it is subtracted from + // the existing attribute. If you use ADD to increment or decrement a number value + // for an item that doesn't exist before the update, DynamoDB uses 0 as the initial + // value. In addition, if you use ADD to update an existing item, and intend to + // increment or decrement an attribute value which does not yet exist, DynamoDB + // uses 0 as the initial value. For example, suppose that the item you want to + // update does not yet have an attribute named itemcount, but you decide to ADD the + // number 3 to this attribute anyway, even though it currently does not exist. + // DynamoDB will create the itemcount attribute, set its initial value to 0, and + // finally add 3 to it. The result will be a new itemcount attribute in the item, + // with a value of 3. + // + // * If the existing data type is a set, and if the Value is + // also a set, then the Value is added to the existing set. (This is a set + // operation, not mathematical addition.) For example, if the attribute value was + // the set [1,2], and the ADD action specified [3], then the final attribute value + // would be [1,2,3]. An error occurs if an Add action is specified for a set + // attribute and the attribute type specified does not match the existing set type. + // Both sets must have the same primitive data type. For example, if the existing + // data type is a set of strings, the Value must also be a set of strings. The same + // holds true for number sets and binary sets. + // + // This action is only valid for an + // existing attribute whose data type is number or is a set. Do not use ADD for any + // other data types. 
+ // + // If no item with the specified Key is found: + // + // * PUT - DynamoDB + // creates a new item with the specified primary key, and then adds the + // attribute. + // + // * DELETE - Nothing happens; there is no attribute to delete. + // + // * ADD + // - DynamoDB creates an item with the supplied primary key and number (or set of + // numbers) for the attribute value. The only data types allowed are number and + // number set; no other data types can be specified. Action AttributeAction // Represents the data for an attribute. Each attribute value is described as a @@ -349,16 +348,16 @@ type BackupDetails struct { // BackupType: // - // * USER - You create and manage these using the on-demand backup + // * USER - You create and manage these using the on-demand backup // feature. // - // * SYSTEM - If you delete a table with point-in-time recovery - // enabled, a SYSTEM backup is automatically created and is retained for 35 days - // (at no additional cost). System backups allow you to restore the deleted table - // to the state it was in just before the point of deletion. + // * SYSTEM - If you delete a table with point-in-time recovery enabled, + // a SYSTEM backup is automatically created and is retained for 35 days (at no + // additional cost). System backups allow you to restore the deleted table to the + // state it was in just before the point of deletion. // - // * AWS_BACKUP - - // On-demand backup created by you from AWS Backup service. + // * AWS_BACKUP - On-demand + // backup created by you from AWS Backup service. // // This member is required. BackupType BackupType @@ -395,16 +394,16 @@ type BackupSummary struct { // BackupType: // - // * USER - You create and manage these using the on-demand backup + // * USER - You create and manage these using the on-demand backup // feature. // - // * SYSTEM - If you delete a table with point-in-time recovery - // enabled, a SYSTEM backup is automatically created and is retained for 35 days - // (at no additional cost). System backups allow you to restore the deleted table - // to the state it was in just before the point of deletion. + // * SYSTEM - If you delete a table with point-in-time recovery enabled, + // a SYSTEM backup is automatically created and is retained for 35 days (at no + // additional cost). System backups allow you to restore the deleted table to the + // state it was in just before the point of deletion. // - // * AWS_BACKUP - - // On-demand backup created by you from AWS Backup service. + // * AWS_BACKUP - On-demand + // backup created by you from AWS Backup service. BackupType BackupType // ARN associated with the table. @@ -423,13 +422,12 @@ type BillingModeSummary struct { // Controls how you are charged for read and write throughput and how you manage // capacity. This setting can be changed later. // - // * PROVISIONED - Sets the + // * PROVISIONED - Sets the // read/write capacity mode to PROVISIONED. We recommend using PROVISIONED for // predictable workloads. // - // * PAY_PER_REQUEST - Sets the read/write capacity - // mode to PAY_PER_REQUEST. We recommend using PAY_PER_REQUEST for unpredictable - // workloads. + // * PAY_PER_REQUEST - Sets the read/write capacity mode to + // PAY_PER_REQUEST. We recommend using PAY_PER_REQUEST for unpredictable workloads. 
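A small sketch of caller-side code reading the BackupType values documented above, using the renamed BackupTypeAwsBackup constant:

import "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"

// isManagedByAWSBackup reports whether a backup summary describes an on-demand
// backup created through the AWS Backup service rather than a USER or SYSTEM
// backup.
func isManagedByAWSBackup(summary *types.BackupSummary) bool {
	return summary != nil && summary.BackupType == types.BackupTypeAwsBackup
}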
BillingMode BillingMode // Represents the time when PAY_PER_REQUEST was last set as the read/write capacity @@ -470,14 +468,14 @@ type Capacity struct { // Represents the selection criteria for a Query or Scan operation: // -// * For a -// Query operation, Condition is used for specifying the KeyConditions to use when +// * For a Query +// operation, Condition is used for specifying the KeyConditions to use when // querying a table or an index. For KeyConditions, only the following comparison // operators are supported: EQ | LE | LT | GE | GT | BEGINS_WITH | BETWEENCondition // is also used in a QueryFilter, which evaluates the query results and returns // only the desired values. // -// * For a Scan operation, Condition is used in a +// * For a Scan operation, Condition is used in a // ScanFilter, which evaluates the scan results and returns only the desired // values. type Condition struct { @@ -487,76 +485,76 @@ type Condition struct { // GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN // The following are descriptions of each comparison operator. // - // * EQ : Equal. - // EQ is supported for all data types, including lists and maps. AttributeValueList - // can contain only one AttributeValue element of type String, Number, Binary, - // String Set, Number Set, or Binary Set. If an item contains an AttributeValue - // element of a different type than the one provided in the request, the value does - // not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} does - // not equal {"NS":["6", "2", "1"]}. - // - // * NE : Not equal. NE is supported for all - // data types, including lists and maps. AttributeValueList can contain only one - // AttributeValue of type String, Number, Binary, String Set, Number Set, or Binary - // Set. If an item contains an AttributeValue of a different type than the one - // provided in the request, the value does not match. For example, {"S":"6"} does - // not equal {"N":"6"}. Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}. - // + // * EQ : Equal. EQ is + // supported for all data types, including lists and maps. AttributeValueList can + // contain only one AttributeValue element of type String, Number, Binary, String + // Set, Number Set, or Binary Set. If an item contains an AttributeValue element of + // a different type than the one provided in the request, the value does not match. + // For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} does not equal + // {"NS":["6", "2", "1"]}. // - // * LE : Less than or equal. AttributeValueList can contain only one - // AttributeValue element of type String, Number, or Binary (not a set type). If an - // item contains an AttributeValue element of a different type than the one - // provided in the request, the value does not match. For example, {"S":"6"} does - // not equal {"N":"6"}. Also, {"N":"6"} does not compare to {"NS":["6", "2", - // "1"]}. - // - // * LT : Less than. AttributeValueList can contain only one - // AttributeValue of type String, Number, or Binary (not a set type). If an item - // contains an AttributeValue element of a different type than the one provided in - // the request, the value does not match. For example, {"S":"6"} does not equal - // {"N":"6"}. Also, {"N":"6"} does not compare to {"NS":["6", "2", "1"]}. - // - // * GE - // : Greater than or equal. AttributeValueList can contain only one AttributeValue - // element of type String, Number, or Binary (not a set type). 
If an item contains - // an AttributeValue element of a different type than the one provided in the + // * NE : Not equal. NE is supported for all data types, + // including lists and maps. AttributeValueList can contain only one AttributeValue + // of type String, Number, Binary, String Set, Number Set, or Binary Set. If an + // item contains an AttributeValue of a different type than the one provided in the // request, the value does not match. For example, {"S":"6"} does not equal - // {"N":"6"}. Also, {"N":"6"} does not compare to {"NS":["6", "2", "1"]}. + // {"N":"6"}. Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}. // - // * GT - // : Greater than. AttributeValueList can contain only one AttributeValue element - // of type String, Number, or Binary (not a set type). If an item contains an + // * LE : Less + // than or equal. AttributeValueList can contain only one AttributeValue element of + // type String, Number, or Binary (not a set type). If an item contains an // AttributeValue element of a different type than the one provided in the request, // the value does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, // {"N":"6"} does not compare to {"NS":["6", "2", "1"]}. // - // * NOT_NULL : The - // attribute exists. NOT_NULL is supported for all data types, including lists and - // maps. This operator tests for the existence of an attribute, not its data type. - // If the data type of attribute "a" is null, and you evaluate it using NOT_NULL, - // the result is a Boolean true. This result is because the attribute "a" exists; - // its data type is not relevant to the NOT_NULL comparison operator. - // - // * NULL : - // The attribute does not exist. NULL is supported for all data types, including - // lists and maps. This operator tests for the nonexistence of an attribute, not - // its data type. If the data type of attribute "a" is null, and you evaluate it - // using NULL, the result is a Boolean false. This is because the attribute "a" - // exists; its data type is not relevant to the NULL comparison operator. - // - // * - // CONTAINS : Checks for a subsequence, or value in a set. AttributeValueList can - // contain only one AttributeValue element of type String, Number, or Binary (not a - // set type). If the target attribute of the comparison is of type String, then the - // operator checks for a substring match. If the target attribute of the comparison - // is of type Binary, then the operator looks for a subsequence of the target that + // * LT : Less than. + // AttributeValueList can contain only one AttributeValue of type String, Number, + // or Binary (not a set type). If an item contains an AttributeValue element of a + // different type than the one provided in the request, the value does not match. + // For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} does not + // compare to {"NS":["6", "2", "1"]}. + // + // * GE : Greater than or equal. + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value does + // not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} does + // not compare to {"NS":["6", "2", "1"]}. + // + // * GT : Greater than. AttributeValueList + // can contain only one AttributeValue element of type String, Number, or Binary + // (not a set type). 
If an item contains an AttributeValue element of a different + // type than the one provided in the request, the value does not match. For + // example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} does not compare to + // {"NS":["6", "2", "1"]}. + // + // * NOT_NULL : The attribute exists. NOT_NULL is + // supported for all data types, including lists and maps. This operator tests for + // the existence of an attribute, not its data type. If the data type of attribute + // "a" is null, and you evaluate it using NOT_NULL, the result is a Boolean true. + // This result is because the attribute "a" exists; its data type is not relevant + // to the NOT_NULL comparison operator. + // + // * NULL : The attribute does not exist. + // NULL is supported for all data types, including lists and maps. This operator + // tests for the nonexistence of an attribute, not its data type. If the data type + // of attribute "a" is null, and you evaluate it using NULL, the result is a + // Boolean false. This is because the attribute "a" exists; its data type is not + // relevant to the NULL comparison operator. + // + // * CONTAINS : Checks for a + // subsequence, or value in a set. AttributeValueList can contain only one + // AttributeValue element of type String, Number, or Binary (not a set type). If + // the target attribute of the comparison is of type String, then the operator + // checks for a substring match. If the target attribute of the comparison is of + // type Binary, then the operator looks for a subsequence of the target that // matches the input. If the target attribute of the comparison is a set ("SS", // "NS", or "BS"), then the operator evaluates to true if it finds an exact match // with any member of the set. CONTAINS is supported for lists: When evaluating "a // CONTAINS b", "a" can be a list; however, "b" cannot be a set, a map, or a // list. // - // * NOT_CONTAINS : Checks for absence of a subsequence, or absence of a + // * NOT_CONTAINS : Checks for absence of a subsequence, or absence of a // value in a set. AttributeValueList can contain only one AttributeValue element // of type String, Number, or Binary (not a set type). If the target attribute of // the comparison is a String, then the operator checks for the absence of a @@ -568,19 +566,19 @@ type Condition struct { // CONTAINS b", "a" can be a list; however, "b" cannot be a set, a map, or a // list. // - // * BEGINS_WITH : Checks for a prefix. AttributeValueList can contain - // only one AttributeValue of type String or Binary (not a Number or a set type). - // The target attribute of the comparison must be of type String or Binary (not a + // * BEGINS_WITH : Checks for a prefix. AttributeValueList can contain only + // one AttributeValue of type String or Binary (not a Number or a set type). The + // target attribute of the comparison must be of type String or Binary (not a // Number or a set type). // - // * IN : Checks for matching elements in a list. + // * IN : Checks for matching elements in a list. // AttributeValueList can contain one or more AttributeValue elements of type // String, Number, or Binary. These attributes are compared against an existing // attribute of an item. If any elements of the input are equal to the item // attribute, the expression evaluates to true. // - // * BETWEEN : Greater than or - // equal to the first value, and less than or equal to the second value. + // * BETWEEN : Greater than or equal + // to the first value, and less than or equal to the second value. 
// AttributeValueList must contain two AttributeValue elements of the same type, // either String, Number, or Binary (not a set type). A target attribute matches if // the target value is greater than, or equal to, the first element and less than, @@ -845,14 +843,14 @@ type Endpoint struct { // comparison evaluates to true, the operation succeeds; if not, the operation // fails. You can use ExpectedAttributeValue in one of two different ways: // -// * -// Use AttributeValueList to specify one or more values to compare against an +// * Use +// AttributeValueList to specify one or more values to compare against an // attribute. Use ComparisonOperator to specify how you want to perform the // comparison. If the comparison evaluates to true, then the conditional operation // succeeds. // -// * Use Value to specify a value that DynamoDB will compare against -// an attribute. If the values match, then ExpectedAttributeValue evaluates to true +// * Use Value to specify a value that DynamoDB will compare against an +// attribute. If the values match, then ExpectedAttributeValue evaluates to true // and the conditional operation succeeds. Optionally, you can also set Exists to // false, indicating that you do not expect to find the attribute value in the // table. In this case, the conditional operation succeeds only if the comparison @@ -882,64 +880,64 @@ type ExpectedAttributeValue struct { // NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN The following are descriptions of each // comparison operator. // - // * EQ : Equal. EQ is supported for all data types, + // * EQ : Equal. EQ is supported for all data types, // including lists and maps. AttributeValueList can contain only one AttributeValue // element of type String, Number, Binary, String Set, Number Set, or Binary Set. // If an item contains an AttributeValue element of a different type than the one // provided in the request, the value does not match. For example, {"S":"6"} does // not equal {"N":"6"}. Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}. // - // - // * NE : Not equal. NE is supported for all data types, including lists and maps. + // * + // NE : Not equal. NE is supported for all data types, including lists and maps. // AttributeValueList can contain only one AttributeValue of type String, Number, // Binary, String Set, Number Set, or Binary Set. If an item contains an // AttributeValue of a different type than the one provided in the request, the // value does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, // {"N":"6"} does not equal {"NS":["6", "2", "1"]}. // - // * LE : Less than or equal. + // * LE : Less than or equal. // AttributeValueList can contain only one AttributeValue element of type String, // Number, or Binary (not a set type). If an item contains an AttributeValue // element of a different type than the one provided in the request, the value does // not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} does // not compare to {"NS":["6", "2", "1"]}. // - // * LT : Less than. AttributeValueList - // can contain only one AttributeValue of type String, Number, or Binary (not a set + // * LT : Less than. AttributeValueList can + // contain only one AttributeValue of type String, Number, or Binary (not a set // type). If an item contains an AttributeValue element of a different type than // the one provided in the request, the value does not match. For example, // {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} does not compare to // {"NS":["6", "2", "1"]}. 
// - // * GE : Greater than or equal. AttributeValueList - // can contain only one AttributeValue element of type String, Number, or Binary - // (not a set type). If an item contains an AttributeValue element of a different - // type than the one provided in the request, the value does not match. For - // example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} does not compare to + // * GE : Greater than or equal. AttributeValueList can + // contain only one AttributeValue element of type String, Number, or Binary (not a + // set type). If an item contains an AttributeValue element of a different type + // than the one provided in the request, the value does not match. For example, + // {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} does not compare to // {"NS":["6", "2", "1"]}. // - // * GT : Greater than. AttributeValueList can contain + // * GT : Greater than. AttributeValueList can contain // only one AttributeValue element of type String, Number, or Binary (not a set // type). If an item contains an AttributeValue element of a different type than // the one provided in the request, the value does not match. For example, // {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} does not compare to // {"NS":["6", "2", "1"]}. // - // * NOT_NULL : The attribute exists. NOT_NULL is + // * NOT_NULL : The attribute exists. NOT_NULL is // supported for all data types, including lists and maps. This operator tests for // the existence of an attribute, not its data type. If the data type of attribute // "a" is null, and you evaluate it using NOT_NULL, the result is a Boolean true. // This result is because the attribute "a" exists; its data type is not relevant // to the NOT_NULL comparison operator. // - // * NULL : The attribute does not exist. + // * NULL : The attribute does not exist. // NULL is supported for all data types, including lists and maps. This operator // tests for the nonexistence of an attribute, not its data type. If the data type // of attribute "a" is null, and you evaluate it using NULL, the result is a // Boolean false. This is because the attribute "a" exists; its data type is not // relevant to the NULL comparison operator. // - // * CONTAINS : Checks for a + // * CONTAINS : Checks for a // subsequence, or value in a set. AttributeValueList can contain only one // AttributeValue element of type String, Number, or Binary (not a set type). If // the target attribute of the comparison is of type String, then the operator @@ -951,7 +949,7 @@ type ExpectedAttributeValue struct { // CONTAINS b", "a" can be a list; however, "b" cannot be a set, a map, or a // list. // - // * NOT_CONTAINS : Checks for absence of a subsequence, or absence of a + // * NOT_CONTAINS : Checks for absence of a subsequence, or absence of a // value in a set. AttributeValueList can contain only one AttributeValue element // of type String, Number, or Binary (not a set type). If the target attribute of // the comparison is a String, then the operator checks for the absence of a @@ -963,19 +961,19 @@ type ExpectedAttributeValue struct { // CONTAINS b", "a" can be a list; however, "b" cannot be a set, a map, or a // list. // - // * BEGINS_WITH : Checks for a prefix. AttributeValueList can contain - // only one AttributeValue of type String or Binary (not a Number or a set type). - // The target attribute of the comparison must be of type String or Binary (not a + // * BEGINS_WITH : Checks for a prefix. 
AttributeValueList can contain only + // one AttributeValue of type String or Binary (not a Number or a set type). The + // target attribute of the comparison must be of type String or Binary (not a // Number or a set type). // - // * IN : Checks for matching elements in a list. + // * IN : Checks for matching elements in a list. // AttributeValueList can contain one or more AttributeValue elements of type // String, Number, or Binary. These attributes are compared against an existing // attribute of an item. If any elements of the input are equal to the item // attribute, the expression evaluates to true. // - // * BETWEEN : Greater than or - // equal to the first value, and less than or equal to the second value. + // * BETWEEN : Greater than or equal + // to the first value, and less than or equal to the second value. // AttributeValueList must contain two AttributeValue elements of the same type, // either String, Number, or Binary (not a set type). A target attribute matches if // the target value is greater than, or equal to, the first element and less than, @@ -988,28 +986,28 @@ type ExpectedAttributeValue struct { // Causes DynamoDB to evaluate the value before attempting a conditional // operation: // - // * If Exists is true, DynamoDB will check to see if that - // attribute value already exists in the table. If it is found, then the operation - // succeeds. If it is not found, the operation fails with a - // ConditionCheckFailedException. + // * If Exists is true, DynamoDB will check to see if that attribute + // value already exists in the table. If it is found, then the operation succeeds. + // If it is not found, the operation fails with a ConditionCheckFailedException. // - // * If Exists is false, DynamoDB assumes that - // the attribute value does not exist in the table. If in fact the value does not - // exist, then the assumption is valid and the operation succeeds. If the value is - // found, despite the assumption that it does not exist, the operation fails with a - // ConditionCheckFailedException. + // * + // If Exists is false, DynamoDB assumes that the attribute value does not exist in + // the table. If in fact the value does not exist, then the assumption is valid and + // the operation succeeds. If the value is found, despite the assumption that it + // does not exist, the operation fails with a ConditionCheckFailedException. // - // The default setting for Exists is true. If you - // supply a Value all by itself, DynamoDB assumes the attribute exists: You don't - // have to set Exists to true, because it is implied. DynamoDB returns a - // ValidationException if: + // The + // default setting for Exists is true. If you supply a Value all by itself, + // DynamoDB assumes the attribute exists: You don't have to set Exists to true, + // because it is implied. DynamoDB returns a ValidationException if: // - // * Exists is true but there is no Value to check. - // (You expect a value to exist, but don't specify what that value is.) + // * Exists is + // true but there is no Value to check. (You expect a value to exist, but don't + // specify what that value is.) // - // * - // Exists is false but you also provide a Value. (You cannot expect an attribute to - // have a value, while also expecting it not to exist.) + // * Exists is false but you also provide a Value. + // (You cannot expect an attribute to have a value, while also expecting it not to + // exist.) Exists *bool // Represents the data for the expected attribute. 
Each attribute value is @@ -1069,18 +1067,17 @@ type GlobalSecondaryIndex struct { // The complete key schema for a global secondary index, which consists of one or // more pairs of attribute names and key types: // - // * HASH - partition key + // * HASH - partition key // - // * - // RANGE - sort key + // * RANGE - + // sort key // - // The partition key of an item is also known as its hash - // attribute. The term "hash attribute" derives from DynamoDB's usage of an - // internal hash function to evenly distribute data items across partitions, based - // on their partition key values. The sort key of an item is also known as its - // range attribute. The term "range attribute" derives from the way DynamoDB stores - // items with the same partition key physically close together, in sorted order by - // the sort key value. + // The partition key of an item is also known as its hash attribute. The + // term "hash attribute" derives from DynamoDB's usage of an internal hash function + // to evenly distribute data items across partitions, based on their partition key + // values. The sort key of an item is also known as its range attribute. The term + // "range attribute" derives from the way DynamoDB stores items with the same + // partition key physically close together, in sorted order by the sort key value. // // This member is required. KeySchema []*KeySchemaElement @@ -1141,15 +1138,15 @@ type GlobalSecondaryIndexDescription struct { // The current state of the global secondary index: // - // * CREATING - The index is + // * CREATING - The index is // being created. // - // * UPDATING - The index is being updated. + // * UPDATING - The index is being updated. // - // * DELETING - - // The index is being deleted. + // * DELETING - The index + // is being deleted. // - // * ACTIVE - The index is ready for use. + // * ACTIVE - The index is ready for use. IndexStatus IndexStatus // The number of items in the specified index. DynamoDB updates this value @@ -1160,18 +1157,17 @@ type GlobalSecondaryIndexDescription struct { // The complete key schema for a global secondary index, which consists of one or // more pairs of attribute names and key types: // - // * HASH - partition key + // * HASH - partition key // - // * - // RANGE - sort key + // * RANGE - + // sort key // - // The partition key of an item is also known as its hash - // attribute. The term "hash attribute" derives from DynamoDB's usage of an - // internal hash function to evenly distribute data items across partitions, based - // on their partition key values. The sort key of an item is also known as its - // range attribute. The term "range attribute" derives from the way DynamoDB stores - // items with the same partition key physically close together, in sorted order by - // the sort key value. + // The partition key of an item is also known as its hash attribute. The + // term "hash attribute" derives from DynamoDB's usage of an internal hash function + // to evenly distribute data items across partitions, based on their partition key + // values. The sort key of an item is also known as its range attribute. The term + // "range attribute" derives from the way DynamoDB stores items with the same + // partition key physically close together, in sorted order by the sort key value. 
KeySchema []*KeySchemaElement // Represents attributes that are copied (projected) from the table into the global @@ -1197,18 +1193,17 @@ type GlobalSecondaryIndexInfo struct { // The complete key schema for a global secondary index, which consists of one or // more pairs of attribute names and key types: // - // * HASH - partition key + // * HASH - partition key // - // * - // RANGE - sort key + // * RANGE - + // sort key // - // The partition key of an item is also known as its hash - // attribute. The term "hash attribute" derives from DynamoDB's usage of an - // internal hash function to evenly distribute data items across partitions, based - // on their partition key values. The sort key of an item is also known as its - // range attribute. The term "range attribute" derives from the way DynamoDB stores - // items with the same partition key physically close together, in sorted order by - // the sort key value. + // The partition key of an item is also known as its hash attribute. The + // term "hash attribute" derives from DynamoDB's usage of an internal hash function + // to evenly distribute data items across partitions, based on their partition key + // values. The sort key of an item is also known as its range attribute. The term + // "range attribute" derives from the way DynamoDB stores items with the same + // partition key physically close together, in sorted order by the sort key value. KeySchema []*KeySchemaElement // Represents attributes that are copied (projected) from the table into the global @@ -1223,29 +1218,29 @@ type GlobalSecondaryIndexInfo struct { // Represents one of the following: // -// * A new global secondary index to be added -// to an existing table. +// * A new global secondary index to be added to +// an existing table. // -// * New provisioned throughput parameters for an -// existing global secondary index. +// * New provisioned throughput parameters for an existing +// global secondary index. // -// * An existing global secondary index to be -// removed from an existing table. +// * An existing global secondary index to be removed from +// an existing table. type GlobalSecondaryIndexUpdate struct { // The parameters required for creating a global secondary index on an existing // table: // - // * IndexName + // * IndexName // - // * KeySchema + // * KeySchema // - // * AttributeDefinitions + // * AttributeDefinitions // - // * - // Projection + // * Projection // - // * ProvisionedThroughput + // * + // ProvisionedThroughput Create *CreateGlobalSecondaryIndexAction // The name of an existing global secondary index to be removed. @@ -1280,16 +1275,15 @@ type GlobalTableDescription struct { // The current state of the global table: // - // * CREATING - The global table is - // being created. + // * CREATING - The global table is being + // created. // - // * UPDATING - The global table is being updated. + // * UPDATING - The global table is being updated. // - // * - // DELETING - The global table is being deleted. + // * DELETING - The + // global table is being deleted. // - // * ACTIVE - The global table - // is ready for use. + // * ACTIVE - The global table is ready for use. GlobalTableStatus GlobalTableStatus // The Regions where the global table has replicas. @@ -1368,36 +1362,36 @@ type KeysAndAttributes struct { // One or more substitution tokens for attribute names in an expression. The // following are some use cases for using ExpressionAttributeNames: // - // * To - // access an attribute whose name conflicts with a DynamoDB reserved word. 
+ // * To access an + // attribute whose name conflicts with a DynamoDB reserved word. // - // * - // To create a placeholder for repeating occurrences of an attribute name in an - // expression. + // * To create a + // placeholder for repeating occurrences of an attribute name in an expression. // - // * To prevent special characters in an attribute name from being - // misinterpreted in an expression. + // * + // To prevent special characters in an attribute name from being misinterpreted in + // an expression. // - // Use the # character in an expression to - // dereference an attribute name. For example, consider the following attribute - // name: + // Use the # character in an expression to dereference an attribute + // name. For example, consider the following attribute name: // - // * Percentile + // * Percentile // - // The name of this attribute conflicts with a reserved - // word, so it cannot be used directly in an expression. (For the complete list of - // reserved words, see Reserved Words + // The + // name of this attribute conflicts with a reserved word, so it cannot be used + // directly in an expression. (For the complete list of reserved words, see + // Reserved Words // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) // in the Amazon DynamoDB Developer Guide). To work around this, you could specify // the following for ExpressionAttributeNames: // - // * {"#P":"Percentile"} + // * {"#P":"Percentile"} // - // You - // could then use this substitution in an expression, as in this example: + // You could + // then use this substitution in an expression, as in this example: // - // * #P - // = :val + // * #P = + // :val // // Tokens that begin with the : character are expression attribute values, // which are placeholders for the actual value at runtime. For more information on @@ -1435,18 +1429,17 @@ type KeySchemaElement struct { // The role that this key attribute will assume: // - // * HASH - partition key + // * HASH - partition key // - // * - // RANGE - sort key + // * RANGE - + // sort key // - // The partition key of an item is also known as its hash - // attribute. The term "hash attribute" derives from DynamoDB's usage of an - // internal hash function to evenly distribute data items across partitions, based - // on their partition key values. The sort key of an item is also known as its - // range attribute. The term "range attribute" derives from the way DynamoDB stores - // items with the same partition key physically close together, in sorted order by - // the sort key value. + // The partition key of an item is also known as its hash attribute. The + // term "hash attribute" derives from DynamoDB's usage of an internal hash function + // to evenly distribute data items across partitions, based on their partition key + // values. The sort key of an item is also known as its range attribute. The term + // "range attribute" derives from the way DynamoDB stores items with the same + // partition key physically close together, in sorted order by the sort key value. // // This member is required. KeyType KeyType @@ -1464,18 +1457,17 @@ type LocalSecondaryIndex struct { // The complete key schema for the local secondary index, consisting of one or more // pairs of attribute names and key types: // - // * HASH - partition key + // * HASH - partition key // - // * RANGE - // - sort key + // * RANGE - sort + // key // - // The partition key of an item is also known as its hash attribute. 
- // The term "hash attribute" derives from DynamoDB's usage of an internal hash - // function to evenly distribute data items across partitions, based on their - // partition key values. The sort key of an item is also known as its range - // attribute. The term "range attribute" derives from the way DynamoDB stores items - // with the same partition key physically close together, in sorted order by the - // sort key value. + // The partition key of an item is also known as its hash attribute. The term + // "hash attribute" derives from DynamoDB's usage of an internal hash function to + // evenly distribute data items across partitions, based on their partition key + // values. The sort key of an item is also known as its range attribute. The term + // "range attribute" derives from the way DynamoDB stores items with the same + // partition key physically close together, in sorted order by the sort key value. // // This member is required. KeySchema []*KeySchemaElement @@ -1510,18 +1502,17 @@ type LocalSecondaryIndexDescription struct { // The complete key schema for the local secondary index, consisting of one or more // pairs of attribute names and key types: // - // * HASH - partition key + // * HASH - partition key // - // * RANGE - // - sort key + // * RANGE - sort + // key // - // The partition key of an item is also known as its hash attribute. - // The term "hash attribute" derives from DynamoDB's usage of an internal hash - // function to evenly distribute data items across partitions, based on their - // partition key values. The sort key of an item is also known as its range - // attribute. The term "range attribute" derives from the way DynamoDB stores items - // with the same partition key physically close together, in sorted order by the - // sort key value. + // The partition key of an item is also known as its hash attribute. The term + // "hash attribute" derives from DynamoDB's usage of an internal hash function to + // evenly distribute data items across partitions, based on their partition key + // values. The sort key of an item is also known as its range attribute. The term + // "range attribute" derives from the way DynamoDB stores items with the same + // partition key physically close together, in sorted order by the sort key value. KeySchema []*KeySchemaElement // Represents attributes that are copied (projected) from the table into the global @@ -1540,18 +1531,17 @@ type LocalSecondaryIndexInfo struct { // The complete key schema for a local secondary index, which consists of one or // more pairs of attribute names and key types: // - // * HASH - partition key + // * HASH - partition key // - // * - // RANGE - sort key + // * RANGE - + // sort key // - // The partition key of an item is also known as its hash - // attribute. The term "hash attribute" derives from DynamoDB's usage of an - // internal hash function to evenly distribute data items across partitions, based - // on their partition key values. The sort key of an item is also known as its - // range attribute. The term "range attribute" derives from the way DynamoDB stores - // items with the same partition key physically close together, in sorted order by - // the sort key value. + // The partition key of an item is also known as its hash attribute. The + // term "hash attribute" derives from DynamoDB's usage of an internal hash function + // to evenly distribute data items across partitions, based on their partition key + // values. The sort key of an item is also known as its range attribute. 
The term + // "range attribute" derives from the way DynamoDB stores items with the same + // partition key physically close together, in sorted order by the sort key value. KeySchema []*KeySchemaElement // Represents attributes that are copied (projected) from the table into the global @@ -1572,13 +1562,13 @@ type PointInTimeRecoveryDescription struct { // The current state of point in time recovery: // - // * ENABLING - Point in time + // * ENABLING - Point in time // recovery is being enabled. // - // * ENABLED - Point in time recovery is enabled. + // * ENABLED - Point in time recovery is enabled. // - // - // * DISABLED - Point in time recovery is disabled. + // * + // DISABLED - Point in time recovery is disabled. PointInTimeRecoveryStatus PointInTimeRecoveryStatus } @@ -1606,15 +1596,15 @@ type Projection struct { // The set of attributes that are projected into the index: // - // * KEYS_ONLY - Only - // the index and primary keys are projected into the index. + // * KEYS_ONLY - Only the + // index and primary keys are projected into the index. // - // * INCLUDE - In - // addition to the attributes described in KEYS_ONLY, the secondary index will - // include other non-key attributes that you specify. + // * INCLUDE - In addition to + // the attributes described in KEYS_ONLY, the secondary index will include other + // non-key attributes that you specify. // - // * ALL - All of the table - // attributes are projected into the index. + // * ALL - All of the table attributes are + // projected into the index. ProjectionType ProjectionType } @@ -1755,15 +1745,15 @@ type ReplicaAutoScalingDescription struct { // The current state of the replica: // - // * CREATING - The replica is being - // created. + // * CREATING - The replica is being created. // - // * UPDATING - The replica is being updated. + // * + // UPDATING - The replica is being updated. // - // * DELETING - The - // replica is being deleted. + // * DELETING - The replica is being + // deleted. // - // * ACTIVE - The replica is ready for use. + // * ACTIVE - The replica is ready for use. ReplicaStatus ReplicaStatus } @@ -1807,21 +1797,21 @@ type ReplicaDescription struct { // The current state of the replica: // - // * CREATING - The replica is being - // created. + // * CREATING - The replica is being created. // - // * UPDATING - The replica is being updated. + // * + // UPDATING - The replica is being updated. // - // * DELETING - The - // replica is being deleted. + // * DELETING - The replica is being + // deleted. // - // * ACTIVE - The replica is ready for use. + // * ACTIVE - The replica is ready for use. // - // * - // REGION_DISABLED - The replica is inaccessible because the AWS Region has been - // disabled. If the AWS Region remains inaccessible for more than 20 hours, - // DynamoDB will remove this replica from the replication group. The replica will - // not be deleted and replication will stop from and to this region. + // * REGION_DISABLED - The + // replica is inaccessible because the AWS Region has been disabled. If the AWS + // Region remains inaccessible for more than 20 hours, DynamoDB will remove this + // replica from the replication group. The replica will not be deleted and + // replication will stop from and to this region. ReplicaStatus ReplicaStatus // Detailed information about the replica status. @@ -1853,16 +1843,15 @@ type ReplicaGlobalSecondaryIndexAutoScalingDescription struct { // The current state of the replica global secondary index: // - // * CREATING - The - // index is being created. 
+ // * CREATING - The index + // is being created. // - // * UPDATING - The index is being updated. + // * UPDATING - The index is being updated. // - // * - // DELETING - The index is being deleted. + // * DELETING - The + // index is being deleted. // - // * ACTIVE - The index is ready for - // use. + // * ACTIVE - The index is ready for use. IndexStatus IndexStatus // Represents the auto scaling settings for a global table or global secondary @@ -1907,16 +1896,16 @@ type ReplicaGlobalSecondaryIndexSettingsDescription struct { // The current status of the global secondary index: // - // * CREATING - The global + // * CREATING - The global // secondary index is being created. // - // * UPDATING - The global secondary index - // is being updated. + // * UPDATING - The global secondary index is + // being updated. // - // * DELETING - The global secondary index is being - // deleted. + // * DELETING - The global secondary index is being deleted. // - // * ACTIVE - The global secondary index is ready for use. + // * + // ACTIVE - The global secondary index is ready for use. IndexStatus IndexStatus // Auto scaling settings for a global secondary index replica's read capacity @@ -1991,15 +1980,15 @@ type ReplicaSettingsDescription struct { // The current state of the Region: // - // * CREATING - The Region is being - // created. + // * CREATING - The Region is being created. // - // * UPDATING - The Region is being updated. + // * + // UPDATING - The Region is being updated. // - // * DELETING - The - // Region is being deleted. + // * DELETING - The Region is being + // deleted. // - // * ACTIVE - The Region is ready for use. + // * ACTIVE - The Region is ready for use. ReplicaStatus ReplicaStatus } @@ -2028,14 +2017,14 @@ type ReplicaSettingsUpdate struct { // Represents one of the following: // -// * A new replica to be added to an existing +// * A new replica to be added to an existing // regional table or global table. This request invokes the CreateTableReplica // action in the destination Region. // -// * New parameters for an existing replica. +// * New parameters for an existing replica. // This request invokes the UpdateTable action in the destination Region. // -// * An +// * An // existing replica to be deleted. The request invokes the DeleteTableReplica // action in the destination Region, deleting the replica and all if its items in // the destination Region. @@ -2053,13 +2042,13 @@ type ReplicationGroupUpdate struct { // Represents one of the following: // -// * A new replica to be added to an existing +// * A new replica to be added to an existing // global table. // -// * New parameters for an existing replica. +// * New parameters for an existing replica. // -// * An existing -// replica to be removed from an existing global table. +// * An existing replica +// to be removed from an existing global table. type ReplicaUpdate struct { // The parameters required for creating a replica on an existing global table. @@ -2120,13 +2109,12 @@ type SourceTableDetails struct { // Controls how you are charged for read and write throughput and how you manage // capacity. This setting can be changed later. // - // * PROVISIONED - Sets the + // * PROVISIONED - Sets the // read/write capacity mode to PROVISIONED. We recommend using PROVISIONED for // predictable workloads. // - // * PAY_PER_REQUEST - Sets the read/write capacity - // mode to PAY_PER_REQUEST. We recommend using PAY_PER_REQUEST for unpredictable - // workloads. 
+ // * PAY_PER_REQUEST - Sets the read/write capacity mode to + // PAY_PER_REQUEST. We recommend using PAY_PER_REQUEST for unpredictable workloads. BillingMode BillingMode // Number of items in the table. Note that this is an approximate value. @@ -2179,18 +2167,18 @@ type SSEDescription struct { // Server-side encryption type. The only supported value is: // - // * KMS - - // Server-side encryption that uses AWS Key Management Service. The key is stored - // in your account and is managed by AWS KMS (AWS KMS charges apply). + // * KMS - Server-side + // encryption that uses AWS Key Management Service. The key is stored in your + // account and is managed by AWS KMS (AWS KMS charges apply). SSEType SSEType // Represents the current state of server-side encryption. The only supported // values are: // - // * ENABLED - Server-side encryption is enabled. + // * ENABLED - Server-side encryption is enabled. // - // * UPDATING - // - Server-side encryption is being updated. + // * UPDATING - + // Server-side encryption is being updated. Status SSEStatus } @@ -2211,9 +2199,9 @@ type SSESpecification struct { // Server-side encryption type. The only supported value is: // - // * KMS - - // Server-side encryption that uses AWS Key Management Service. The key is stored - // in your account and is managed by AWS KMS (AWS KMS charges apply). + // * KMS - Server-side + // encryption that uses AWS Key Management Service. The key is stored in your + // account and is managed by AWS KMS (AWS KMS charges apply). SSEType SSEType } @@ -2230,18 +2218,17 @@ type StreamSpecification struct { // information is written to the stream for this table. Valid values for // StreamViewType are: // - // * KEYS_ONLY - Only the key attributes of the modified - // item are written to the stream. - // - // * NEW_IMAGE - The entire item, as it - // appears after it was modified, is written to the stream. + // * KEYS_ONLY - Only the key attributes of the modified item + // are written to the stream. // - // * OLD_IMAGE - The - // entire item, as it appeared before it was modified, is written to the stream. + // * NEW_IMAGE - The entire item, as it appears after + // it was modified, is written to the stream. // + // * OLD_IMAGE - The entire item, as it + // appeared before it was modified, is written to the stream. // - // * NEW_AND_OLD_IMAGES - Both the new and the old item images of the item are - // written to the stream. + // * NEW_AND_OLD_IMAGES + // - Both the new and the old item images of the item are written to the stream. StreamViewType StreamViewType } @@ -2256,15 +2243,15 @@ type TableAutoScalingDescription struct { // The current state of the table: // - // * CREATING - The table is being created. + // * CREATING - The table is being created. // + // * + // UPDATING - The table is being updated. // - // * UPDATING - The table is being updated. - // - // * DELETING - The table is being + // * DELETING - The table is being // deleted. // - // * ACTIVE - The table is ready for use. + // * ACTIVE - The table is ready for use. TableStatus TableStatus } @@ -2278,10 +2265,10 @@ type TableDescription struct { // attribute in the table and index key schema. Each AttributeDefinition object in // this array is composed of: // - // * AttributeName - The name of the attribute. - // + // * AttributeName - The name of the attribute. // - // * AttributeType - The data type for the attribute. + // * + // AttributeType - The data type for the attribute. 
AttributeDefinitions []*AttributeDefinition // Contains the details for the read/write capacity mode. @@ -2294,7 +2281,7 @@ type TableDescription struct { // The global secondary indexes, if any, on the table. Each index is scoped to a // given partition key value. Each element is composed of: // - // * Backfilling - If + // * Backfilling - If // true, then the index is currently in the backfilling phase. Backfilling occurs // only when a new global secondary index is added to the table. It is the process // by which DynamoDB populates the new index with data from the table. (This @@ -2305,67 +2292,66 @@ type TableDescription struct { // Backfilling is false. (This attribute does not appear for indexes that were // created during a CreateTable operation.) // - // * IndexName - The name of the - // global secondary index. - // - // * IndexSizeBytes - The total size of the global - // secondary index, in bytes. DynamoDB updates this value approximately every six - // hours. Recent changes might not be reflected in this value. + // * IndexName - The name of the global + // secondary index. // - // * IndexStatus - - // The current status of the global secondary index: + // * IndexSizeBytes - The total size of the global secondary + // index, in bytes. DynamoDB updates this value approximately every six hours. + // Recent changes might not be reflected in this value. // - // * CREATING - The - // index is being created. + // * IndexStatus - The + // current status of the global secondary index: // - // * UPDATING - The index is being updated. + // * CREATING - The index is being + // created. // + // * UPDATING - The index is being updated. // - // * DELETING - The index is being deleted. + // * DELETING - The index is + // being deleted. // - // * ACTIVE - The index is ready - // for use. + // * ACTIVE - The index is ready for use. // - // * ItemCount - The number of items in the global secondary index. - // DynamoDB updates this value approximately every six hours. Recent changes might - // not be reflected in this value. + // * ItemCount - The number + // of items in the global secondary index. DynamoDB updates this value + // approximately every six hours. Recent changes might not be reflected in this + // value. // - // * KeySchema - Specifies the complete index - // key schema. The attribute names in the key schema must be between 1 and 255 - // characters (inclusive). The key schema must begin with the same partition key as - // the table. + // * KeySchema - Specifies the complete index key schema. The attribute + // names in the key schema must be between 1 and 255 characters (inclusive). The + // key schema must begin with the same partition key as the table. // - // * Projection - Specifies attributes that are copied (projected) - // from the table into the index. These are in addition to the primary key - // attributes and index key attributes, which are automatically projected. Each - // attribute specification is composed of: + // * Projection - + // Specifies attributes that are copied (projected) from the table into the index. + // These are in addition to the primary key attributes and index key attributes, + // which are automatically projected. Each attribute specification is composed + // of: // - // * ProjectionType - One of the - // following: + // * ProjectionType - One of the following: // - // * KEYS_ONLY - Only the index and primary keys are - // projected into the index. + // * KEYS_ONLY - Only the index and + // primary keys are projected into the index. 
// - // * INCLUDE - In addition to the attributes - // described in KEYS_ONLY, the secondary index will include other non-key - // attributes that you specify. + // * INCLUDE - In addition to the + // attributes described in KEYS_ONLY, the secondary index will include other + // non-key attributes that you specify. // - // * ALL - All of the table attributes - // are projected into the index. + // * ALL - All of the table attributes are + // projected into the index. // - // * NonKeyAttributes - A list of one or - // more non-key attribute names that are projected into the secondary index. The - // total count of attributes provided in NonKeyAttributes, summed across all of the - // secondary indexes, must not exceed 20. If you project the same attribute into - // two different indexes, this counts as two distinct attributes when determining - // the total. + // * NonKeyAttributes - A list of one or more non-key + // attribute names that are projected into the secondary index. The total count of + // attributes provided in NonKeyAttributes, summed across all of the secondary + // indexes, must not exceed 20. If you project the same attribute into two + // different indexes, this counts as two distinct attributes when determining the + // total. // - // * ProvisionedThroughput - The provisioned throughput settings - // for the global secondary index, consisting of read and write capacity units, - // along with data about increases and decreases. + // * ProvisionedThroughput - The provisioned throughput settings for the + // global secondary index, consisting of read and write capacity units, along with + // data about increases and decreases. // - // If the table is in the DELETING - // state, no information about indexes will be returned. + // If the table is in the DELETING state, no + // information about indexes will be returned. GlobalSecondaryIndexes []*GlobalSecondaryIndexDescription // Represents the version of global tables @@ -2380,26 +2366,26 @@ type TableDescription struct { // The primary key structure for the table. Each KeySchemaElement consists of: // + // * + // AttributeName - The name of the attribute. // - // * AttributeName - The name of the attribute. - // - // * KeyType - The role of the + // * KeyType - The role of the // attribute: // - // * HASH - partition key + // * HASH - partition key // - // * RANGE - sort key + // * RANGE - sort key // - // The - // partition key of an item is also known as its hash attribute. The term "hash - // attribute" derives from DynamoDB's usage of an internal hash function to evenly - // distribute data items across partitions, based on their partition key values. - // The sort key of an item is also known as its range attribute. The term "range - // attribute" derives from the way DynamoDB stores items with the same partition - // key physically close together, in sorted order by the sort key value. + // The partition key of an + // item is also known as its hash attribute. The term "hash attribute" derives from + // DynamoDB's usage of an internal hash function to evenly distribute data items + // across partitions, based on their partition key values. The sort key of an item + // is also known as its range attribute. The term "range attribute" derives from + // the way DynamoDB stores items with the same partition key physically close + // together, in sorted order by the sort key value. 
// - // For more - // information about primary keys, see Primary Key + // For more information about + // primary keys, see Primary Key // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html#DataModelPrimaryKey) // in the Amazon DynamoDB Developer Guide. KeySchema []*KeySchemaElement @@ -2413,12 +2399,12 @@ type TableDescription struct { // from another table might have the same timestamp. However, the combination of // the following three elements is guaranteed to be unique: // - // * AWS customer - // ID + // * AWS customer ID // - // * Table name + // * + // Table name // - // * StreamLabel + // * StreamLabel LatestStreamLabel *string // Represents one or more local secondary indexes on the table. Each index is @@ -2427,43 +2413,42 @@ type TableDescription struct { // within a given item collection cannot exceed 10 GB. Each element is composed // of: // - // * IndexName - The name of the local secondary index. + // * IndexName - The name of the local secondary index. // - // * KeySchema - + // * KeySchema - // Specifies the complete index key schema. The attribute names in the key schema // must be between 1 and 255 characters (inclusive). The key schema must begin with // the same partition key as the table. // - // * Projection - Specifies attributes - // that are copied (projected) from the table into the index. These are in addition - // to the primary key attributes and index key attributes, which are automatically + // * Projection - Specifies attributes that + // are copied (projected) from the table into the index. These are in addition to + // the primary key attributes and index key attributes, which are automatically // projected. Each attribute specification is composed of: // - // * - // ProjectionType - One of the following: + // * ProjectionType - One + // of the following: // - // * KEYS_ONLY - Only the index - // and primary keys are projected into the index. + // * KEYS_ONLY - Only the index and primary keys are projected + // into the index. // - // * INCLUDE - Only the - // specified table attributes are projected into the index. The list of projected - // attributes is in NonKeyAttributes. + // * INCLUDE - Only the specified table attributes are projected + // into the index. The list of projected attributes is in NonKeyAttributes. // - // * ALL - All of the table - // attributes are projected into the index. + // * ALL + // - All of the table attributes are projected into the index. // - // * NonKeyAttributes - A list of - // one or more non-key attribute names that are projected into the secondary index. - // The total count of attributes provided in NonKeyAttributes, summed across all of - // the secondary indexes, must not exceed 20. If you project the same attribute - // into two different indexes, this counts as two distinct attributes when - // determining the total. + // * NonKeyAttributes + // - A list of one or more non-key attribute names that are projected into the + // secondary index. The total count of attributes provided in NonKeyAttributes, + // summed across all of the secondary indexes, must not exceed 20. If you project + // the same attribute into two different indexes, this counts as two distinct + // attributes when determining the total. // - // * IndexSizeBytes - Represents the total size of the - // index, in bytes. DynamoDB updates this value approximately every six hours. - // Recent changes might not be reflected in this value. + // * IndexSizeBytes - Represents the total + // size of the index, in bytes. 
DynamoDB updates this value approximately every six + // hours. Recent changes might not be reflected in this value. // - // * ItemCount - + // * ItemCount - // Represents the number of items in the index. DynamoDB updates this value // approximately every six hours. Recent changes might not be reflected in this // value. @@ -2504,26 +2489,26 @@ type TableDescription struct { // The current state of the table: // - // * CREATING - The table is being created. - // + // * CREATING - The table is being created. // - // * UPDATING - The table is being updated. + // * + // UPDATING - The table is being updated. // - // * DELETING - The table is being + // * DELETING - The table is being // deleted. // - // * ACTIVE - The table is ready for use. + // * ACTIVE - The table is ready for use. // - // * + // * // INACCESSIBLE_ENCRYPTION_CREDENTIALS - The AWS KMS key used to encrypt the table // in inaccessible. Table operations may fail due to failure to use the AWS KMS // key. DynamoDB will initiate the table archival process when a table's AWS KMS // key remains inaccessible for more than seven days. // - // * ARCHIVING - The table - // is being archived. Operations are not allowed until archival is complete. + // * ARCHIVING - The table is + // being archived. Operations are not allowed until archival is complete. // - // * + // * // ARCHIVED - The table has been archived. See the ArchivalReason for more // information. TableStatus TableStatus diff --git a/service/dynamodbstreams/api_op_GetShardIterator.go b/service/dynamodbstreams/api_op_GetShardIterator.go index d703ba6fbec..958bc0c9eb7 100644 --- a/service/dynamodbstreams/api_op_GetShardIterator.go +++ b/service/dynamodbstreams/api_op_GetShardIterator.go @@ -41,21 +41,21 @@ type GetShardIteratorInput struct { // Determines how the shard iterator is used to start reading stream records from // the shard: // - // * AT_SEQUENCE_NUMBER - Start reading exactly from the position + // * AT_SEQUENCE_NUMBER - Start reading exactly from the position // denoted by a specific sequence number. // - // * AFTER_SEQUENCE_NUMBER - Start - // reading right after the position denoted by a specific sequence number. + // * AFTER_SEQUENCE_NUMBER - Start reading + // right after the position denoted by a specific sequence number. // - // * - // TRIM_HORIZON - Start reading at the last (untrimmed) stream record, which is the - // oldest record in the shard. In DynamoDB Streams, there is a 24 hour limit on - // data retention. Stream records whose age exceeds this limit are subject to - // removal (trimming) from the stream. + // * TRIM_HORIZON + // - Start reading at the last (untrimmed) stream record, which is the oldest + // record in the shard. In DynamoDB Streams, there is a 24 hour limit on data + // retention. Stream records whose age exceeds this limit are subject to removal + // (trimming) from the stream. // - // * LATEST - Start reading just after the - // most recent stream record in the shard, so that you always read the most recent - // data in the shard. + // * LATEST - Start reading just after the most recent + // stream record in the shard, so that you always read the most recent data in the + // shard. // // This member is required. 
ShardIteratorType types.ShardIteratorType diff --git a/service/dynamodbstreams/types/enums.go b/service/dynamodbstreams/types/enums.go index fbd944ea873..02d35c61637 100644 --- a/service/dynamodbstreams/types/enums.go +++ b/service/dynamodbstreams/types/enums.go @@ -44,10 +44,10 @@ type ShardIteratorType string // Enum values for ShardIteratorType const ( - ShardIteratorTypeTrim_horizon ShardIteratorType = "TRIM_HORIZON" - ShardIteratorTypeLatest ShardIteratorType = "LATEST" - ShardIteratorTypeAt_sequence_number ShardIteratorType = "AT_SEQUENCE_NUMBER" - ShardIteratorTypeAfter_sequence_number ShardIteratorType = "AFTER_SEQUENCE_NUMBER" + ShardIteratorTypeTrimHorizon ShardIteratorType = "TRIM_HORIZON" + ShardIteratorTypeLatest ShardIteratorType = "LATEST" + ShardIteratorTypeAtSequenceNumber ShardIteratorType = "AT_SEQUENCE_NUMBER" + ShardIteratorTypeAfterSequenceNumber ShardIteratorType = "AFTER_SEQUENCE_NUMBER" ) // Values returns all known values for ShardIteratorType. Note that this can be @@ -88,10 +88,10 @@ type StreamViewType string // Enum values for StreamViewType const ( - StreamViewTypeNew_image StreamViewType = "NEW_IMAGE" - StreamViewTypeOld_image StreamViewType = "OLD_IMAGE" - StreamViewTypeNew_and_old_images StreamViewType = "NEW_AND_OLD_IMAGES" - StreamViewTypeKeys_only StreamViewType = "KEYS_ONLY" + StreamViewTypeNewImage StreamViewType = "NEW_IMAGE" + StreamViewTypeOldImage StreamViewType = "OLD_IMAGE" + StreamViewTypeNewAndOldImages StreamViewType = "NEW_AND_OLD_IMAGES" + StreamViewTypeKeysOnly StreamViewType = "KEYS_ONLY" ) // Values returns all known values for StreamViewType. Note that this can be diff --git a/service/dynamodbstreams/types/errors.go b/service/dynamodbstreams/types/errors.go index cad0c190a90..e8a78fda434 100644 --- a/service/dynamodbstreams/types/errors.go +++ b/service/dynamodbstreams/types/errors.go @@ -90,10 +90,10 @@ func (e *ResourceNotFoundException) ErrorFault() smithy.ErrorFault { return smit // whose age exceeds this limit are subject to removal (trimming) from the stream. // You might receive a TrimmedDataAccessException if: // -// * You request a shard +// * You request a shard // iterator with a sequence number older than the trim point (24 hours). // -// * You +// * You // obtain a shard iterator, but before you use the iterator in a GetRecords // request, a stream record in the shard exceeds the 24 hour period and is trimmed. // This causes the iterator to access a record that no longer exists. diff --git a/service/dynamodbstreams/types/types.go b/service/dynamodbstreams/types/types.go index 5e9e7075b28..482b5863f74 100644 --- a/service/dynamodbstreams/types/types.go +++ b/service/dynamodbstreams/types/types.go @@ -84,18 +84,17 @@ type KeySchemaElement struct { // The role that this key attribute will assume: // - // * HASH - partition key + // * HASH - partition key // - // * - // RANGE - sort key + // * RANGE - + // sort key // - // The partition key of an item is also known as its hash - // attribute. The term "hash attribute" derives from DynamoDB's usage of an - // internal hash function to evenly distribute data items across partitions, based - // on their partition key values. The sort key of an item is also known as its - // range attribute. The term "range attribute" derives from the way DynamoDB stores - // items with the same partition key physically close together, in sorted order by - // the sort key value. + // The partition key of an item is also known as its hash attribute. 
The + // term "hash attribute" derives from DynamoDB's usage of an internal hash function + // to evenly distribute data items across partitions, based on their partition key + // values. The sort key of an item is also known as its range attribute. The term + // "range attribute" derives from the way DynamoDB stores items with the same + // partition key physically close together, in sorted order by the sort key value. // // This member is required. KeyType KeyType @@ -117,14 +116,14 @@ type Record struct { // The type of data modification that was performed on the DynamoDB table: // - // * + // * // INSERT - a new item was added to the table. // - // * MODIFY - one or more of an + // * MODIFY - one or more of an // existing item's attributes were modified. // - // * REMOVE - the item was deleted - // from the table + // * REMOVE - the item was deleted from + // the table EventName OperationType // The AWS service from which the stream record originated. For DynamoDB Streams, @@ -141,9 +140,9 @@ type Record struct { // Items that are deleted by the Time to Live process after expiration have the // following fields: // - // * Records[].userIdentity.type "Service" + // * Records[].userIdentity.type "Service" // - // * + // * // Records[].userIdentity.principalId "dynamodb.amazonaws.com" UserIdentity *Identity } @@ -185,12 +184,12 @@ type Stream struct { // from another table might have the same timestamp. However, the combination of // the following three elements is guaranteed to be unique: // - // * the AWS customer + // * the AWS customer // ID. // - // * the table name + // * the table name // - // * the StreamLabel + // * the StreamLabel StreamLabel *string // The DynamoDB table with which the stream is associated. @@ -226,42 +225,42 @@ type StreamDescription struct { // from another table might have the same timestamp. However, the combination of // the following three elements is guaranteed to be unique: // - // * the AWS customer + // * the AWS customer // ID. // - // * the table name + // * the table name // - // * the StreamLabel + // * the StreamLabel StreamLabel *string // Indicates the current status of the stream: // - // * ENABLING - Streams is - // currently being enabled on the DynamoDB table. + // * ENABLING - Streams is currently + // being enabled on the DynamoDB table. // - // * ENABLED - the stream is - // enabled. + // * ENABLED - the stream is enabled. // - // * DISABLING - Streams is currently being disabled on the DynamoDB - // table. + // * + // DISABLING - Streams is currently being disabled on the DynamoDB table. // - // * DISABLED - the stream is disabled. + // * + // DISABLED - the stream is disabled. StreamStatus StreamStatus // Indicates the format of the records within this stream: // - // * KEYS_ONLY - only - // the key attributes of items that were modified in the DynamoDB table. + // * KEYS_ONLY - only the + // key attributes of items that were modified in the DynamoDB table. // - // * - // NEW_IMAGE - entire items from the table, as they appeared after they were - // modified. + // * NEW_IMAGE - + // entire items from the table, as they appeared after they were modified. // - // * OLD_IMAGE - entire items from the table, as they appeared - // before they were modified. + // * + // OLD_IMAGE - entire items from the table, as they appeared before they were + // modified. // - // * NEW_AND_OLD_IMAGES - both the new and the old - // images of the items from the table. 
+ // * NEW_AND_OLD_IMAGES - both the new and the old images of the items + // from the table. StreamViewType StreamViewType // The DynamoDB table with which the stream is associated. @@ -294,16 +293,15 @@ type StreamRecord struct { // The type of data from the modified DynamoDB item that was captured in this // stream record: // - // * KEYS_ONLY - only the key attributes of the modified - // item. + // * KEYS_ONLY - only the key attributes of the modified item. // - // * NEW_IMAGE - the entire item, as it appeared after it was - // modified. + // * + // NEW_IMAGE - the entire item, as it appeared after it was modified. // - // * OLD_IMAGE - the entire item, as it appeared before it was - // modified. + // * OLD_IMAGE + // - the entire item, as it appeared before it was modified. // - // * NEW_AND_OLD_IMAGES - both the new and the old item images of - // the item. + // * NEW_AND_OLD_IMAGES + // - both the new and the old item images of the item. StreamViewType StreamViewType } diff --git a/service/ebs/api_op_StartSnapshot.go b/service/ebs/api_op_StartSnapshot.go index 1f85d8638bc..3b1cedc15ea 100644 --- a/service/ebs/api_op_StartSnapshot.go +++ b/service/ebs/api_op_StartSnapshot.go @@ -90,13 +90,13 @@ type StartSnapshotInput struct { // The amount of time (in minutes) after which the snapshot is automatically // cancelled if: // - // * No blocks are written to the snapshot. + // * No blocks are written to the snapshot. // - // * The snapshot - // is not completed after writing the last block of data. + // * The snapshot is not + // completed after writing the last block of data. // - // If no value is - // specified, the timeout defaults to 60 minutes. + // If no value is specified, the + // timeout defaults to 60 minutes. Timeout *int32 } diff --git a/service/ebs/types/enums.go b/service/ebs/types/enums.go index 355b46d4fc5..1efd35395ed 100644 --- a/service/ebs/types/enums.go +++ b/service/ebs/types/enums.go @@ -6,8 +6,8 @@ type AccessDeniedExceptionReason string // Enum values for AccessDeniedExceptionReason const ( - AccessDeniedExceptionReasonUnauthorized_account AccessDeniedExceptionReason = "UNAUTHORIZED_ACCOUNT" - AccessDeniedExceptionReasonDependency_access_denied AccessDeniedExceptionReason = "DEPENDENCY_ACCESS_DENIED" + AccessDeniedExceptionReasonUnauthorizedAccount AccessDeniedExceptionReason = "UNAUTHORIZED_ACCOUNT" + AccessDeniedExceptionReasonDependencyAccessDenied AccessDeniedExceptionReason = "DEPENDENCY_ACCESS_DENIED" ) // Values returns all known values for AccessDeniedExceptionReason. Note that this @@ -24,7 +24,7 @@ type ChecksumAggregationMethod string // Enum values for ChecksumAggregationMethod const ( - ChecksumAggregationMethodChecksum_aggregation_linear ChecksumAggregationMethod = "LINEAR" + ChecksumAggregationMethodChecksumAggregationLinear ChecksumAggregationMethod = "LINEAR" ) // Values returns all known values for ChecksumAggregationMethod. Note that this @@ -40,7 +40,7 @@ type ChecksumAlgorithm string // Enum values for ChecksumAlgorithm const ( - ChecksumAlgorithmChecksum_algorithm_sha256 ChecksumAlgorithm = "SHA256" + ChecksumAlgorithmChecksumAlgorithmSha256 ChecksumAlgorithm = "SHA256" ) // Values returns all known values for ChecksumAlgorithm. 
Note that this can be @@ -56,8 +56,8 @@ type RequestThrottledExceptionReason string // Enum values for RequestThrottledExceptionReason const ( - RequestThrottledExceptionReasonAccount_throttled RequestThrottledExceptionReason = "ACCOUNT_THROTTLED" - RequestThrottledExceptionReasonDependency_request_throttled RequestThrottledExceptionReason = "DEPENDENCY_REQUEST_THROTTLED" + RequestThrottledExceptionReasonAccountThrottled RequestThrottledExceptionReason = "ACCOUNT_THROTTLED" + RequestThrottledExceptionReasonDependencyRequestThrottled RequestThrottledExceptionReason = "DEPENDENCY_REQUEST_THROTTLED" ) // Values returns all known values for RequestThrottledExceptionReason. Note that @@ -75,8 +75,8 @@ type ResourceNotFoundExceptionReason string // Enum values for ResourceNotFoundExceptionReason const ( - ResourceNotFoundExceptionReasonSnapshot_not_found ResourceNotFoundExceptionReason = "SNAPSHOT_NOT_FOUND" - ResourceNotFoundExceptionReasonDependency_resource_not_found ResourceNotFoundExceptionReason = "DEPENDENCY_RESOURCE_NOT_FOUND" + ResourceNotFoundExceptionReasonSnapshotNotFound ResourceNotFoundExceptionReason = "SNAPSHOT_NOT_FOUND" + ResourceNotFoundExceptionReasonDependencyResourceNotFound ResourceNotFoundExceptionReason = "DEPENDENCY_RESOURCE_NOT_FOUND" ) // Values returns all known values for ResourceNotFoundExceptionReason. Note that @@ -94,7 +94,7 @@ type ServiceQuotaExceededExceptionReason string // Enum values for ServiceQuotaExceededExceptionReason const ( - ServiceQuotaExceededExceptionReasonDependency_service_quota_exceeded ServiceQuotaExceededExceptionReason = "DEPENDENCY_SERVICE_QUOTA_EXCEEDED" + ServiceQuotaExceededExceptionReasonDependencyServiceQuotaExceeded ServiceQuotaExceededExceptionReason = "DEPENDENCY_SERVICE_QUOTA_EXCEEDED" ) // Values returns all known values for ServiceQuotaExceededExceptionReason. 
Note @@ -131,17 +131,17 @@ type ValidationExceptionReason string // Enum values for ValidationExceptionReason const ( - ValidationExceptionReasonInvalid_customer_key ValidationExceptionReason = "INVALID_CUSTOMER_KEY" - ValidationExceptionReasonInvalid_page_token ValidationExceptionReason = "INVALID_PAGE_TOKEN" - ValidationExceptionReasonInvalid_block_token ValidationExceptionReason = "INVALID_BLOCK_TOKEN" - ValidationExceptionReasonInvalid_snapshot_id ValidationExceptionReason = "INVALID_SNAPSHOT_ID" - ValidationExceptionReasonUnrelated_snapshots ValidationExceptionReason = "UNRELATED_SNAPSHOTS" - ValidationExceptionReasonInvalid_block ValidationExceptionReason = "INVALID_BLOCK" - ValidationExceptionReasonInvalid_content_encoding ValidationExceptionReason = "INVALID_CONTENT_ENCODING" - ValidationExceptionReasonInvalid_tag ValidationExceptionReason = "INVALID_TAG" - ValidationExceptionReasonInvalid_dependency_request ValidationExceptionReason = "INVALID_DEPENDENCY_REQUEST" - ValidationExceptionReasonInvalid_parameter_value ValidationExceptionReason = "INVALID_PARAMETER_VALUE" - ValidationExceptionReasonInvalid_volume_size ValidationExceptionReason = "INVALID_VOLUME_SIZE" + ValidationExceptionReasonInvalidCustomerKey ValidationExceptionReason = "INVALID_CUSTOMER_KEY" + ValidationExceptionReasonInvalidPageToken ValidationExceptionReason = "INVALID_PAGE_TOKEN" + ValidationExceptionReasonInvalidBlockToken ValidationExceptionReason = "INVALID_BLOCK_TOKEN" + ValidationExceptionReasonInvalidSnapshotId ValidationExceptionReason = "INVALID_SNAPSHOT_ID" + ValidationExceptionReasonUnrelatedSnapshots ValidationExceptionReason = "UNRELATED_SNAPSHOTS" + ValidationExceptionReasonInvalidBlock ValidationExceptionReason = "INVALID_BLOCK" + ValidationExceptionReasonInvalidContentEncoding ValidationExceptionReason = "INVALID_CONTENT_ENCODING" + ValidationExceptionReasonInvalidTag ValidationExceptionReason = "INVALID_TAG" + ValidationExceptionReasonInvalidDependencyRequest ValidationExceptionReason = "INVALID_DEPENDENCY_REQUEST" + ValidationExceptionReasonInvalidParameterValue ValidationExceptionReason = "INVALID_PARAMETER_VALUE" + ValidationExceptionReasonInvalidVolumeSize ValidationExceptionReason = "INVALID_VOLUME_SIZE" ) // Values returns all known values for ValidationExceptionReason. Note that this diff --git a/service/ec2/api_op_AttachVolume.go b/service/ec2/api_op_AttachVolume.go index 23e41d9db3e..7382d9fbc20 100644 --- a/service/ec2/api_op_AttachVolume.go +++ b/service/ec2/api_op_AttachVolume.go @@ -23,18 +23,18 @@ import ( // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-using-volumes.html). If // a volume has an AWS Marketplace product code: // -// * The volume can be attached -// only to a stopped instance. +// * The volume can be attached only +// to a stopped instance. // -// * AWS Marketplace product codes are copied from -// the volume to the instance. +// * AWS Marketplace product codes are copied from the +// volume to the instance. // -// * You must be subscribed to the product. +// * You must be subscribed to the product. // -// * -// The instance type and operating system of the instance must support the product. -// For example, you can't detach a volume from a Windows instance and attach it to -// a Linux instance. +// * The +// instance type and operating system of the instance must support the product. For +// example, you can't detach a volume from a Windows instance and attach it to a +// Linux instance. 
// // For more information, see Attaching Amazon EBS volumes // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-attaching-volume.html) diff --git a/service/ec2/api_op_CopyImage.go b/service/ec2/api_op_CopyImage.go index 5c9ca3aa7be..2e31a0ea39b 100644 --- a/service/ec2/api_op_CopyImage.go +++ b/service/ec2/api_op_CopyImage.go @@ -84,16 +84,16 @@ type CopyImageInput struct { // Resource Name (ARN), alias name, or alias ARN. When using an alias name, prefix // it with "alias/". For example: // - // * Key ID: + // * Key ID: // 1234abcd-12ab-34cd-56ef-1234567890ab // - // * Key ARN: + // * Key ARN: // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab // + // * + // Alias name: alias/ExampleAlias // - // * Alias name: alias/ExampleAlias - // - // * Alias ARN: + // * Alias ARN: // arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias // // AWS parses KmsKeyId diff --git a/service/ec2/api_op_CopySnapshot.go b/service/ec2/api_op_CopySnapshot.go index 0aaff31a987..03bb45e1324 100644 --- a/service/ec2/api_op_CopySnapshot.go +++ b/service/ec2/api_op_CopySnapshot.go @@ -79,17 +79,17 @@ type CopySnapshotInput struct { // AWS managed CMK for EBS is used. If KmsKeyId is specified, the encrypted state // must be true. You can specify the CMK using any of the following: // - // * Key ID. - // For example, key/1234abcd-12ab-34cd-56ef-1234567890ab. + // * Key ID. For + // example, key/1234abcd-12ab-34cd-56ef-1234567890ab. // - // * Key alias. For - // example, alias/ExampleAlias. + // * Key alias. For example, + // alias/ExampleAlias. // - // * Key ARN. For example, + // * Key ARN. For example, // arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. // - // - // * Alias ARN. For example, + // * + // Alias ARN. For example, // arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. // // AWS authenticates the diff --git a/service/ec2/api_op_CreateCapacityReservation.go b/service/ec2/api_op_CreateCapacityReservation.go index 3573f45a745..60b33d402ef 100644 --- a/service/ec2/api_op_CreateCapacityReservation.go +++ b/service/ec2/api_op_CreateCapacityReservation.go @@ -106,13 +106,13 @@ type CreateCapacityReservationInput struct { // Indicates the way in which the Capacity Reservation ends. A Capacity Reservation // can have one of the following end types: // - // * unlimited - The Capacity - // Reservation remains active until you explicitly cancel it. Do not provide an - // EndDate if the EndDateType is unlimited. + // * unlimited - The Capacity Reservation + // remains active until you explicitly cancel it. Do not provide an EndDate if the + // EndDateType is unlimited. // - // * limited - The Capacity - // Reservation expires automatically at a specified date and time. You must provide - // an EndDate value if the EndDateType value is limited. + // * limited - The Capacity Reservation expires + // automatically at a specified date and time. You must provide an EndDate value if + // the EndDateType value is limited. EndDateType types.EndDateType // Indicates whether the Capacity Reservation supports instances with temporary, @@ -122,16 +122,16 @@ type CreateCapacityReservationInput struct { // Indicates the type of instance launches that the Capacity Reservation accepts. // The options include: // - // * open - The Capacity Reservation automatically - // matches all instances that have matching attributes (instance type, platform, - // and Availability Zone). 
Instances that have matching attributes run in the - // Capacity Reservation automatically without specifying any additional - // parameters. + // * open - The Capacity Reservation automatically matches + // all instances that have matching attributes (instance type, platform, and + // Availability Zone). Instances that have matching attributes run in the Capacity + // Reservation automatically without specifying any additional parameters. // - // * targeted - The Capacity Reservation only accepts instances - // that have matching attributes (instance type, platform, and Availability Zone), - // and explicitly target the Capacity Reservation. This ensures that only permitted - // instances can use the reserved capacity. + // * + // targeted - The Capacity Reservation only accepts instances that have matching + // attributes (instance type, platform, and Availability Zone), and explicitly + // target the Capacity Reservation. This ensures that only permitted instances can + // use the reserved capacity. // // Default: open InstanceMatchCriteria types.InstanceMatchCriteria @@ -142,11 +142,11 @@ type CreateCapacityReservationInput struct { // Indicates the tenancy of the Capacity Reservation. A Capacity Reservation can // have one of the following tenancy settings: // - // * default - The Capacity + // * default - The Capacity // Reservation is created on hardware that is shared with other AWS accounts. // - // - // * dedicated - The Capacity Reservation is created on single-tenant hardware that + // * + // dedicated - The Capacity Reservation is created on single-tenant hardware that // is dedicated to a single AWS account. Tenancy types.CapacityReservationTenancy } diff --git a/service/ec2/api_op_CreateClientVpnEndpoint.go b/service/ec2/api_op_CreateClientVpnEndpoint.go index 619ce3c8515..8e532e5849c 100644 --- a/service/ec2/api_op_CreateClientVpnEndpoint.go +++ b/service/ec2/api_op_CreateClientVpnEndpoint.go @@ -50,16 +50,16 @@ type CreateClientVpnEndpointInput struct { // connection logging, data about client connections is sent to a Cloudwatch Logs // log stream. The following information is logged: // - // * Client connection + // * Client connection // requests // - // * Client connection results (successful and unsuccessful) + // * Client connection results (successful and unsuccessful) // - // * - // Reasons for unsuccessful client connection requests + // * Reasons + // for unsuccessful client connection requests // - // * Client connection - // termination time + // * Client connection termination + // time // // This member is required. ConnectionLogOptions *types.ConnectionLogOptions diff --git a/service/ec2/api_op_CreateClientVpnRoute.go b/service/ec2/api_op_CreateClientVpnRoute.go index 58eb1c37ea0..7b66bf4df94 100644 --- a/service/ec2/api_op_CreateClientVpnRoute.go +++ b/service/ec2/api_op_CreateClientVpnRoute.go @@ -41,17 +41,16 @@ type CreateClientVpnRouteInput struct { // The IPv4 address range, in CIDR notation, of the route destination. 
For // example: // - // * To add a route for Internet access, enter 0.0.0.0/0 + // * To add a route for Internet access, enter 0.0.0.0/0 // - // * To - // add a route for a peered VPC, enter the peered VPC's IPv4 CIDR range + // * To add a + // route for a peered VPC, enter the peered VPC's IPv4 CIDR range // - // * To - // add a route for an on-premises network, enter the AWS Site-to-Site VPN - // connection's IPv4 CIDR range + // * To add a route + // for an on-premises network, enter the AWS Site-to-Site VPN connection's IPv4 + // CIDR range // - // * To add a route for the local network, enter - // the client CIDR range + // * To add a route for the local network, enter the client CIDR range // // This member is required. DestinationCidrBlock *string diff --git a/service/ec2/api_op_CreateCustomerGateway.go b/service/ec2/api_op_CreateCustomerGateway.go index 151b5746a38..9778313bab7 100644 --- a/service/ec2/api_op_CreateCustomerGateway.go +++ b/service/ec2/api_op_CreateCustomerGateway.go @@ -23,15 +23,15 @@ import ( // 4-byte ASN numbers in the range of 1 - 2147483647, with the exception of the // following: // -// * 7224 - reserved in the us-east-1 Region +// * 7224 - reserved in the us-east-1 Region // -// * 9059 - reserved -// in the eu-west-1 Region +// * 9059 - reserved in the +// eu-west-1 Region // -// * 17943 - reserved in the ap-southeast-1 Region +// * 17943 - reserved in the ap-southeast-1 Region // -// -// * 10124 - reserved in the ap-northeast-1 Region +// * 10124 - +// reserved in the ap-northeast-1 Region // // For more information, see AWS // Site-to-Site VPN (https://docs.aws.amazon.com/vpn/latest/s2svpn/VPC_VPN.html) in diff --git a/service/ec2/api_op_CreateDhcpOptions.go b/service/ec2/api_op_CreateDhcpOptions.go index 3c09f5015a6..05845e99f63 100644 --- a/service/ec2/api_op_CreateDhcpOptions.go +++ b/service/ec2/api_op_CreateDhcpOptions.go @@ -17,14 +17,14 @@ import ( // individual DHCP options you can specify. For more information about the options, // see RFC 2132 (http://www.ietf.org/rfc/rfc2132.txt). // -// * domain-name-servers - -// The IP addresses of up to four domain name servers, or AmazonProvidedDNS. The +// * domain-name-servers - The +// IP addresses of up to four domain name servers, or AmazonProvidedDNS. The // default DHCP option set specifies AmazonProvidedDNS. If specifying more than one // domain name server, specify the IP addresses in a single parameter, separated by // commas. To have your instance receive a custom DNS hostname as specified in // domain-name, you must set domain-name-servers to a custom DNS server. // -// * +// * // domain-name - If you're using AmazonProvidedDNS in us-east-1, specify // ec2.internal. If you're using AmazonProvidedDNS in another Region, specify // region.compute.internal (for example, ap-northeast-1.compute.internal). @@ -35,15 +35,15 @@ import ( // in unexpected behavior. If your DHCP options set is associated with a VPC that // has instances with multiple operating systems, specify only one domain name. // -// -// * ntp-servers - The IP addresses of up to four Network Time Protocol (NTP) +// * +// ntp-servers - The IP addresses of up to four Network Time Protocol (NTP) // servers. // -// * netbios-name-servers - The IP addresses of up to four NetBIOS -// name servers. +// * netbios-name-servers - The IP addresses of up to four NetBIOS name +// servers. // -// * netbios-node-type - The NetBIOS node type (1, 2, 4, or 8). 
-// We recommend that you specify 2 (broadcast and multicast are not currently +// * netbios-node-type - The NetBIOS node type (1, 2, 4, or 8). We +// recommend that you specify 2 (broadcast and multicast are not currently // supported). For more information about these node types, see RFC 2132 // (http://www.ietf.org/rfc/rfc2132.txt). // diff --git a/service/ec2/api_op_CreateRoute.go b/service/ec2/api_op_CreateRoute.go index d7a5e2ddba8..e49899dd056 100644 --- a/service/ec2/api_op_CreateRoute.go +++ b/service/ec2/api_op_CreateRoute.go @@ -18,15 +18,15 @@ import ( // IPv4 address 192.0.2.3, and the route table includes the following two IPv4 // routes: // -// * 192.0.2.0/24 (goes to some target A) +// * 192.0.2.0/24 (goes to some target A) // -// * 192.0.2.0/28 (goes to -// some target B) +// * 192.0.2.0/28 (goes to some +// target B) // -// Both routes apply to the traffic destined for 192.0.2.3. -// However, the second route in the list covers a smaller number of IP addresses -// and is therefore more specific, so we use that route to determine where to -// target the traffic. For more information about route tables, see Route Tables +// Both routes apply to the traffic destined for 192.0.2.3. However, the +// second route in the list covers a smaller number of IP addresses and is +// therefore more specific, so we use that route to determine where to target the +// traffic. For more information about route tables, see Route Tables // (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Route_Tables.html) in the // Amazon Virtual Private Cloud User Guide. func (c *Client) CreateRoute(ctx context.Context, params *CreateRouteInput, optFns ...func(*Options)) (*CreateRouteOutput, error) { diff --git a/service/ec2/api_op_CreateVolume.go b/service/ec2/api_op_CreateVolume.go index c023446f79b..0f1c7392cb0 100644 --- a/service/ec2/api_op_CreateVolume.go +++ b/service/ec2/api_op_CreateVolume.go @@ -85,17 +85,17 @@ type CreateVolumeInput struct { // AWS managed CMK for EBS is used. If KmsKeyId is specified, the encrypted state // must be true. You can specify the CMK using any of the following: // - // * Key ID. - // For example, key/1234abcd-12ab-34cd-56ef-1234567890ab. + // * Key ID. For + // example, key/1234abcd-12ab-34cd-56ef-1234567890ab. // - // * Key alias. For - // example, alias/ExampleAlias. + // * Key alias. For example, + // alias/ExampleAlias. // - // * Key ARN. For example, + // * Key ARN. For example, // arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. // - // - // * Alias ARN. For example, + // * + // Alias ARN. For example, // arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. // // AWS authenticates the diff --git a/service/ec2/api_op_DescribeAccountAttributes.go b/service/ec2/api_op_DescribeAccountAttributes.go index 9ae2781aa4f..968e6f114e0 100644 --- a/service/ec2/api_op_DescribeAccountAttributes.go +++ b/service/ec2/api_op_DescribeAccountAttributes.go @@ -14,29 +14,29 @@ import ( // Describes attributes of your AWS account. The following are the supported // account attributes: // -// * supported-platforms: Indicates whether your account -// can launch instances into EC2-Classic and EC2-VPC, or only into EC2-VPC. +// * supported-platforms: Indicates whether your account can +// launch instances into EC2-Classic and EC2-VPC, or only into EC2-VPC. // -// * +// * // default-vpc: The ID of the default VPC for your account, or none. // -// * +// * // max-instances: This attribute is no longer supported. 
The returned value does // not reflect your actual vCPU limit for running On-Demand Instances. For more // information, see On-Demand Instance Limits // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-on-demand-instances.html#ec2-on-demand-instances-limits) // in the Amazon Elastic Compute Cloud User Guide. // -// * +// * // vpc-max-security-groups-per-interface: The maximum number of security groups // that you can assign to a network interface. // -// * max-elastic-ips: The maximum -// number of Elastic IP addresses that you can allocate for use with EC2-Classic. +// * max-elastic-ips: The maximum +// number of Elastic IP addresses that you can allocate for use with +// EC2-Classic. // -// -// * vpc-max-elastic-ips: The maximum number of Elastic IP addresses that you can -// allocate for use with EC2-VPC. +// * vpc-max-elastic-ips: The maximum number of Elastic IP addresses +// that you can allocate for use with EC2-VPC. func (c *Client) DescribeAccountAttributes(ctx context.Context, params *DescribeAccountAttributesInput, optFns ...func(*Options)) (*DescribeAccountAttributesOutput, error) { if params == nil { params = &DescribeAccountAttributesInput{} diff --git a/service/ec2/api_op_DescribeAddresses.go b/service/ec2/api_op_DescribeAddresses.go index 2e54c839f17..152d48ea09f 100644 --- a/service/ec2/api_op_DescribeAddresses.go +++ b/service/ec2/api_op_DescribeAddresses.go @@ -44,44 +44,43 @@ type DescribeAddressesInput struct { // One or more filters. Filter names and values are case-sensitive. // - // * + // * // allocation-id - [EC2-VPC] The allocation ID for the address. // - // * - // association-id - [EC2-VPC] The association ID for the address. + // * association-id - + // [EC2-VPC] The association ID for the address. // - // * domain - - // Indicates whether the address is for use in EC2-Classic (standard) or in a VPC - // (vpc). + // * domain - Indicates whether the + // address is for use in EC2-Classic (standard) or in a VPC (vpc). // - // * instance-id - The ID of the instance the address is associated - // with, if any. + // * instance-id - + // The ID of the instance the address is associated with, if any. // - // * network-border-group - A unique set of Availability Zones, - // Local Zones, or Wavelength Zones from where AWS advertises IP addresses. + // * + // network-border-group - A unique set of Availability Zones, Local Zones, or + // Wavelength Zones from where AWS advertises IP addresses. // - // * - // network-interface-id - [EC2-VPC] The ID of the network interface that the - // address is associated with, if any. + // * network-interface-id + // - [EC2-VPC] The ID of the network interface that the address is associated with, + // if any. // - // * network-interface-owner-id - The AWS - // account ID of the owner. + // * network-interface-owner-id - The AWS account ID of the owner. // - // * private-ip-address - [EC2-VPC] The private IP - // address associated with the Elastic IP address. + // * + // private-ip-address - [EC2-VPC] The private IP address associated with the + // Elastic IP address. // - // * public-ip - The Elastic - // IP address, or the carrier IP address. + // * public-ip - The Elastic IP address, or the carrier IP + // address. // - // * tag: - The key/value combination - // of a tag assigned to the resource. Use the tag key in the filter name and the - // tag value as the filter value. 
For example, to find all resources that have a - // tag with the key Owner and the value TeamA, specify tag:Owner for the filter - // name and TeamA for the filter value. + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. For + // example, to find all resources that have a tag with the key Owner and the value + // TeamA, specify tag:Owner for the filter name and TeamA for the filter value. // - // * tag-key - The key of a tag assigned - // to the resource. Use this filter to find all resources assigned a tag with a - // specific key, regardless of the tag value. + // * + // tag-key - The key of a tag assigned to the resource. Use this filter to find all + // resources assigned a tag with a specific key, regardless of the tag value. Filters []*types.Filter // One or more Elastic IP addresses. Default: Describes all your Elastic IP diff --git a/service/ec2/api_op_DescribeAvailabilityZones.go b/service/ec2/api_op_DescribeAvailabilityZones.go index 000197208a5..ca8fbee3f6e 100644 --- a/service/ec2/api_op_DescribeAvailabilityZones.go +++ b/service/ec2/api_op_DescribeAvailabilityZones.go @@ -48,45 +48,45 @@ type DescribeAvailabilityZonesInput struct { // The filters. // - // * group-name - For Availability Zones, use the Region name. - // For Local Zones, use the name of the group associated with the Local Zone (for + // * group-name - For Availability Zones, use the Region name. For + // Local Zones, use the name of the group associated with the Local Zone (for // example, us-west-2-lax-1) For Wavelength Zones, use the name of the group // associated with the Wavelength Zone (for example, us-east-1-wl1-bos-wlz-1). // + // * + // message - The Zone message. // - // * message - The Zone message. + // * opt-in-status - The opt-in status (opted-in, and + // not-opted-in | opt-in-not-required). // - // * opt-in-status - The opt-in status - // (opted-in, and not-opted-in | opt-in-not-required). + // * parent-zoneID - The ID of the zone that + // handles some of the Local Zone and Wavelength Zone control plane operations, + // such as API calls. // - // * parent-zoneID - The - // ID of the zone that handles some of the Local Zone and Wavelength Zone control - // plane operations, such as API calls. + // * parent-zoneName - The ID of the zone that handles some of + // the Local Zone and Wavelength Zone control plane operations, such as API + // calls. // - // * parent-zoneName - The ID of the zone - // that handles some of the Local Zone and Wavelength Zone control plane - // operations, such as API calls. + // * region-name - The name of the Region for the Zone (for example, + // us-east-1). // - // * region-name - The name of the Region for - // the Zone (for example, us-east-1). + // * state - The state of the Availability Zone, the Local Zone, or + // the Wavelength Zone (available | information | impaired | unavailable). // - // * state - The state of the Availability - // Zone, the Local Zone, or the Wavelength Zone (available | information | impaired - // | unavailable). + // * + // zone-id - The ID of the Availability Zone (for example, use1-az1), the Local + // Zone (for example, usw2-lax1-az1), or the Wavelength Zone (for example, + // us-east-1-wl1-bos-wlz-1). // - // * zone-id - The ID of the Availability Zone (for example, - // use1-az1), the Local Zone (for example, usw2-lax1-az1), or the Wavelength Zone - // (for example, us-east-1-wl1-bos-wlz-1). 
+ // * zone-type - The type of zone, for example, + // local-zone. // - // * zone-type - The type of zone, for - // example, local-zone. + // * zone-name - The name of the Availability Zone (for example, + // us-east-1a), the Local Zone (for example, us-west-2-lax-1a), or the Wavelength + // Zone (for example, us-east-1-wl1-bos-wlz-1). // - // * zone-name - The name of the Availability Zone (for - // example, us-east-1a), the Local Zone (for example, us-west-2-lax-1a), or the - // Wavelength Zone (for example, us-east-1-wl1-bos-wlz-1). - // - // * zone-type - The - // type of zone, for example, local-zone. + // * zone-type - The type of zone, + // for example, local-zone. Filters []*types.Filter // The IDs of the Availability Zones, Local Zones, and Wavelength Zones. diff --git a/service/ec2/api_op_DescribeBundleTasks.go b/service/ec2/api_op_DescribeBundleTasks.go index 9fb5433423b..b18b0b16c38 100644 --- a/service/ec2/api_op_DescribeBundleTasks.go +++ b/service/ec2/api_op_DescribeBundleTasks.go @@ -44,34 +44,33 @@ type DescribeBundleTasksInput struct { // The filters. // - // * bundle-id - The ID of the bundle task. + // * bundle-id - The ID of the bundle task. // - // * error-code - - // If the task failed, the error code returned. + // * error-code - If the + // task failed, the error code returned. // - // * error-message - If the task - // failed, the error message returned. + // * error-message - If the task failed, the + // error message returned. // - // * instance-id - The ID of the - // instance. + // * instance-id - The ID of the instance. // - // * progress - The level of task completion, as a percentage (for - // example, 20%). + // * progress - + // The level of task completion, as a percentage (for example, 20%). // - // * s3-bucket - The Amazon S3 bucket to store the AMI. + // * s3-bucket - + // The Amazon S3 bucket to store the AMI. // - // * - // s3-prefix - The beginning of the AMI name. + // * s3-prefix - The beginning of the AMI + // name. // - // * start-time - The time the task - // started (for example, 2013-09-15T17:15:20.000Z). + // * start-time - The time the task started (for example, + // 2013-09-15T17:15:20.000Z). // - // * state - The state of the - // task (pending | waiting-for-shutdown | bundling | storing | cancelling | - // complete | failed). + // * state - The state of the task (pending | + // waiting-for-shutdown | bundling | storing | cancelling | complete | failed). // - // * update-time - The time of the most recent update for - // the task. + // * + // update-time - The time of the most recent update for the task. Filters []*types.Filter } diff --git a/service/ec2/api_op_DescribeCapacityReservations.go b/service/ec2/api_op_DescribeCapacityReservations.go index e7352ba16c5..9cde0cffebf 100644 --- a/service/ec2/api_op_DescribeCapacityReservations.go +++ b/service/ec2/api_op_DescribeCapacityReservations.go @@ -41,85 +41,84 @@ type DescribeCapacityReservationsInput struct { // One or more filters. // - // * instance-type - The type of instance for which the + // * instance-type - The type of instance for which the // Capacity Reservation reserves capacity. // - // * owner-id - The ID of the AWS - // account that owns the Capacity Reservation. + // * owner-id - The ID of the AWS account + // that owns the Capacity Reservation. // - // * availability-zone-id - The - // Availability Zone ID of the Capacity Reservation. + // * availability-zone-id - The Availability + // Zone ID of the Capacity Reservation. 
// - // * instance-platform - The - // type of operating system for which the Capacity Reservation reserves capacity. + // * instance-platform - The type of + // operating system for which the Capacity Reservation reserves capacity. // + // * + // availability-zone - The Availability Zone ID of the Capacity Reservation. // - // * availability-zone - The Availability Zone ID of the Capacity Reservation. - // - // - // * tenancy - Indicates the tenancy of the Capacity Reservation. A Capacity + // * + // tenancy - Indicates the tenancy of the Capacity Reservation. A Capacity // Reservation can have one of the following tenancy settings: // - // * default - - // The Capacity Reservation is created on hardware that is shared with other AWS + // * default - The + // Capacity Reservation is created on hardware that is shared with other AWS // accounts. // - // * dedicated - The Capacity Reservation is created on - // single-tenant hardware that is dedicated to a single AWS account. - // - // * state - - // The current state of the Capacity Reservation. A Capacity Reservation can be in - // one of the following states: + // * dedicated - The Capacity Reservation is created on single-tenant + // hardware that is dedicated to a single AWS account. // - // * active- The Capacity Reservation is - // active and the capacity is available for your use. + // * state - The current state + // of the Capacity Reservation. A Capacity Reservation can be in one of the + // following states: // - // * expired - The - // Capacity Reservation expired automatically at the date and time specified in - // your request. The reserved capacity is no longer available for your use. + // * active- The Capacity Reservation is active and the capacity + // is available for your use. // - // - // * cancelled - The Capacity Reservation was manually cancelled. The reserved + // * expired - The Capacity Reservation expired + // automatically at the date and time specified in your request. The reserved // capacity is no longer available for your use. // - // * pending - The Capacity - // Reservation request was successful but the capacity provisioning is still - // pending. + // * cancelled - The Capacity + // Reservation was manually cancelled. The reserved capacity is no longer available + // for your use. + // + // * pending - The Capacity Reservation request was successful but + // the capacity provisioning is still pending. // - // * failed - The Capacity Reservation request has failed. A - // request might fail due to invalid request parameters, capacity constraints, or - // instance limit constraints. Failed requests are retained for 60 minutes. + // * failed - The Capacity Reservation + // request has failed. A request might fail due to invalid request parameters, + // capacity constraints, or instance limit constraints. Failed requests are + // retained for 60 minutes. // - // * - // end-date - The date and time at which the Capacity Reservation expires. When a - // Capacity Reservation expires, the reserved capacity is released and you can no - // longer launch instances into it. The Capacity Reservation's state changes to - // expired when it reaches its end date and time. + // * end-date - The date and time at which the Capacity + // Reservation expires. When a Capacity Reservation expires, the reserved capacity + // is released and you can no longer launch instances into it. The Capacity + // Reservation's state changes to expired when it reaches its end date and time. 
// - // * end-date-type - Indicates - // the way in which the Capacity Reservation ends. A Capacity Reservation can have - // one of the following end types: + // * + // end-date-type - Indicates the way in which the Capacity Reservation ends. A + // Capacity Reservation can have one of the following end types: // - // * unlimited - The Capacity Reservation - // remains active until you explicitly cancel it. + // * unlimited - The + // Capacity Reservation remains active until you explicitly cancel it. // - // * limited - The Capacity - // Reservation expires automatically at a specified date and time. + // * limited - + // The Capacity Reservation expires automatically at a specified date and time. // - // * + // * // instance-match-criteria - Indicates the type of instance launches that the // Capacity Reservation accepts. The options include: // - // * open - The - // Capacity Reservation accepts all instances that have matching attributes - // (instance type, platform, and Availability Zone). Instances that have matching - // attributes launch into the Capacity Reservation automatically without specifying - // any additional parameters. + // * open - The Capacity + // Reservation accepts all instances that have matching attributes (instance type, + // platform, and Availability Zone). Instances that have matching attributes launch + // into the Capacity Reservation automatically without specifying any additional + // parameters. // - // * targeted - The Capacity Reservation only - // accepts instances that have matching attributes (instance type, platform, and - // Availability Zone), and explicitly target the Capacity Reservation. This ensures - // that only permitted instances can use the reserved capacity. + // * targeted - The Capacity Reservation only accepts instances that + // have matching attributes (instance type, platform, and Availability Zone), and + // explicitly target the Capacity Reservation. This ensures that only permitted + // instances can use the reserved capacity. Filters []*types.Filter // The maximum number of results to return for the request in a single page. The diff --git a/service/ec2/api_op_DescribeCarrierGateways.go b/service/ec2/api_op_DescribeCarrierGateways.go index 268cd291a32..17c5789fd54 100644 --- a/service/ec2/api_op_DescribeCarrierGateways.go +++ b/service/ec2/api_op_DescribeCarrierGateways.go @@ -40,27 +40,26 @@ type DescribeCarrierGatewaysInput struct { // One or more filters. // - // * carrier-gateway-id - The ID of the carrier - // gateway. + // * carrier-gateway-id - The ID of the carrier gateway. // - // * state - The state of the carrier gateway (pending | failed | - // available | deleting | deleted). + // * + // state - The state of the carrier gateway (pending | failed | available | + // deleting | deleted). // - // * owner-id - The AWS account ID of the - // owner of the carrier gateway. + // * owner-id - The AWS account ID of the owner of the + // carrier gateway. // - // * tag: - The key/value combination of a tag - // assigned to the resource. Use the tag key in the filter name and the tag value - // as the filter value. For example, to find all resources that have a tag with the - // key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA - // for the filter value. + // * tag: - The key/value combination of a tag assigned to the + // resource. Use the tag key in the filter name and the tag value as the filter + // value. 
For example, to find all resources that have a tag with the key Owner and + // the value TeamA, specify tag:Owner for the filter name and TeamA for the filter + // value. // - // * tag-key - The key of a tag assigned to the - // resource. Use this filter to find all resources assigned a tag with a specific - // key, regardless of the tag value. + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of the tag + // value. // - // * vpc-id - The ID of the VPC associated - // with the carrier gateway. + // * vpc-id - The ID of the VPC associated with the carrier gateway. Filters []*types.Filter // The maximum number of results to return with a single call. To retrieve the diff --git a/service/ec2/api_op_DescribeClassicLinkInstances.go b/service/ec2/api_op_DescribeClassicLinkInstances.go index b1a85347471..67c9939e935 100644 --- a/service/ec2/api_op_DescribeClassicLinkInstances.go +++ b/service/ec2/api_op_DescribeClassicLinkInstances.go @@ -40,24 +40,24 @@ type DescribeClassicLinkInstancesInput struct { // One or more filters. // - // * group-id - The ID of a VPC security group that's + // * group-id - The ID of a VPC security group that's // associated with the instance. // - // * instance-id - The ID of the instance. + // * instance-id - The ID of the instance. // + // * tag: - + // The key/value combination of a tag assigned to the resource. Use the tag key in + // the filter name and the tag value as the filter value. For example, to find all + // resources that have a tag with the key Owner and the value TeamA, specify + // tag:Owner for the filter name and TeamA for the filter value. // - // * tag: - The key/value combination of a tag assigned to the resource. Use the - // tag key in the filter name and the tag value as the filter value. For example, - // to find all resources that have a tag with the key Owner and the value TeamA, - // specify tag:Owner for the filter name and TeamA for the filter value. + // * tag-key - The + // key of a tag assigned to the resource. Use this filter to find all resources + // assigned a tag with a specific key, regardless of the tag value. // - // * - // tag-key - The key of a tag assigned to the resource. Use this filter to find all - // resources assigned a tag with a specific key, regardless of the tag value. - // - // - // * vpc-id - The ID of the VPC to which the instance is linked. vpc-id - The ID of - // the VPC that the instance is linked to. + // * vpc-id - The + // ID of the VPC to which the instance is linked. vpc-id - The ID of the VPC that + // the instance is linked to. Filters []*types.Filter // One or more instance IDs. Must be instances linked to a VPC through ClassicLink. diff --git a/service/ec2/api_op_DescribeClientVpnAuthorizationRules.go b/service/ec2/api_op_DescribeClientVpnAuthorizationRules.go index 4298151973f..936fec169f9 100644 --- a/service/ec2/api_op_DescribeClientVpnAuthorizationRules.go +++ b/service/ec2/api_op_DescribeClientVpnAuthorizationRules.go @@ -42,15 +42,14 @@ type DescribeClientVpnAuthorizationRulesInput struct { // One or more filters. Filter names and values are case-sensitive. // - // * - // description - The description of the authorization rule. + // * description + // - The description of the authorization rule. // - // * destination-cidr - // - The CIDR of the network to which the authorization rule applies. + // * destination-cidr - The CIDR of + // the network to which the authorization rule applies. 
// - // * - // group-id - The ID of the Active Directory group to which the authorization rule - // grants access. + // * group-id - The ID of the + // Active Directory group to which the authorization rule grants access. Filters []*types.Filter // The maximum number of results to return for the request in a single page. The diff --git a/service/ec2/api_op_DescribeClientVpnConnections.go b/service/ec2/api_op_DescribeClientVpnConnections.go index 7b817037983..0dcc8d793b4 100644 --- a/service/ec2/api_op_DescribeClientVpnConnections.go +++ b/service/ec2/api_op_DescribeClientVpnConnections.go @@ -43,10 +43,10 @@ type DescribeClientVpnConnectionsInput struct { // One or more filters. Filter names and values are case-sensitive. // - // * + // * // connection-id - The ID of the connection. // - // * username - For Active Directory + // * username - For Active Directory // client authentication, the user name of the client who established the client // connection. Filters []*types.Filter diff --git a/service/ec2/api_op_DescribeClientVpnEndpoints.go b/service/ec2/api_op_DescribeClientVpnEndpoints.go index 0e52317d915..b58bb52c6ea 100644 --- a/service/ec2/api_op_DescribeClientVpnEndpoints.go +++ b/service/ec2/api_op_DescribeClientVpnEndpoints.go @@ -40,11 +40,11 @@ type DescribeClientVpnEndpointsInput struct { // One or more filters. Filter names and values are case-sensitive. // - // * - // endpoint-id - The ID of the Client VPN endpoint. + // * endpoint-id + // - The ID of the Client VPN endpoint. // - // * transport-protocol - The - // transport protocol (tcp | udp). + // * transport-protocol - The transport + // protocol (tcp | udp). Filters []*types.Filter // The maximum number of results to return for the request in a single page. The diff --git a/service/ec2/api_op_DescribeClientVpnRoutes.go b/service/ec2/api_op_DescribeClientVpnRoutes.go index f9b5d8ce867..541c86783bb 100644 --- a/service/ec2/api_op_DescribeClientVpnRoutes.go +++ b/service/ec2/api_op_DescribeClientVpnRoutes.go @@ -42,14 +42,14 @@ type DescribeClientVpnRoutesInput struct { // One or more filters. Filter names and values are case-sensitive. // - // * + // * // destination-cidr - The CIDR of the route destination. // - // * origin - How the - // route was associated with the Client VPN endpoint (associate | add-route). + // * origin - How the route + // was associated with the Client VPN endpoint (associate | add-route). // - // - // * target-subnet - The ID of the subnet through which traffic is routed. + // * + // target-subnet - The ID of the subnet through which traffic is routed. Filters []*types.Filter // The maximum number of results to return for the request in a single page. The diff --git a/service/ec2/api_op_DescribeClientVpnTargetNetworks.go b/service/ec2/api_op_DescribeClientVpnTargetNetworks.go index c5803e10c80..1460fb317b9 100644 --- a/service/ec2/api_op_DescribeClientVpnTargetNetworks.go +++ b/service/ec2/api_op_DescribeClientVpnTargetNetworks.go @@ -45,14 +45,14 @@ type DescribeClientVpnTargetNetworksInput struct { // One or more filters. Filter names and values are case-sensitive. // - // * + // * // association-id - The ID of the association. // - // * target-network-id - The ID of - // the subnet specified as the target network. + // * target-network-id - The ID of the + // subnet specified as the target network. // - // * vpc-id - The ID of the VPC in - // which the target network is located. + // * vpc-id - The ID of the VPC in which + // the target network is located. 
Filters []*types.Filter // The maximum number of results to return for the request in a single page. The diff --git a/service/ec2/api_op_DescribeCoipPools.go b/service/ec2/api_op_DescribeCoipPools.go index 0c77e401221..58d8f719e5e 100644 --- a/service/ec2/api_op_DescribeCoipPools.go +++ b/service/ec2/api_op_DescribeCoipPools.go @@ -38,10 +38,10 @@ type DescribeCoipPoolsInput struct { // The filters. The following are the possible values: // - // * coip-pool.pool-id + // * coip-pool.pool-id // - // - // * coip-pool.local-gateway-route-table-id + // * + // coip-pool.local-gateway-route-table-id Filters []*types.Filter // The maximum number of results to return with a single call. To retrieve the diff --git a/service/ec2/api_op_DescribeCustomerGateways.go b/service/ec2/api_op_DescribeCustomerGateways.go index 5b395d496de..53906656715 100644 --- a/service/ec2/api_op_DescribeCustomerGateways.go +++ b/service/ec2/api_op_DescribeCustomerGateways.go @@ -44,30 +44,30 @@ type DescribeCustomerGatewaysInput struct { // One or more filters. // - // * bgp-asn - The customer gateway's Border Gateway - // Protocol (BGP) Autonomous System Number (ASN). + // * bgp-asn - The customer gateway's Border Gateway Protocol + // (BGP) Autonomous System Number (ASN). // - // * customer-gateway-id - The - // ID of the customer gateway. + // * customer-gateway-id - The ID of the + // customer gateway. // - // * ip-address - The IP address of the customer - // gateway's Internet-routable external interface. + // * ip-address - The IP address of the customer gateway's + // Internet-routable external interface. // - // * state - The state of the - // customer gateway (pending | available | deleting | deleted). + // * state - The state of the customer + // gateway (pending | available | deleting | deleted). // - // * type - The - // type of customer gateway. Currently, the only supported type is ipsec.1. + // * type - The type of + // customer gateway. Currently, the only supported type is ipsec.1. // - // * - // tag: - The key/value combination of a tag assigned to the resource. Use the tag - // key in the filter name and the tag value as the filter value. For example, to - // find all resources that have a tag with the key Owner and the value TeamA, - // specify tag:Owner for the filter name and TeamA for the filter value. + // * tag: - The + // key/value combination of a tag assigned to the resource. Use the tag key in the + // filter name and the tag value as the filter value. For example, to find all + // resources that have a tag with the key Owner and the value TeamA, specify + // tag:Owner for the filter name and TeamA for the filter value. // - // * - // tag-key - The key of a tag assigned to the resource. Use this filter to find all - // resources assigned a tag with a specific key, regardless of the tag value. + // * tag-key - The + // key of a tag assigned to the resource. Use this filter to find all resources + // assigned a tag with a specific key, regardless of the tag value. Filters []*types.Filter } diff --git a/service/ec2/api_op_DescribeDhcpOptions.go b/service/ec2/api_op_DescribeDhcpOptions.go index 35d44669253..78f6681a483 100644 --- a/service/ec2/api_op_DescribeDhcpOptions.go +++ b/service/ec2/api_op_DescribeDhcpOptions.go @@ -44,26 +44,26 @@ type DescribeDhcpOptionsInput struct { // One or more filters. // - // * dhcp-options-id - The ID of a DHCP options set. + // * dhcp-options-id - The ID of a DHCP options set. // + // * key - + // The key for one of the options (for example, domain-name). 
// - // * key - The key for one of the options (for example, domain-name). + // * value - The value + // for one of the options. // - // * value - // - The value for one of the options. + // * owner-id - The ID of the AWS account that owns the + // DHCP options set. // - // * owner-id - The ID of the AWS account - // that owns the DHCP options set. + // * tag: - The key/value combination of a tag assigned to the + // resource. Use the tag key in the filter name and the tag value as the filter + // value. For example, to find all resources that have a tag with the key Owner and + // the value TeamA, specify tag:Owner for the filter name and TeamA for the filter + // value. // - // * tag: - The key/value combination of a tag - // assigned to the resource. Use the tag key in the filter name and the tag value - // as the filter value. For example, to find all resources that have a tag with the - // key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA - // for the filter value. - // - // * tag-key - The key of a tag assigned to the - // resource. Use this filter to find all resources assigned a tag with a specific - // key, regardless of the tag value. + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of the tag + // value. Filters []*types.Filter // The maximum number of results to return with a single call. To retrieve the diff --git a/service/ec2/api_op_DescribeEgressOnlyInternetGateways.go b/service/ec2/api_op_DescribeEgressOnlyInternetGateways.go index 88f96b2f144..98d6663b38c 100644 --- a/service/ec2/api_op_DescribeEgressOnlyInternetGateways.go +++ b/service/ec2/api_op_DescribeEgressOnlyInternetGateways.go @@ -40,15 +40,15 @@ type DescribeEgressOnlyInternetGatewaysInput struct { // One or more filters. // - // * tag: - The key/value combination of a tag assigned - // to the resource. Use the tag key in the filter name and the tag value as the - // filter value. For example, to find all resources that have a tag with the key - // Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for - // the filter value. + // * tag: - The key/value combination of a tag assigned to + // the resource. Use the tag key in the filter name and the tag value as the filter + // value. For example, to find all resources that have a tag with the key Owner and + // the value TeamA, specify tag:Owner for the filter name and TeamA for the filter + // value. // - // * tag-key - The key of a tag assigned to the resource. - // Use this filter to find all resources assigned a tag with a specific key, - // regardless of the tag value. + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of the tag + // value. Filters []*types.Filter // The maximum number of results to return with a single call. To retrieve the diff --git a/service/ec2/api_op_DescribeElasticGpus.go b/service/ec2/api_op_DescribeElasticGpus.go index 4eafcb40b42..e1211145da0 100644 --- a/service/ec2/api_op_DescribeElasticGpus.go +++ b/service/ec2/api_op_DescribeElasticGpus.go @@ -42,21 +42,20 @@ type DescribeElasticGpusInput struct { // The filters. // - // * availability-zone - The Availability Zone in which the - // Elastic Graphics accelerator resides. + // * availability-zone - The Availability Zone in which the Elastic + // Graphics accelerator resides. 
// - // * elastic-gpu-health - The status of - // the Elastic Graphics accelerator (OK | IMPAIRED). + // * elastic-gpu-health - The status of the Elastic + // Graphics accelerator (OK | IMPAIRED). // - // * elastic-gpu-state - The - // state of the Elastic Graphics accelerator (ATTACHED). + // * elastic-gpu-state - The state of the + // Elastic Graphics accelerator (ATTACHED). // - // * elastic-gpu-type - - // The type of Elastic Graphics accelerator; for example, eg1.medium. + // * elastic-gpu-type - The type of + // Elastic Graphics accelerator; for example, eg1.medium. // - // * - // instance-id - The ID of the instance to which the Elastic Graphics accelerator - // is associated. + // * instance-id - The ID + // of the instance to which the Elastic Graphics accelerator is associated. Filters []*types.Filter // The maximum number of results to return in a single call. To retrieve the diff --git a/service/ec2/api_op_DescribeFastSnapshotRestores.go b/service/ec2/api_op_DescribeFastSnapshotRestores.go index 47afac9cfb4..810a78cb8b6 100644 --- a/service/ec2/api_op_DescribeFastSnapshotRestores.go +++ b/service/ec2/api_op_DescribeFastSnapshotRestores.go @@ -37,17 +37,17 @@ type DescribeFastSnapshotRestoresInput struct { // The filters. The possible values are: // - // * availability-zone: The Availability + // * availability-zone: The Availability // Zone of the snapshot. // - // * owner-id: The ID of the AWS account that enabled - // fast snapshot restore on the snapshot. + // * owner-id: The ID of the AWS account that enabled fast + // snapshot restore on the snapshot. // - // * snapshot-id: The ID of the - // snapshot. + // * snapshot-id: The ID of the snapshot. // - // * state: The state of fast snapshot restores for the snapshot - // (enabling | optimizing | enabled | disabling | disabled). + // * + // state: The state of fast snapshot restores for the snapshot (enabling | + // optimizing | enabled | disabling | disabled). Filters []*types.Filter // The maximum number of results to return with a single call. To retrieve the diff --git a/service/ec2/api_op_DescribeFleetInstances.go b/service/ec2/api_op_DescribeFleetInstances.go index 0fc5c358121..ca258c9d658 100644 --- a/service/ec2/api_op_DescribeFleetInstances.go +++ b/service/ec2/api_op_DescribeFleetInstances.go @@ -42,7 +42,7 @@ type DescribeFleetInstancesInput struct { // The filters. // - // * instance-type - The instance type. + // * instance-type - The instance type. Filters []*types.Filter // The maximum number of results to return in a single call. Specify a value diff --git a/service/ec2/api_op_DescribeFleets.go b/service/ec2/api_op_DescribeFleets.go index 050298b20fc..90e6e2dc05b 100644 --- a/service/ec2/api_op_DescribeFleets.go +++ b/service/ec2/api_op_DescribeFleets.go @@ -37,23 +37,23 @@ type DescribeFleetsInput struct { // The filters. // - // * activity-status - The progress of the EC2 Fleet ( error | + // * activity-status - The progress of the EC2 Fleet ( error | // pending-fulfillment | pending-termination | fulfilled). // - // * + // * // excess-capacity-termination-policy - Indicates whether to terminate running // instances if the target capacity is decreased below the current EC2 Fleet size // (true | false). // - // * fleet-state - The state of the EC2 Fleet (submitted | - // active | deleted | failed | deleted-running | deleted-terminating | - // modifying). + // * fleet-state - The state of the EC2 Fleet (submitted | active + // | deleted | failed | deleted-running | deleted-terminating | modifying). 
// - // * replace-unhealthy-instances - Indicates whether EC2 Fleet - // should replace unhealthy instances (true | false). + // * + // replace-unhealthy-instances - Indicates whether EC2 Fleet should replace + // unhealthy instances (true | false). // - // * type - The type of - // request (instant | request | maintain). + // * type - The type of request (instant | + // request | maintain). Filters []*types.Filter // The ID of the EC2 Fleets. diff --git a/service/ec2/api_op_DescribeFlowLogs.go b/service/ec2/api_op_DescribeFlowLogs.go index 297e614838d..7954d435db8 100644 --- a/service/ec2/api_op_DescribeFlowLogs.go +++ b/service/ec2/api_op_DescribeFlowLogs.go @@ -39,33 +39,32 @@ type DescribeFlowLogsInput struct { // One or more filters. // - // * deliver-log-status - The status of the logs delivery + // * deliver-log-status - The status of the logs delivery // (SUCCESS | FAILED). // - // * log-destination-type - The type of destination to - // which the flow log publishes data. Possible destination types include - // cloud-watch-logs and S3. + // * log-destination-type - The type of destination to which + // the flow log publishes data. Possible destination types include cloud-watch-logs + // and S3. // - // * flow-log-id - The ID of the flow log. + // * flow-log-id - The ID of the flow log. // - // * - // log-group-name - The name of the log group. + // * log-group-name - The name of + // the log group. // - // * resource-id - The ID of the - // VPC, subnet, or network interface. + // * resource-id - The ID of the VPC, subnet, or network + // interface. // - // * traffic-type - The type of traffic - // (ACCEPT | REJECT | ALL). + // * traffic-type - The type of traffic (ACCEPT | REJECT | ALL). // - // * tag: - The key/value combination of a tag - // assigned to the resource. Use the tag key in the filter name and the tag value - // as the filter value. For example, to find all resources that have a tag with the - // key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA - // for the filter value. + // * + // tag: - The key/value combination of a tag assigned to the resource. Use the tag + // key in the filter name and the tag value as the filter value. For example, to + // find all resources that have a tag with the key Owner and the value TeamA, + // specify tag:Owner for the filter name and TeamA for the filter value. // - // * tag-key - The key of a tag assigned to the - // resource. Use this filter to find all resources assigned a tag with a specific - // key, regardless of the tag value. + // * tag-key + // - The key of a tag assigned to the resource. Use this filter to find all + // resources assigned a tag with a specific key, regardless of the tag value. Filter []*types.Filter // One or more flow log IDs. Constraint: Maximum of 1000 flow log IDs. diff --git a/service/ec2/api_op_DescribeFpgaImages.go b/service/ec2/api_op_DescribeFpgaImages.go index bcd64a34eef..5bcb9a65b4f 100644 --- a/service/ec2/api_op_DescribeFpgaImages.go +++ b/service/ec2/api_op_DescribeFpgaImages.go @@ -39,40 +39,39 @@ type DescribeFpgaImagesInput struct { // The filters. // - // * create-time - The creation time of the AFI. + // * create-time - The creation time of the AFI. // - // * - // fpga-image-id - The FPGA image identifier (AFI ID). + // * fpga-image-id - + // The FPGA image identifier (AFI ID). // - // * fpga-image-global-id - // - The global FPGA image identifier (AGFI ID). + // * fpga-image-global-id - The global FPGA + // image identifier (AGFI ID). 
// - // * name - The name of the - // AFI. + // * name - The name of the AFI. // - // * owner-id - The AWS account ID of the AFI owner. + // * owner-id - The AWS + // account ID of the AFI owner. // - // * product-code - // - The product code. + // * product-code - The product code. // - // * shell-version - The version of the AWS Shell that was - // used to create the bitstream. + // * + // shell-version - The version of the AWS Shell that was used to create the + // bitstream. // - // * state - The state of the AFI (pending | - // failed | available | unavailable). + // * state - The state of the AFI (pending | failed | available | + // unavailable). // - // * tag: - The key/value combination of a - // tag assigned to the resource. Use the tag key in the filter name and the tag - // value as the filter value. For example, to find all resources that have a tag - // with the key Owner and the value TeamA, specify tag:Owner for the filter name - // and TeamA for the filter value. + // * tag: - The key/value combination of a tag assigned to the + // resource. Use the tag key in the filter name and the tag value as the filter + // value. For example, to find all resources that have a tag with the key Owner and + // the value TeamA, specify tag:Owner for the filter name and TeamA for the filter + // value. // - // * tag-key - The key of a tag assigned to - // the resource. Use this filter to find all resources assigned a tag with a - // specific key, regardless of the tag value. + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of the tag + // value. // - // * update-time - The time of the - // most recent update. + // * update-time - The time of the most recent update. Filters []*types.Filter // The AFI IDs. diff --git a/service/ec2/api_op_DescribeHostReservationOfferings.go b/service/ec2/api_op_DescribeHostReservationOfferings.go index d918fb148df..6f69277a1ea 100644 --- a/service/ec2/api_op_DescribeHostReservationOfferings.go +++ b/service/ec2/api_op_DescribeHostReservationOfferings.go @@ -39,11 +39,11 @@ type DescribeHostReservationOfferingsInput struct { // The filters. // - // * instance-family - The instance family of the offering (for + // * instance-family - The instance family of the offering (for // example, m4). // - // * payment-option - The payment option (NoUpfront | - // PartialUpfront | AllUpfront). + // * payment-option - The payment option (NoUpfront | PartialUpfront + // | AllUpfront). Filter []*types.Filter // This is the maximum duration of the reservation to purchase, specified in diff --git a/service/ec2/api_op_DescribeHostReservations.go b/service/ec2/api_op_DescribeHostReservations.go index a657b0fe0e2..fd7de868084 100644 --- a/service/ec2/api_op_DescribeHostReservations.go +++ b/service/ec2/api_op_DescribeHostReservations.go @@ -31,24 +31,24 @@ type DescribeHostReservationsInput struct { // The filters. // - // * instance-family - The instance family (for example, m4). + // * instance-family - The instance family (for example, m4). // - // - // * payment-option - The payment option (NoUpfront | PartialUpfront | + // * + // payment-option - The payment option (NoUpfront | PartialUpfront | // AllUpfront). // - // * state - The state of the reservation (payment-pending | + // * state - The state of the reservation (payment-pending | // payment-failed | active | retired). // - // * tag: - The key/value combination of a - // tag assigned to the resource. 
Use the tag key in the filter name and the tag - // value as the filter value. For example, to find all resources that have a tag - // with the key Owner and the value TeamA, specify tag:Owner for the filter name - // and TeamA for the filter value. + // * tag: - The key/value combination of a tag + // assigned to the resource. Use the tag key in the filter name and the tag value + // as the filter value. For example, to find all resources that have a tag with the + // key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA + // for the filter value. // - // * tag-key - The key of a tag assigned to - // the resource. Use this filter to find all resources assigned a tag with a - // specific key, regardless of the tag value. + // * tag-key - The key of a tag assigned to the resource. + // Use this filter to find all resources assigned a tag with a specific key, + // regardless of the tag value. Filter []*types.Filter // The host reservation IDs. diff --git a/service/ec2/api_op_DescribeHosts.go b/service/ec2/api_op_DescribeHosts.go index cdc52287bd5..7663b9cc314 100644 --- a/service/ec2/api_op_DescribeHosts.go +++ b/service/ec2/api_op_DescribeHosts.go @@ -34,26 +34,26 @@ type DescribeHostsInput struct { // The filters. // - // * auto-placement - Whether auto-placement is enabled or - // disabled (on | off). + // * auto-placement - Whether auto-placement is enabled or disabled + // (on | off). // - // * availability-zone - The Availability Zone of the - // host. + // * availability-zone - The Availability Zone of the host. // - // * client-token - The idempotency token that you provided when you - // allocated the host. + // * + // client-token - The idempotency token that you provided when you allocated the + // host. // - // * host-reservation-id - The ID of the reservation - // assigned to this host. + // * host-reservation-id - The ID of the reservation assigned to this + // host. // - // * instance-type - The instance type size that the - // Dedicated Host is configured to support. + // * instance-type - The instance type size that the Dedicated Host is + // configured to support. // - // * state - The allocation state of - // the Dedicated Host (available | under-assessment | permanent-failure | released - // | released-permanent-failure). + // * state - The allocation state of the Dedicated Host + // (available | under-assessment | permanent-failure | released | + // released-permanent-failure). // - // * tag-key - The key of a tag assigned to the + // * tag-key - The key of a tag assigned to the // resource. Use this filter to find all resources assigned a tag with a specific // key, regardless of the tag value. Filter []*types.Filter diff --git a/service/ec2/api_op_DescribeIamInstanceProfileAssociations.go b/service/ec2/api_op_DescribeIamInstanceProfileAssociations.go index 7bd5959f990..6b0abf06d7e 100644 --- a/service/ec2/api_op_DescribeIamInstanceProfileAssociations.go +++ b/service/ec2/api_op_DescribeIamInstanceProfileAssociations.go @@ -34,10 +34,10 @@ type DescribeIamInstanceProfileAssociationsInput struct { // The filters. // - // * instance-id - The ID of the instance. + // * instance-id - The ID of the instance. // - // * state - The - // state of the association (associating | associated | disassociating). + // * state - The state of + // the association (associating | associated | disassociating). Filters []*types.Filter // The maximum number of results to return in a single call. 
To retrieve the diff --git a/service/ec2/api_op_DescribeImages.go b/service/ec2/api_op_DescribeImages.go index 8352b09664d..9a777f3e082 100644 --- a/service/ec2/api_op_DescribeImages.go +++ b/service/ec2/api_op_DescribeImages.go @@ -48,105 +48,103 @@ type DescribeImagesInput struct { // The filters. // - // * architecture - The image architecture (i386 | x86_64 | + // * architecture - The image architecture (i386 | x86_64 | // arm64). // - // * block-device-mapping.delete-on-termination - A Boolean value that + // * block-device-mapping.delete-on-termination - A Boolean value that // indicates whether the Amazon EBS volume is deleted on instance termination. // + // * + // block-device-mapping.device-name - The device name specified in the block device + // mapping (for example, /dev/sdh or xvdh). // - // * block-device-mapping.device-name - The device name specified in the block - // device mapping (for example, /dev/sdh or xvdh). + // * block-device-mapping.snapshot-id - + // The ID of the snapshot used for the EBS volume. // - // * - // block-device-mapping.snapshot-id - The ID of the snapshot used for the EBS - // volume. + // * + // block-device-mapping.volume-size - The volume size of the EBS volume, in GiB. // - // * block-device-mapping.volume-size - The volume size of the EBS - // volume, in GiB. + // * + // block-device-mapping.volume-type - The volume type of the EBS volume (gp2 | io1 + // | io2 | st1 | sc1 | standard). // - // * block-device-mapping.volume-type - The volume type of the - // EBS volume (gp2 | io1 | io2 | st1 | sc1 | standard). + // * block-device-mapping.encrypted - A Boolean + // that indicates whether the EBS volume is encrypted. // - // * - // block-device-mapping.encrypted - A Boolean that indicates whether the EBS volume - // is encrypted. + // * description - The + // description of the image (provided during image creation). // - // * description - The description of the image (provided during - // image creation). + // * ena-support - A + // Boolean that indicates whether enhanced networking with ENA is enabled. // - // * ena-support - A Boolean that indicates whether enhanced - // networking with ENA is enabled. + // * + // hypervisor - The hypervisor type (ovm | xen). // - // * hypervisor - The hypervisor type (ovm | - // xen). + // * image-id - The ID of the + // image. // - // * image-id - The ID of the image. + // * image-type - The image type (machine | kernel | ramdisk). // - // * image-type - The image type - // (machine | kernel | ramdisk). + // * is-public + // - A Boolean that indicates whether the image is public. // - // * is-public - A Boolean that indicates - // whether the image is public. + // * kernel-id - The + // kernel ID. // - // * kernel-id - The kernel ID. + // * manifest-location - The location of the image manifest. // - // * - // manifest-location - The location of the image manifest. + // * name - + // The name of the AMI (provided during image creation). // - // * name - The name - // of the AMI (provided during image creation). - // - // * owner-alias - The owner + // * owner-alias - The owner // alias, from an Amazon-maintained list (amazon | aws-marketplace). This is not // the user-configured AWS account alias set using the IAM console. We recommend // that you use the related parameter instead of this filter. // - // * owner-id - The - // AWS account ID of the owner. We recommend that you use the related parameter - // instead of this filter. + // * owner-id - The AWS + // account ID of the owner. 
We recommend that you use the related parameter instead + // of this filter. // - // * platform - The platform. To only list - // Windows-based AMIs, use windows. + // * platform - The platform. To only list Windows-based AMIs, use + // windows. // - // * product-code - The product code. + // * product-code - The product code. // - // * - // product-code.type - The type of the product code (devpay | marketplace). + // * product-code.type - The type of + // the product code (devpay | marketplace). // - // * - // ramdisk-id - The RAM disk ID. + // * ramdisk-id - The RAM disk ID. // - // * root-device-name - The device name of the - // root device volume (for example, /dev/sda1). + // * + // root-device-name - The device name of the root device volume (for example, + // /dev/sda1). // - // * root-device-type - The type - // of the root device volume (ebs | instance-store). + // * root-device-type - The type of the root device volume (ebs | + // instance-store). // - // * state - The state of - // the image (available | pending | failed). + // * state - The state of the image (available | pending | + // failed). // - // * state-reason-code - The reason - // code for the state change. + // * state-reason-code - The reason code for the state change. // - // * state-reason-message - The message for the - // state change. + // * + // state-reason-message - The message for the state change. // - // * sriov-net-support - A value of simple indicates that - // enhanced networking with the Intel 82599 VF interface is enabled. + // * sriov-net-support - + // A value of simple indicates that enhanced networking with the Intel 82599 VF + // interface is enabled. // - // * tag: - - // The key/value combination of a tag assigned to the resource. Use the tag key in - // the filter name and the tag value as the filter value. For example, to find all - // resources that have a tag with the key Owner and the value TeamA, specify - // tag:Owner for the filter name and TeamA for the filter value. + // * tag: - The key/value combination of a tag assigned to + // the resource. Use the tag key in the filter name and the tag value as the filter + // value. For example, to find all resources that have a tag with the key Owner and + // the value TeamA, specify tag:Owner for the filter name and TeamA for the filter + // value. // - // * tag-key - - // The key of a tag assigned to the resource. Use this filter to find all resources - // assigned a tag with a specific key, regardless of the tag value. + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of the tag + // value. // - // * - // virtualization-type - The virtualization type (paravirtual | hvm). + // * virtualization-type - The virtualization type (paravirtual | hvm). Filters []*types.Filter // The image IDs. Default: Describes all images available to you. diff --git a/service/ec2/api_op_DescribeInstanceCreditSpecifications.go b/service/ec2/api_op_DescribeInstanceCreditSpecifications.go index 90972f5764a..c87bb99945c 100644 --- a/service/ec2/api_op_DescribeInstanceCreditSpecifications.go +++ b/service/ec2/api_op_DescribeInstanceCreditSpecifications.go @@ -54,7 +54,7 @@ type DescribeInstanceCreditSpecificationsInput struct { // The filters. // - // * instance-id - The ID of the instance. + // * instance-id - The ID of the instance. Filters []*types.Filter // The instance IDs. Default: Describes all your instances. 
Constraints: Maximum diff --git a/service/ec2/api_op_DescribeInstanceStatus.go b/service/ec2/api_op_DescribeInstanceStatus.go index 76125b8626e..290c43a096e 100644 --- a/service/ec2/api_op_DescribeInstanceStatus.go +++ b/service/ec2/api_op_DescribeInstanceStatus.go @@ -16,22 +16,22 @@ import ( // to return the status of all instances. Instance status includes the following // components: // -// * Status checks - Amazon EC2 performs status checks on running -// EC2 instances to identify hardware and software issues. For more information, -// see Status checks for your instances +// * Status checks - Amazon EC2 performs status checks on running EC2 +// instances to identify hardware and software issues. For more information, see +// Status checks for your instances // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-system-instance-status-check.html) // and Troubleshooting instances with failed status checks // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstances.html) // in the Amazon Elastic Compute Cloud User Guide. // -// * Scheduled events - Amazon -// EC2 can schedule events (such as reboot, stop, or terminate) for your instances +// * Scheduled events - Amazon EC2 +// can schedule events (such as reboot, stop, or terminate) for your instances // related to hardware issues, software updates, or system maintenance. For more // information, see Scheduled events for your instances // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-instances-status-check_sched.html) // in the Amazon Elastic Compute Cloud User Guide. // -// * Instance state - You can +// * Instance state - You can // manage your instances from the moment you launch them through their termination. // For more information, see Instance lifecycle // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-lifecycle.html) @@ -61,51 +61,51 @@ type DescribeInstanceStatusInput struct { // The filters. // - // * availability-zone - The Availability Zone of the instance. + // * availability-zone - The Availability Zone of the instance. // + // * + // event.code - The code for the scheduled event (instance-reboot | system-reboot | + // system-maintenance | instance-retirement | instance-stop). // - // * event.code - The code for the scheduled event (instance-reboot | system-reboot - // | system-maintenance | instance-retirement | instance-stop). + // * event.description + // - A description of the event. // - // * - // event.description - A description of the event. + // * event.instance-event-id - The ID of the event + // whose date and time you are modifying. // - // * event.instance-event-id - - // The ID of the event whose date and time you are modifying. + // * event.not-after - The latest end time + // for the scheduled event (for example, 2014-09-15T17:15:20.000Z). // - // * - // event.not-after - The latest end time for the scheduled event (for example, + // * + // event.not-before - The earliest start time for the scheduled event (for example, // 2014-09-15T17:15:20.000Z). // - // * event.not-before - The earliest start time for - // the scheduled event (for example, 2014-09-15T17:15:20.000Z). + // * event.not-before-deadline - The deadline for + // starting the event (for example, 2014-09-15T17:15:20.000Z). // - // * - // event.not-before-deadline - The deadline for starting the event (for example, - // 2014-09-15T17:15:20.000Z). - // - // * instance-state-code - The code for the - // instance state, as a 16-bit unsigned integer. 
The high byte is used for internal - // purposes and should be ignored. The low byte is set based on the state - // represented. The valid values are 0 (pending), 16 (running), 32 (shutting-down), - // 48 (terminated), 64 (stopping), and 80 (stopped). + // * + // instance-state-code - The code for the instance state, as a 16-bit unsigned + // integer. The high byte is used for internal purposes and should be ignored. The + // low byte is set based on the state represented. The valid values are 0 + // (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and + // 80 (stopped). // - // * instance-state-name - - // The state of the instance (pending | running | shutting-down | terminated | - // stopping | stopped). + // * instance-state-name - The state of the instance (pending | + // running | shutting-down | terminated | stopping | stopped). // - // * instance-status.reachability - Filters on instance - // status where the name is reachability (passed | failed | initializing | - // insufficient-data). + // * + // instance-status.reachability - Filters on instance status where the name is + // reachability (passed | failed | initializing | insufficient-data). // - // * instance-status.status - The status of the instance - // (ok | impaired | initializing | insufficient-data | not-applicable). + // * + // instance-status.status - The status of the instance (ok | impaired | + // initializing | insufficient-data | not-applicable). // - // * + // * // system-status.reachability - Filters on system status where the name is // reachability (passed | failed | initializing | insufficient-data). // - // * + // * // system-status.status - The system status of the instance (ok | impaired | // initializing | insufficient-data | not-applicable). Filters []*types.Filter diff --git a/service/ec2/api_op_DescribeInstanceTypeOfferings.go b/service/ec2/api_op_DescribeInstanceTypeOfferings.go index 497d17f476e..dd51224d646 100644 --- a/service/ec2/api_op_DescribeInstanceTypeOfferings.go +++ b/service/ec2/api_op_DescribeInstanceTypeOfferings.go @@ -39,11 +39,11 @@ type DescribeInstanceTypeOfferingsInput struct { // One or more filters. Filter names and values are case-sensitive. // - // * location - // - This depends on the location type. For example, if the location type is region + // * location - + // This depends on the location type. For example, if the location type is region // (default), the location is the Region code (for example, us-east-2.) // - // * + // * // instance-type - The instance type. Filters []*types.Filter diff --git a/service/ec2/api_op_DescribeInstanceTypes.go b/service/ec2/api_op_DescribeInstanceTypes.go index 8b8a06e9d2e..f6e3a25fdf3 100644 --- a/service/ec2/api_op_DescribeInstanceTypes.go +++ b/service/ec2/api_op_DescribeInstanceTypes.go @@ -38,150 +38,148 @@ type DescribeInstanceTypesInput struct { // One or more filters. Filter names and values are case-sensitive. // - // * + // * // auto-recovery-supported - Indicates whether auto recovery is supported. (true | // false) // - // * bare-metal - Indicates whether it is a bare metal instance type. - // (true | false) + // * bare-metal - Indicates whether it is a bare metal instance type. (true + // | false) // - // * burstable-performance-supported - Indicates whether it is - // a burstable performance instance type. (true | false) + // * burstable-performance-supported - Indicates whether it is a + // burstable performance instance type. 
(true | false) // - // * current-generation - // - Indicates whether this instance type is the latest generation instance type of + // * current-generation - + // Indicates whether this instance type is the latest generation instance type of // an instance family. (true | false) // - // * + // * // ebs-info.ebs-optimized-info.baseline-bandwidth-in-mbps - The baseline bandwidth // performance for an EBS-optimized instance type, in Mbps. // - // * + // * // ebs-info.ebs-optimized-info.baseline-iops - The baseline input/output storage // operations per second for an EBS-optimized instance type. // - // * + // * // ebs-info.ebs-optimized-info.baseline-throughput-in-mbps - The baseline // throughput performance for an EBS-optimized instance type, in MBps. // - // * + // * // ebs-info.ebs-optimized-info.maximum-bandwidth-in-mbps - The maximum bandwidth // performance for an EBS-optimized instance type, in Mbps. // - // * + // * // ebs-info.ebs-optimized-info.maximum-iops - The maximum input/output storage // operations per second for an EBS-optimized instance type. // - // * + // * // ebs-info.ebs-optimized-info.maximum-throughput-in-mbps - The maximum throughput // performance for an EBS-optimized instance type, in MBps. // - // * + // * // ebs-info.ebs-optimized-support - Indicates whether the instance type is // EBS-optimized. (supported | unsupported | default) // - // * + // * // ebs-info.encryption-support - Indicates whether EBS encryption is supported. // (supported | unsupported) // - // * ebs-info.nvme-support - Indicates whether + // * ebs-info.nvme-support - Indicates whether // non-volatile memory express (NVMe) is supported for EBS volumes. (required | // supported | unsupported) // - // * free-tier-eligible - Indicates whether the - // instance type is eligible to use in the free tier. (true | false) + // * free-tier-eligible - Indicates whether the instance + // type is eligible to use in the free tier. (true | false) // - // * + // * // hibernation-supported - Indicates whether On-Demand hibernation is supported. // (true | false) // - // * hypervisor - The hypervisor. (nitro | xen) + // * hypervisor - The hypervisor. (nitro | xen) // - // * + // * // instance-storage-info.disk.count - The number of local disks. // - // * + // * // instance-storage-info.disk.size-in-gb - The storage size of each instance // storage disk, in GB. // - // * instance-storage-info.disk.type - The storage - // technology for the local instance storage disks. (hdd | ssd) + // * instance-storage-info.disk.type - The storage technology + // for the local instance storage disks. (hdd | ssd) // - // * + // * // instance-storage-info.nvme-support - Indicates whether non-volatile memory // express (NVMe) is supported for instance store. (required | supported) | // unsupported) // - // * instance-storage-info.total-size-in-gb - The total amount of + // * instance-storage-info.total-size-in-gb - The total amount of // storage available from all local instance storage, in GB. // - // * + // * // instance-storage-supported - Indicates whether the instance type has local // instance storage. (true | false) // - // * instance-type - The instance type (for + // * instance-type - The instance type (for // example c5.2xlarge or c5*). // - // * memory-info.size-in-mib - The memory size. - // + // * memory-info.size-in-mib - The memory size. 
// - // * network-info.efa-supported - Indicates whether the instance type supports + // * + // network-info.efa-supported - Indicates whether the instance type supports // Elastic Fabric Adapter (EFA). (true | false) // - // * network-info.ena-support - + // * network-info.ena-support - // Indicates whether Elastic Network Adapter (ENA) is supported or required. // (required | supported | unsupported) // - // * + // * // network-info.ipv4-addresses-per-interface - The maximum number of private IPv4 // addresses per network interface. // - // * - // network-info.ipv6-addresses-per-interface - The maximum number of private IPv6 - // addresses per network interface. + // * network-info.ipv6-addresses-per-interface - + // The maximum number of private IPv6 addresses per network interface. // - // * network-info.ipv6-supported - Indicates - // whether the instance type supports IPv6. (true | false) + // * + // network-info.ipv6-supported - Indicates whether the instance type supports IPv6. + // (true | false) // - // * - // network-info.maximum-network-interfaces - The maximum number of network - // interfaces per instance. + // * network-info.maximum-network-interfaces - The maximum number + // of network interfaces per instance. // - // * network-info.network-performance - The network - // performance (for example, "25 Gigabit"). + // * network-info.network-performance - The + // network performance (for example, "25 Gigabit"). // - // * + // * // processor-info.supported-architecture - The CPU architecture. (arm64 | i386 | // x86_64) // - // * processor-info.sustained-clock-speed-in-ghz - The CPU clock - // speed, in GHz. + // * processor-info.sustained-clock-speed-in-ghz - The CPU clock speed, in + // GHz. // - // * supported-root-device-type - The root device type. (ebs | + // * supported-root-device-type - The root device type. (ebs | // instance-store) // - // * supported-usage-class - The usage class. (on-demand | + // * supported-usage-class - The usage class. (on-demand | // spot) // - // * supported-virtualization-type - The virtualization type. (hvm | + // * supported-virtualization-type - The virtualization type. (hvm | // paravirtual) // - // * vcpu-info.default-cores - The default number of cores for - // the instance type. + // * vcpu-info.default-cores - The default number of cores for the + // instance type. // - // * vcpu-info.default-threads-per-core - The default - // number of threads per core for the instance type. + // * vcpu-info.default-threads-per-core - The default number of + // threads per core for the instance type. // - // * vcpu-info.default-vcpus - // - The default number of vCPUs for the instance type. + // * vcpu-info.default-vcpus - The default + // number of vCPUs for the instance type. // - // * - // vcpu-info.valid-cores - The number of cores that can be configured for the - // instance type. + // * vcpu-info.valid-cores - The number of + // cores that can be configured for the instance type. // - // * vcpu-info.valid-threads-per-core - The number of threads - // per core that can be configured for the instance type. For example, "1" or - // "1,2". + // * + // vcpu-info.valid-threads-per-core - The number of threads per core that can be + // configured for the instance type. For example, "1" or "1,2". Filters []*types.Filter // The instance types. 
For more information, see Instance Types diff --git a/service/ec2/api_op_DescribeInstances.go b/service/ec2/api_op_DescribeInstances.go index 01919fda185..c64132ec397 100644 --- a/service/ec2/api_op_DescribeInstances.go +++ b/service/ec2/api_op_DescribeInstances.go @@ -50,310 +50,307 @@ type DescribeInstancesInput struct { // The filters. // - // * affinity - The affinity setting for an instance running on a + // * affinity - The affinity setting for an instance running on a // Dedicated Host (default | host). // - // * architecture - The instance architecture + // * architecture - The instance architecture // (i386 | x86_64 | arm64). // - // * availability-zone - The Availability Zone of the + // * availability-zone - The Availability Zone of the // instance. // - // * block-device-mapping.attach-time - The attach time for an EBS + // * block-device-mapping.attach-time - The attach time for an EBS // volume mapped to the instance, for example, 2010-09-15T17:15:20.000Z. // - // * + // * // block-device-mapping.delete-on-termination - A Boolean that indicates whether // the EBS volume is deleted on instance termination. // - // * + // * // block-device-mapping.device-name - The device name specified in the block device // mapping (for example, /dev/sdh or xvdh). // - // * block-device-mapping.status - - // The status for the EBS volume (attaching | attached | detaching | detached). + // * block-device-mapping.status - The + // status for the EBS volume (attaching | attached | detaching | detached). // + // * + // block-device-mapping.volume-id - The volume ID of the EBS volume. // - // * block-device-mapping.volume-id - The volume ID of the EBS volume. - // - // * + // * // client-token - The idempotency token you provided when you launched the // instance. // - // * dns-name - The public DNS name of the instance. + // * dns-name - The public DNS name of the instance. // - // * group-id - // - The ID of the security group for the instance. EC2-Classic only. + // * group-id - The + // ID of the security group for the instance. EC2-Classic only. // - // * - // group-name - The name of the security group for the instance. EC2-Classic - // only. + // * group-name - The + // name of the security group for the instance. EC2-Classic only. // - // * hibernation-options.configured - A Boolean that indicates whether - // the instance is enabled for hibernation. A value of true means that the instance - // is enabled for hibernation. + // * + // hibernation-options.configured - A Boolean that indicates whether the instance + // is enabled for hibernation. A value of true means that the instance is enabled + // for hibernation. // - // * host-id - The ID of the Dedicated Host on - // which the instance is running, if applicable. + // * host-id - The ID of the Dedicated Host on which the instance + // is running, if applicable. // - // * hypervisor - The hypervisor - // type of the instance (ovm | xen). The value xen is used for both Xen and Nitro - // hypervisors. + // * hypervisor - The hypervisor type of the instance + // (ovm | xen). The value xen is used for both Xen and Nitro hypervisors. // - // * iam-instance-profile.arn - The instance profile associated - // with the instance. Specified as an ARN. + // * + // iam-instance-profile.arn - The instance profile associated with the instance. + // Specified as an ARN. // - // * image-id - The ID of the image - // used to launch the instance. + // * image-id - The ID of the image used to launch the + // instance. 
// - // * instance-id - The ID of the instance. + // * instance-id - The ID of the instance. // - // * - // instance-lifecycle - Indicates whether this is a Spot Instance or a Scheduled - // Instance (spot | scheduled). + // * instance-lifecycle - + // Indicates whether this is a Spot Instance or a Scheduled Instance (spot | + // scheduled). // - // * instance-state-code - The state of the - // instance, as a 16-bit unsigned integer. The high byte is used for internal - // purposes and should be ignored. The low byte is set based on the state - // represented. The valid values are: 0 (pending), 16 (running), 32 - // (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped). + // * instance-state-code - The state of the instance, as a 16-bit + // unsigned integer. The high byte is used for internal purposes and should be + // ignored. The low byte is set based on the state represented. The valid values + // are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 + // (stopping), and 80 (stopped). // - // * - // instance-state-name - The state of the instance (pending | running | - // shutting-down | terminated | stopping | stopped). + // * instance-state-name - The state of the instance + // (pending | running | shutting-down | terminated | stopping | stopped). // - // * instance-type - The - // type of instance (for example, t2.micro). + // * + // instance-type - The type of instance (for example, t2.micro). // - // * instance.group-id - The ID of - // the security group for the instance. + // * + // instance.group-id - The ID of the security group for the instance. // - // * instance.group-name - The name of - // the security group for the instance. + // * + // instance.group-name - The name of the security group for the instance. // - // * ip-address - The public IPv4 address - // of the instance. + // * + // ip-address - The public IPv4 address of the instance. // - // * kernel-id - The kernel ID. + // * kernel-id - The kernel + // ID. // - // * key-name - The name of - // the key pair used when the instance was launched. + // * key-name - The name of the key pair used when the instance was + // launched. // - // * launch-index - When - // launching multiple instances, this is the index for the instance in the launch - // group (for example, 0, 1, 2, and so on). + // * launch-index - When launching multiple instances, this is the index + // for the instance in the launch group (for example, 0, 1, 2, and so on). // - // * launch-time - The time when the - // instance was launched. + // * + // launch-time - The time when the instance was launched. // - // * metadata-options.http-tokens - The metadata - // request authorization state (optional | required) + // * + // metadata-options.http-tokens - The metadata request authorization state + // (optional | required) // - // * - // metadata-options.http-put-response-hop-limit - The http metadata request put - // response hop limit (integer, possible values 1 to 64) + // * metadata-options.http-put-response-hop-limit - The http + // metadata request put response hop limit (integer, possible values 1 to 64) // - // * + // * // metadata-options.http-endpoint - Enable or disable metadata access on http // endpoint (enabled | disabled) // - // * monitoring-state - Indicates whether - // detailed monitoring is enabled (disabled | enabled). + // * monitoring-state - Indicates whether detailed + // monitoring is enabled (disabled | enabled). 
// - // * + // * // network-interface.addresses.private-ip-address - The private IPv4 address // associated with the network interface. // - // * - // network-interface.addresses.primary - Specifies whether the IPv4 address of the - // network interface is the primary private IPv4 address. + // * network-interface.addresses.primary - + // Specifies whether the IPv4 address of the network interface is the primary + // private IPv4 address. // - // * - // network-interface.addresses.association.public-ip - The ID of the association of - // an Elastic IP address (IPv4) with a network interface. + // * network-interface.addresses.association.public-ip - The + // ID of the association of an Elastic IP address (IPv4) with a network + // interface. // - // * - // network-interface.addresses.association.ip-owner-id - The owner ID of the - // private IPv4 address associated with the network interface. + // * network-interface.addresses.association.ip-owner-id - The owner ID + // of the private IPv4 address associated with the network interface. // - // * + // * // network-interface.association.public-ip - The address of the Elastic IP address // (IPv4) bound to the network interface. // - // * + // * // network-interface.association.ip-owner-id - The owner of the Elastic IP address // (IPv4) associated with the network interface. // - // * + // * // network-interface.association.allocation-id - The allocation ID returned when // you allocated the Elastic IP address (IPv4) for your network interface. // - // * + // * // network-interface.association.association-id - The association ID returned when // the network interface was associated with an IPv4 address. // - // * + // * // network-interface.attachment.attachment-id - The ID of the interface // attachment. // - // * network-interface.attachment.instance-id - The ID of the - // instance to which the network interface is attached. + // * network-interface.attachment.instance-id - The ID of the instance + // to which the network interface is attached. // - // * + // * // network-interface.attachment.instance-owner-id - The owner ID of the instance to // which the network interface is attached. // - // * + // * // network-interface.attachment.device-index - The device index to which the // network interface is attached. // - // * network-interface.attachment.status - The + // * network-interface.attachment.status - The // status of the attachment (attaching | attached | detaching | detached). // - // * + // * // network-interface.attachment.attach-time - The time that the network interface // was attached to an instance. // - // * + // * // network-interface.attachment.delete-on-termination - Specifies whether the // attachment is deleted when an instance is terminated. // - // * + // * // network-interface.availability-zone - The Availability Zone for the network // interface. // - // * network-interface.description - The description of the network + // * network-interface.description - The description of the network // interface. // - // * network-interface.group-id - The ID of a security group - // associated with the network interface. + // * network-interface.group-id - The ID of a security group associated + // with the network interface. // - // * network-interface.group-name - The - // name of a security group associated with the network interface. + // * network-interface.group-name - The name of a + // security group associated with the network interface. 
// - // * + // * // network-interface.ipv6-addresses.ipv6-address - The IPv6 address associated with // the network interface. // - // * network-interface.mac-address - The MAC address of - // the network interface. - // - // * network-interface.network-interface-id - The ID of - // the network interface. + // * network-interface.mac-address - The MAC address of the + // network interface. // - // * network-interface.owner-id - The ID of the owner - // of the network interface. + // * network-interface.network-interface-id - The ID of the + // network interface. // - // * network-interface.private-dns-name - The - // private DNS name of the network interface. + // * network-interface.owner-id - The ID of the owner of the + // network interface. // - // * network-interface.requester-id - // - The requester ID for the network interface. + // * network-interface.private-dns-name - The private DNS name + // of the network interface. // - // * - // network-interface.requester-managed - Indicates whether the network interface is - // being managed by AWS. + // * network-interface.requester-id - The requester ID + // for the network interface. // - // * network-interface.status - The status of the - // network interface (available) | in-use). + // * network-interface.requester-managed - Indicates + // whether the network interface is being managed by AWS. // - // * - // network-interface.source-dest-check - Whether the network interface performs - // source/destination checking. A value of true means that checking is enabled, and - // false means that checking is disabled. The value must be false for the network - // interface to perform network address translation (NAT) in your VPC. + // * + // network-interface.status - The status of the network interface (available) | + // in-use). // - // * - // network-interface.subnet-id - The ID of the subnet for the network interface. + // * network-interface.source-dest-check - Whether the network interface + // performs source/destination checking. A value of true means that checking is + // enabled, and false means that checking is disabled. The value must be false for + // the network interface to perform network address translation (NAT) in your + // VPC. // + // * network-interface.subnet-id - The ID of the subnet for the network + // interface. // - // * network-interface.vpc-id - The ID of the VPC for the network interface. + // * network-interface.vpc-id - The ID of the VPC for the network + // interface. // - // * - // owner-id - The AWS account ID of the instance owner. + // * owner-id - The AWS account ID of the instance owner. // - // * placement-group-name - // - The name of the placement group for the instance. + // * + // placement-group-name - The name of the placement group for the instance. // - // * + // * // placement-partition-number - The partition in which the instance is located. // + // * + // platform - The platform. To list only Windows instances, use windows. // - // * platform - The platform. To list only Windows instances, use windows. - // - // * + // * // private-dns-name - The private IPv4 DNS name of the instance. // - // * + // * // private-ip-address - The private IPv4 address of the instance. // - // * - // product-code - The product code associated with the AMI used to launch the - // instance. - // - // * product-code.type - The type of product code (devpay | - // marketplace). - // - // * ramdisk-id - The RAM disk ID. + // * product-code - + // The product code associated with the AMI used to launch the instance. 
// - // * reason - The reason - // for the current state of the instance (for example, shows "User Initiated - // [date]" when you stop or terminate the instance). Similar to the - // state-reason-code filter. + // * + // product-code.type - The type of product code (devpay | marketplace). // - // * requester-id - The ID of the entity that - // launched the instance on your behalf (for example, AWS Management Console, Auto - // Scaling, and so on). + // * + // ramdisk-id - The RAM disk ID. // - // * reservation-id - The ID of the instance's - // reservation. A reservation ID is created any time you launch an instance. A - // reservation ID has a one-to-one relationship with an instance launch request, - // but can be associated with more than one instance if you launch multiple - // instances using the same launch request. For example, if you launch one - // instance, you get one reservation ID. If you launch ten instances using the same - // launch request, you also get one reservation ID. + // * reason - The reason for the current state of + // the instance (for example, shows "User Initiated [date]" when you stop or + // terminate the instance). Similar to the state-reason-code filter. // - // * root-device-name - The - // device name of the root device volume (for example, /dev/sda1). + // * + // requester-id - The ID of the entity that launched the instance on your behalf + // (for example, AWS Management Console, Auto Scaling, and so on). // - // * - // root-device-type - The type of the root device volume (ebs | instance-store). + // * + // reservation-id - The ID of the instance's reservation. A reservation ID is + // created any time you launch an instance. A reservation ID has a one-to-one + // relationship with an instance launch request, but can be associated with more + // than one instance if you launch multiple instances using the same launch + // request. For example, if you launch one instance, you get one reservation ID. If + // you launch ten instances using the same launch request, you also get one + // reservation ID. // + // * root-device-name - The device name of the root device volume + // (for example, /dev/sda1). // - // * source-dest-check - Indicates whether the instance performs source/destination - // checking. A value of true means that checking is enabled, and false means that - // checking is disabled. The value must be false for the instance to perform - // network address translation (NAT) in your VPC. + // * root-device-type - The type of the root device + // volume (ebs | instance-store). // - // * spot-instance-request-id - - // The ID of the Spot Instance request. + // * source-dest-check - Indicates whether the + // instance performs source/destination checking. A value of true means that + // checking is enabled, and false means that checking is disabled. The value must + // be false for the instance to perform network address translation (NAT) in your + // VPC. // - // * state-reason-code - The reason code - // for the state change. + // * spot-instance-request-id - The ID of the Spot Instance request. // - // * state-reason-message - A message that describes the - // state change. + // * + // state-reason-code - The reason code for the state change. // - // * subnet-id - The ID of the subnet for the instance. + // * + // state-reason-message - A message that describes the state change. // - // * - // tag: - The key/value combination of a tag assigned to the resource. Use the tag - // key in the filter name and the tag value as the filter value. 
For example, to - // find all resources that have a tag with the key Owner and the value TeamA, - // specify tag:Owner for the filter name and TeamA for the filter value. + // * subnet-id - + // The ID of the subnet for the instance. // - // * - // tag-key - The key of a tag assigned to the resource. Use this filter to find all - // resources that have a tag with a specific key, regardless of the tag value. + // * tag: - The key/value combination of a + // tag assigned to the resource. Use the tag key in the filter name and the tag + // value as the filter value. For example, to find all resources that have a tag + // with the key Owner and the value TeamA, specify tag:Owner for the filter name + // and TeamA for the filter value. // + // * tag-key - The key of a tag assigned to the + // resource. Use this filter to find all resources that have a tag with a specific + // key, regardless of the tag value. // - // * tenancy - The tenancy of an instance (dedicated | default | host). + // * tenancy - The tenancy of an instance + // (dedicated | default | host). // - // * - // virtualization-type - The virtualization type of the instance (paravirtual | - // hvm). + // * virtualization-type - The virtualization type + // of the instance (paravirtual | hvm). // - // * vpc-id - The ID of the VPC that the instance is running in. + // * vpc-id - The ID of the VPC that the + // instance is running in. Filters []*types.Filter // The instance IDs. Default: Describes all your instances. diff --git a/service/ec2/api_op_DescribeInternetGateways.go b/service/ec2/api_op_DescribeInternetGateways.go index 2fabdd5a2fd..7d06303050c 100644 --- a/service/ec2/api_op_DescribeInternetGateways.go +++ b/service/ec2/api_op_DescribeInternetGateways.go @@ -37,27 +37,27 @@ type DescribeInternetGatewaysInput struct { // One or more filters. // - // * attachment.state - The current state of the - // attachment between the gateway and the VPC (available). Present only if a VPC is + // * attachment.state - The current state of the attachment + // between the gateway and the VPC (available). Present only if a VPC is // attached. // - // * attachment.vpc-id - The ID of an attached VPC. + // * attachment.vpc-id - The ID of an attached VPC. // - // * + // * // internet-gateway-id - The ID of the Internet gateway. // - // * owner-id - The ID - // of the AWS account that owns the internet gateway. + // * owner-id - The ID of + // the AWS account that owns the internet gateway. // - // * tag: - The key/value + // * tag: - The key/value // combination of a tag assigned to the resource. Use the tag key in the filter // name and the tag value as the filter value. For example, to find all resources // that have a tag with the key Owner and the value TeamA, specify tag:Owner for // the filter name and TeamA for the filter value. // - // * tag-key - The key of a - // tag assigned to the resource. Use this filter to find all resources assigned a - // tag with a specific key, regardless of the tag value. + // * tag-key - The key of a tag + // assigned to the resource. Use this filter to find all resources assigned a tag + // with a specific key, regardless of the tag value. Filters []*types.Filter // One or more internet gateway IDs. Default: Describes all your internet gateways. 
diff --git a/service/ec2/api_op_DescribeIpv6Pools.go b/service/ec2/api_op_DescribeIpv6Pools.go index d01bbf469c5..0aae931ae44 100644 --- a/service/ec2/api_op_DescribeIpv6Pools.go +++ b/service/ec2/api_op_DescribeIpv6Pools.go @@ -37,15 +37,15 @@ type DescribeIpv6PoolsInput struct { // One or more filters. // - // * tag: - The key/value combination of a tag assigned - // to the resource. Use the tag key in the filter name and the tag value as the - // filter value. For example, to find all resources that have a tag with the key - // Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for - // the filter value. + // * tag: - The key/value combination of a tag assigned to + // the resource. Use the tag key in the filter name and the tag value as the filter + // value. For example, to find all resources that have a tag with the key Owner and + // the value TeamA, specify tag:Owner for the filter name and TeamA for the filter + // value. // - // * tag-key - The key of a tag assigned to the resource. - // Use this filter to find all resources assigned a tag with a specific key, - // regardless of the tag value. + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of the tag + // value. Filters []*types.Filter // The maximum number of results to return with a single call. To retrieve the diff --git a/service/ec2/api_op_DescribeKeyPairs.go b/service/ec2/api_op_DescribeKeyPairs.go index 794ab1c1cc2..33a92229b35 100644 --- a/service/ec2/api_op_DescribeKeyPairs.go +++ b/service/ec2/api_op_DescribeKeyPairs.go @@ -40,21 +40,21 @@ type DescribeKeyPairsInput struct { // The filters. // - // * key-pair-id - The ID of the key pair. + // * key-pair-id - The ID of the key pair. // - // * fingerprint - - // The fingerprint of the key pair. + // * fingerprint - The + // fingerprint of the key pair. // - // * key-name - The name of the key pair. + // * key-name - The name of the key pair. // + // * tag-key + // - The key of a tag assigned to the resource. Use this filter to find all + // resources assigned a tag with a specific key, regardless of the tag value. // - // * tag-key - The key of a tag assigned to the resource. Use this filter to find - // all resources assigned a tag with a specific key, regardless of the tag value. - // - // - // * tag: - The key/value combination of a tag assigned to the resource. Use the - // tag key in the filter name and the tag value as the filter value. For example, - // to find all resources that have a tag with the key Owner and the value TeamA, + // * + // tag: - The key/value combination of a tag assigned to the resource. Use the tag + // key in the filter name and the tag value as the filter value. For example, to + // find all resources that have a tag with the key Owner and the value TeamA, // specify tag:Owner for the filter name and TeamA for the filter value. Filters []*types.Filter diff --git a/service/ec2/api_op_DescribeLaunchTemplateVersions.go b/service/ec2/api_op_DescribeLaunchTemplateVersions.go index 31c30095bb4..dfafc488653 100644 --- a/service/ec2/api_op_DescribeLaunchTemplateVersions.go +++ b/service/ec2/api_op_DescribeLaunchTemplateVersions.go @@ -40,27 +40,27 @@ type DescribeLaunchTemplateVersionsInput struct { // One or more filters. // - // * create-time - The time the launch template version - // was created. + // * create-time - The time the launch template version was + // created. 
// - // * ebs-optimized - A boolean that indicates whether the - // instance is optimized for Amazon EBS I/O. + // * ebs-optimized - A boolean that indicates whether the instance is + // optimized for Amazon EBS I/O. // - // * iam-instance-profile - The ARN - // of the IAM instance profile. + // * iam-instance-profile - The ARN of the IAM + // instance profile. // - // * image-id - The ID of the AMI. + // * image-id - The ID of the AMI. // - // * - // instance-type - The instance type. + // * instance-type - The + // instance type. // - // * is-default-version - A boolean that - // indicates whether the launch template version is the default version. + // * is-default-version - A boolean that indicates whether the + // launch template version is the default version. // - // * - // kernel-id - The kernel ID. + // * kernel-id - The kernel ID. // - // * ram-disk-id - The RAM disk ID. + // * + // ram-disk-id - The RAM disk ID. Filters []*types.Filter // The ID of the launch template. To describe one or more versions of a specified diff --git a/service/ec2/api_op_DescribeLaunchTemplates.go b/service/ec2/api_op_DescribeLaunchTemplates.go index b193574ed8b..cabadd57de9 100644 --- a/service/ec2/api_op_DescribeLaunchTemplates.go +++ b/service/ec2/api_op_DescribeLaunchTemplates.go @@ -37,20 +37,20 @@ type DescribeLaunchTemplatesInput struct { // One or more filters. // - // * create-time - The time the launch template was + // * create-time - The time the launch template was // created. // - // * launch-template-name - The name of the launch template. + // * launch-template-name - The name of the launch template. // - // * - // tag: - The key/value combination of a tag assigned to the resource. Use the tag - // key in the filter name and the tag value as the filter value. For example, to - // find all resources that have a tag with the key Owner and the value TeamA, - // specify tag:Owner for the filter name and TeamA for the filter value. + // * tag: - + // The key/value combination of a tag assigned to the resource. Use the tag key in + // the filter name and the tag value as the filter value. For example, to find all + // resources that have a tag with the key Owner and the value TeamA, specify + // tag:Owner for the filter name and TeamA for the filter value. // - // * - // tag-key - The key of a tag assigned to the resource. Use this filter to find all - // resources assigned a tag with a specific key, regardless of the tag value. + // * tag-key - The + // key of a tag assigned to the resource. Use this filter to find all resources + // assigned a tag with a specific key, regardless of the tag value. Filters []*types.Filter // One or more launch template IDs. diff --git a/service/ec2/api_op_DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociations.go b/service/ec2/api_op_DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociations.go index b4e5f28f97b..f8387e21b3a 100644 --- a/service/ec2/api_op_DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociations.go +++ b/service/ec2/api_op_DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociations.go @@ -38,19 +38,19 @@ type DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsInput struct // One or more filters. // - // * local-gateway-id - The ID of a local gateway. + // * local-gateway-id - The ID of a local gateway. // - // * + // * // local-gateway-route-table-id - The ID of the local gateway route table. // - // * + // * // local-gateway-route-table-virtual-interface-group-association-id - The ID of the // association. 
// - // * local-gateway-route-table-virtual-interface-group-id - The - // ID of the virtual interface group. + // * local-gateway-route-table-virtual-interface-group-id - The ID of + // the virtual interface group. // - // * state - The state of the association. + // * state - The state of the association. Filters []*types.Filter // The IDs of the associations. diff --git a/service/ec2/api_op_DescribeLocalGatewayRouteTableVpcAssociations.go b/service/ec2/api_op_DescribeLocalGatewayRouteTableVpcAssociations.go index dd061d44e3c..ce55120f5d5 100644 --- a/service/ec2/api_op_DescribeLocalGatewayRouteTableVpcAssociations.go +++ b/service/ec2/api_op_DescribeLocalGatewayRouteTableVpcAssociations.go @@ -38,18 +38,18 @@ type DescribeLocalGatewayRouteTableVpcAssociationsInput struct { // One or more filters. // - // * local-gateway-id - The ID of a local gateway. + // * local-gateway-id - The ID of a local gateway. // - // * + // * // local-gateway-route-table-id - The ID of the local gateway route table. // - // * + // * // local-gateway-route-table-vpc-association-id - The ID of the association. // - // * + // * // state - The state of the association. // - // * vpc-id - The ID of the VPC. + // * vpc-id - The ID of the VPC. Filters []*types.Filter // The IDs of the associations. diff --git a/service/ec2/api_op_DescribeLocalGatewayRouteTables.go b/service/ec2/api_op_DescribeLocalGatewayRouteTables.go index 4d9eae822e9..ff052eda55f 100644 --- a/service/ec2/api_op_DescribeLocalGatewayRouteTables.go +++ b/service/ec2/api_op_DescribeLocalGatewayRouteTables.go @@ -38,15 +38,15 @@ type DescribeLocalGatewayRouteTablesInput struct { // One or more filters. // - // * local-gateway-id - The ID of a local gateway. + // * local-gateway-id - The ID of a local gateway. // - // * + // * // local-gateway-route-table-id - The ID of a local gateway route table. // - // * + // * // outpost-arn - The Amazon Resource Name (ARN) of the Outpost. // - // * state - The + // * state - The // state of the local gateway route table. Filters []*types.Filter diff --git a/service/ec2/api_op_DescribeLocalGatewayVirtualInterfaceGroups.go b/service/ec2/api_op_DescribeLocalGatewayVirtualInterfaceGroups.go index 07273b01ee4..00ad930b396 100644 --- a/service/ec2/api_op_DescribeLocalGatewayVirtualInterfaceGroups.go +++ b/service/ec2/api_op_DescribeLocalGatewayVirtualInterfaceGroups.go @@ -37,12 +37,12 @@ type DescribeLocalGatewayVirtualInterfaceGroupsInput struct { // One or more filters. // - // * local-gateway-id - The ID of a local gateway. + // * local-gateway-id - The ID of a local gateway. // - // * + // * // local-gateway-virtual-interface-id - The ID of the virtual interface. // - // * + // * // local-gateway-virtual-interface-group-id - The ID of the virtual interface // group. Filters []*types.Filter diff --git a/service/ec2/api_op_DescribeLocalGateways.go b/service/ec2/api_op_DescribeLocalGateways.go index b5f80767e9d..d07e605d4f7 100644 --- a/service/ec2/api_op_DescribeLocalGateways.go +++ b/service/ec2/api_op_DescribeLocalGateways.go @@ -41,22 +41,22 @@ type DescribeLocalGatewaysInput struct { // One or more filters. // - // * local-gateway-id - The ID of a local gateway. + // * local-gateway-id - The ID of a local gateway. // - // * + // * // local-gateway-route-table-id - The ID of the local gateway route table. // - // * + // * // local-gateway-route-table-virtual-interface-group-association-id - The ID of the // association. 
// - // * local-gateway-route-table-virtual-interface-group-id - The - // ID of the virtual interface group. + // * local-gateway-route-table-virtual-interface-group-id - The ID of + // the virtual interface group. // - // * outpost-arn - The Amazon Resource Name - // (ARN) of the Outpost. + // * outpost-arn - The Amazon Resource Name (ARN) of + // the Outpost. // - // * state - The state of the association. + // * state - The state of the association. LocalGatewayIds []*string // The maximum number of results to return with a single call. To retrieve the diff --git a/service/ec2/api_op_DescribeManagedPrefixLists.go b/service/ec2/api_op_DescribeManagedPrefixLists.go index 69ba39b4ee3..7478ae88a37 100644 --- a/service/ec2/api_op_DescribeManagedPrefixLists.go +++ b/service/ec2/api_op_DescribeManagedPrefixLists.go @@ -38,13 +38,13 @@ type DescribeManagedPrefixListsInput struct { // One or more filters. // - // * owner-id - The ID of the prefix list owner. + // * owner-id - The ID of the prefix list owner. // - // * + // * // prefix-list-id - The ID of the prefix list. // - // * prefix-list-name - The name - // of the prefix list. + // * prefix-list-name - The name of + // the prefix list. Filters []*types.Filter // The maximum number of results to return with a single call. To retrieve the diff --git a/service/ec2/api_op_DescribeMovingAddresses.go b/service/ec2/api_op_DescribeMovingAddresses.go index 5ca292777cc..94abd811a18 100644 --- a/service/ec2/api_op_DescribeMovingAddresses.go +++ b/service/ec2/api_op_DescribeMovingAddresses.go @@ -40,7 +40,7 @@ type DescribeMovingAddressesInput struct { // One or more filters. // - // * moving-status - The status of the Elastic IP address + // * moving-status - The status of the Elastic IP address // (MovingToVpc | RestoringToClassic). Filters []*types.Filter diff --git a/service/ec2/api_op_DescribeNatGateways.go b/service/ec2/api_op_DescribeNatGateways.go index 96c52d5f525..3e1ba3b8b48 100644 --- a/service/ec2/api_op_DescribeNatGateways.go +++ b/service/ec2/api_op_DescribeNatGateways.go @@ -37,27 +37,26 @@ type DescribeNatGatewaysInput struct { // One or more filters. // - // * nat-gateway-id - The ID of the NAT gateway. + // * nat-gateway-id - The ID of the NAT gateway. // - // * - // state - The state of the NAT gateway (pending | failed | available | deleting | + // * state - + // The state of the NAT gateway (pending | failed | available | deleting | // deleted). // - // * subnet-id - The ID of the subnet in which the NAT gateway + // * subnet-id - The ID of the subnet in which the NAT gateway // resides. // - // * tag: - The key/value combination of a tag assigned to the - // resource. Use the tag key in the filter name and the tag value as the filter - // value. For example, to find all resources that have a tag with the key Owner and - // the value TeamA, specify tag:Owner for the filter name and TeamA for the filter - // value. + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. For + // example, to find all resources that have a tag with the key Owner and the value + // TeamA, specify tag:Owner for the filter name and TeamA for the filter value. // - // * tag-key - The key of a tag assigned to the resource. Use this - // filter to find all resources assigned a tag with a specific key, regardless of - // the tag value. + // * + // tag-key - The key of a tag assigned to the resource. 
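A similar hypothetical sketch for the local gateway route table filters documented above (outpost-arn and state); the Outpost ARN and the state value are placeholders, and the same client assumption applies.

package example

import (
    "context"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/ec2"
    "github.com/aws/aws-sdk-go-v2/service/ec2/types"
)

// routeTablesOnOutpost sketches filtering local gateway route tables by the
// Outpost they belong to and by an illustrative state value.
func routeTablesOnOutpost(ctx context.Context, client *ec2.Client, outpostARN string) (*ec2.DescribeLocalGatewayRouteTablesOutput, error) {
    return client.DescribeLocalGatewayRouteTables(ctx, &ec2.DescribeLocalGatewayRouteTablesInput{
        Filters: []*types.Filter{
            {Name: aws.String("outpost-arn"), Values: []*string{aws.String(outpostARN)}},
            {Name: aws.String("state"), Values: []*string{aws.String("available")}},
        },
    })
}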
Use this filter to find all + // resources assigned a tag with a specific key, regardless of the tag value. // - // * vpc-id - The ID of the VPC in which the NAT gateway - // resides. + // * + // vpc-id - The ID of the VPC in which the NAT gateway resides. Filter []*types.Filter // The maximum number of results to return with a single call. To retrieve the diff --git a/service/ec2/api_op_DescribeNetworkAcls.go b/service/ec2/api_op_DescribeNetworkAcls.go index 81ac35200ed..e68825ad826 100644 --- a/service/ec2/api_op_DescribeNetworkAcls.go +++ b/service/ec2/api_op_DescribeNetworkAcls.go @@ -39,62 +39,62 @@ type DescribeNetworkAclsInput struct { // One or more filters. // - // * association.association-id - The ID of an - // association ID for the ACL. + // * association.association-id - The ID of an association ID + // for the ACL. // - // * association.network-acl-id - The ID of the - // network ACL involved in the association. + // * association.network-acl-id - The ID of the network ACL involved + // in the association. // - // * association.subnet-id - The ID - // of the subnet involved in the association. + // * association.subnet-id - The ID of the subnet involved in + // the association. // - // * default - Indicates whether - // the ACL is the default network ACL for the VPC. + // * default - Indicates whether the ACL is the default network + // ACL for the VPC. // - // * entry.cidr - The IPv4 - // CIDR range specified in the entry. + // * entry.cidr - The IPv4 CIDR range specified in the entry. // - // * entry.icmp.code - The ICMP code - // specified in the entry, if any. + // * + // entry.icmp.code - The ICMP code specified in the entry, if any. // - // * entry.icmp.type - The ICMP type specified - // in the entry, if any. + // * + // entry.icmp.type - The ICMP type specified in the entry, if any. // - // * entry.ipv6-cidr - The IPv6 CIDR range specified in - // the entry. + // * + // entry.ipv6-cidr - The IPv6 CIDR range specified in the entry. // - // * entry.port-range.from - The start of the port range specified - // in the entry. + // * + // entry.port-range.from - The start of the port range specified in the entry. // - // * entry.port-range.to - The end of the port range specified - // in the entry. + // * + // entry.port-range.to - The end of the port range specified in the entry. // - // * entry.protocol - The protocol specified in the entry (tcp | - // udp | icmp or a protocol number). + // * + // entry.protocol - The protocol specified in the entry (tcp | udp | icmp or a + // protocol number). // - // * entry.rule-action - Allows or denies - // the matching traffic (allow | deny). + // * entry.rule-action - Allows or denies the matching traffic + // (allow | deny). // - // * entry.rule-number - The number of an - // entry (in other words, rule) in the set of ACL entries. + // * entry.rule-number - The number of an entry (in other words, + // rule) in the set of ACL entries. // - // * network-acl-id - - // The ID of the network ACL. + // * network-acl-id - The ID of the network + // ACL. // - // * owner-id - The ID of the AWS account that owns - // the network ACL. + // * owner-id - The ID of the AWS account that owns the network ACL. // - // * tag: - The key/value combination of a tag assigned to - // the resource. Use the tag key in the filter name and the tag value as the filter - // value. For example, to find all resources that have a tag with the key Owner and - // the value TeamA, specify tag:Owner for the filter name and TeamA for the filter - // value. 
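For the NAT gateway filters just reflowed, note that this input declares Filter (singular) rather than Filters. A hedged sketch using the tag:Owner/TeamA example from the documentation; the VPC ID is hypothetical.

package example

import (
    "context"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/ec2"
    "github.com/aws/aws-sdk-go-v2/service/ec2/types"
)

// teamANatGatewaysInVpc lists NAT gateways in one VPC that carry the tag Owner=TeamA.
func teamANatGatewaysInVpc(ctx context.Context, client *ec2.Client, vpcID string) (*ec2.DescribeNatGatewaysOutput, error) {
    return client.DescribeNatGateways(ctx, &ec2.DescribeNatGatewaysInput{
        // Note: Filter (singular) on this input shape, per the declaration above.
        Filter: []*types.Filter{
            {Name: aws.String("vpc-id"), Values: []*string{aws.String(vpcID)}},
            {Name: aws.String("tag:Owner"), Values: []*string{aws.String("TeamA")}},
        },
    })
}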
+ // * tag: + // - The key/value combination of a tag assigned to the resource. Use the tag key + // in the filter name and the tag value as the filter value. For example, to find + // all resources that have a tag with the key Owner and the value TeamA, specify + // tag:Owner for the filter name and TeamA for the filter value. // - // * tag-key - The key of a tag assigned to the resource. Use this - // filter to find all resources assigned a tag with a specific key, regardless of - // the tag value. + // * tag-key - The + // key of a tag assigned to the resource. Use this filter to find all resources + // assigned a tag with a specific key, regardless of the tag value. // - // * vpc-id - The ID of the VPC for the network ACL. + // * vpc-id - The + // ID of the VPC for the network ACL. Filters []*types.Filter // The maximum number of results to return with a single call. To retrieve the diff --git a/service/ec2/api_op_DescribeNetworkInterfacePermissions.go b/service/ec2/api_op_DescribeNetworkInterfacePermissions.go index 0107431d225..03e3a78c483 100644 --- a/service/ec2/api_op_DescribeNetworkInterfacePermissions.go +++ b/service/ec2/api_op_DescribeNetworkInterfacePermissions.go @@ -32,20 +32,20 @@ type DescribeNetworkInterfacePermissionsInput struct { // One or more filters. // - // * + // * // network-interface-permission.network-interface-permission-id - The ID of the // permission. // - // * network-interface-permission.network-interface-id - The ID of - // the network interface. + // * network-interface-permission.network-interface-id - The ID of the + // network interface. // - // * network-interface-permission.aws-account-id - The - // AWS account ID. + // * network-interface-permission.aws-account-id - The AWS + // account ID. // - // * network-interface-permission.aws-service - The AWS - // service. + // * network-interface-permission.aws-service - The AWS service. // - // * network-interface-permission.permission - The type of permission + // * + // network-interface-permission.permission - The type of permission // (INSTANCE-ATTACH | EIP-ASSOCIATE). Filters []*types.Filter diff --git a/service/ec2/api_op_DescribeNetworkInterfaces.go b/service/ec2/api_op_DescribeNetworkInterfaces.go index 3356b63ce45..776943ed51b 100644 --- a/service/ec2/api_op_DescribeNetworkInterfaces.go +++ b/service/ec2/api_op_DescribeNetworkInterfaces.go @@ -38,124 +38,122 @@ type DescribeNetworkInterfacesInput struct { // One or more filters. // - // * addresses.private-ip-address - The private IPv4 + // * addresses.private-ip-address - The private IPv4 // addresses associated with the network interface. // - // * addresses.primary - - // Whether the private IPv4 address is the primary IP address associated with the - // network interface. + // * addresses.primary - Whether + // the private IPv4 address is the primary IP address associated with the network + // interface. // - // * addresses.association.public-ip - The association ID - // returned when the network interface was associated with the Elastic IP address - // (IPv4). + // * addresses.association.public-ip - The association ID returned when + // the network interface was associated with the Elastic IP address (IPv4). // - // * addresses.association.owner-id - The owner ID of the addresses - // associated with the network interface. + // * + // addresses.association.owner-id - The owner ID of the addresses associated with + // the network interface. 
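The default and vpc-id filters for DescribeNetworkAcls, documented a little above, combine the same way; boolean-valued filters such as default are passed as the strings "true" or "false". The sketch below is illustrative only and reuses the configured-client assumption.

package example

import (
    "context"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/ec2"
    "github.com/aws/aws-sdk-go-v2/service/ec2/types"
)

// defaultNetworkACLForVpc looks up the default network ACL of a VPC.
func defaultNetworkACLForVpc(ctx context.Context, client *ec2.Client, vpcID string) (*ec2.DescribeNetworkAclsOutput, error) {
    return client.DescribeNetworkAcls(ctx, &ec2.DescribeNetworkAclsInput{
        Filters: []*types.Filter{
            {Name: aws.String("default"), Values: []*string{aws.String("true")}},
            {Name: aws.String("vpc-id"), Values: []*string{aws.String(vpcID)}},
        },
    })
}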
// - // * association.association-id - The - // association ID returned when the network interface was associated with an IPv4 - // address. + // * association.association-id - The association ID + // returned when the network interface was associated with an IPv4 address. // - // * association.allocation-id - The allocation ID returned when you - // allocated the Elastic IP address (IPv4) for your network interface. + // * + // association.allocation-id - The allocation ID returned when you allocated the + // Elastic IP address (IPv4) for your network interface. // - // * - // association.ip-owner-id - The owner of the Elastic IP address (IPv4) associated - // with the network interface. + // * association.ip-owner-id + // - The owner of the Elastic IP address (IPv4) associated with the network + // interface. // - // * association.public-ip - The address of the - // Elastic IP address (IPv4) bound to the network interface. + // * association.public-ip - The address of the Elastic IP address + // (IPv4) bound to the network interface. // - // * - // association.public-dns-name - The public DNS name for the network interface - // (IPv4). + // * association.public-dns-name - The + // public DNS name for the network interface (IPv4). // - // * attachment.attachment-id - The ID of the interface attachment. + // * attachment.attachment-id - + // The ID of the interface attachment. // + // * attachment.attach-time - The time that + // the network interface was attached to an instance. // - // * attachment.attach-time - The time that the network interface was attached to - // an instance. + // * + // attachment.delete-on-termination - Indicates whether the attachment is deleted + // when an instance is terminated. // - // * attachment.delete-on-termination - Indicates whether the - // attachment is deleted when an instance is terminated. + // * attachment.device-index - The device index to + // which the network interface is attached. // - // * - // attachment.device-index - The device index to which the network interface is - // attached. + // * attachment.instance-id - The ID of + // the instance to which the network interface is attached. // - // * attachment.instance-id - The ID of the instance to which the - // network interface is attached. + // * + // attachment.instance-owner-id - The owner ID of the instance to which the network + // interface is attached. // - // * attachment.instance-owner-id - The owner - // ID of the instance to which the network interface is attached. + // * attachment.status - The status of the attachment + // (attaching | attached | detaching | detached). // - // * - // attachment.status - The status of the attachment (attaching | attached | - // detaching | detached). + // * availability-zone - The + // Availability Zone of the network interface. // - // * availability-zone - The Availability Zone of the - // network interface. - // - // * description - The description of the network - // interface. + // * description - The description of + // the network interface. // - // * group-id - The ID of a security group associated with the - // network interface. + // * group-id - The ID of a security group associated with + // the network interface. // - // * group-name - The name of a security group associated + // * group-name - The name of a security group associated // with the network interface. // - // * ipv6-addresses.ipv6-address - An IPv6 address + // * ipv6-addresses.ipv6-address - An IPv6 address // associated with the network interface. 
// - // * mac-address - The MAC address of - // the network interface. + // * mac-address - The MAC address of the + // network interface. // - // * network-interface-id - The ID of the network - // interface. + // * network-interface-id - The ID of the network interface. // - // * owner-id - The AWS account ID of the network interface - // owner. + // * + // owner-id - The AWS account ID of the network interface owner. // - // * private-ip-address - The private IPv4 address or addresses of the - // network interface. + // * + // private-ip-address - The private IPv4 address or addresses of the network + // interface. // - // * private-dns-name - The private DNS name of the network - // interface (IPv4). + // * private-dns-name - The private DNS name of the network interface + // (IPv4). // - // * requester-id - The ID of the entity that launched the - // instance on your behalf (for example, AWS Management Console, Auto Scaling, and - // so on). + // * requester-id - The ID of the entity that launched the instance on + // your behalf (for example, AWS Management Console, Auto Scaling, and so on). // - // * requester-managed - Indicates whether the network interface is - // being managed by an AWS service (for example, AWS Management Console, Auto - // Scaling, and so on). + // * + // requester-managed - Indicates whether the network interface is being managed by + // an AWS service (for example, AWS Management Console, Auto Scaling, and so + // on). // - // * source-dest-check - Indicates whether the network - // interface performs source/destination checking. A value of true means checking - // is enabled, and false means checking is disabled. The value must be false for - // the network interface to perform network address translation (NAT) in your - // VPC. + // * source-dest-check - Indicates whether the network interface performs + // source/destination checking. A value of true means checking is enabled, and + // false means checking is disabled. The value must be false for the network + // interface to perform network address translation (NAT) in your VPC. // - // * status - The status of the network interface. If the network - // interface is not attached to an instance, the status is available; if a network - // interface is attached to an instance the status is in-use. + // * status - + // The status of the network interface. If the network interface is not attached to + // an instance, the status is available; if a network interface is attached to an + // instance the status is in-use. // - // * subnet-id - - // The ID of the subnet for the network interface. + // * subnet-id - The ID of the subnet for the + // network interface. // - // * tag: - The key/value - // combination of a tag assigned to the resource. Use the tag key in the filter - // name and the tag value as the filter value. For example, to find all resources - // that have a tag with the key Owner and the value TeamA, specify tag:Owner for - // the filter name and TeamA for the filter value. + // * tag: - The key/value combination of a tag assigned to the + // resource. Use the tag key in the filter name and the tag value as the filter + // value. For example, to find all resources that have a tag with the key Owner and + // the value TeamA, specify tag:Owner for the filter name and TeamA for the filter + // value. // - // * tag-key - The key of a - // tag assigned to the resource. Use this filter to find all resources assigned a - // tag with a specific key, regardless of the tag value. 
+ // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of the tag + // value. // - // * vpc-id - The ID of - // the VPC for the network interface. + // * vpc-id - The ID of the VPC for the network interface. Filters []*types.Filter // The maximum number of items to return for this request. The request returns a diff --git a/service/ec2/api_op_DescribePlacementGroups.go b/service/ec2/api_op_DescribePlacementGroups.go index cd8556f0cea..e9aa1bba3a2 100644 --- a/service/ec2/api_op_DescribePlacementGroups.go +++ b/service/ec2/api_op_DescribePlacementGroups.go @@ -40,24 +40,24 @@ type DescribePlacementGroupsInput struct { // The filters. // - // * group-name - The name of the placement group. + // * group-name - The name of the placement group. // - // * state - - // The state of the placement group (pending | available | deleting | deleted). + // * state - The + // state of the placement group (pending | available | deleting | deleted). // - // - // * strategy - The strategy of the placement group (cluster | spread | + // * + // strategy - The strategy of the placement group (cluster | spread | // partition). // - // * tag: - The key/value combination of a tag assigned to the + // * tag: - The key/value combination of a tag assigned to the // resource. Use the tag key in the filter name and the tag value as the filter // value. For example, to find all resources that have a tag with the key Owner and // the value TeamA, specify tag:Owner for the filter name and TeamA for the filter // value. // - // * tag-key - The key of a tag assigned to the resource. Use this - // filter to find all resources that have a tag with a specific key, regardless of - // the tag value. + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources that have a tag with a specific key, regardless of the tag + // value. Filters []*types.Filter // The IDs of the placement groups. diff --git a/service/ec2/api_op_DescribePrefixLists.go b/service/ec2/api_op_DescribePrefixLists.go index ef74be6f71e..2f774b5092f 100644 --- a/service/ec2/api_op_DescribePrefixLists.go +++ b/service/ec2/api_op_DescribePrefixLists.go @@ -39,9 +39,9 @@ type DescribePrefixListsInput struct { // One or more filters. // - // * prefix-list-id: The ID of a prefix list. + // * prefix-list-id: The ID of a prefix list. // - // * + // * // prefix-list-name: The name of a prefix list. Filters []*types.Filter diff --git a/service/ec2/api_op_DescribePublicIpv4Pools.go b/service/ec2/api_op_DescribePublicIpv4Pools.go index 0672255e168..10daccc920a 100644 --- a/service/ec2/api_op_DescribePublicIpv4Pools.go +++ b/service/ec2/api_op_DescribePublicIpv4Pools.go @@ -31,15 +31,15 @@ type DescribePublicIpv4PoolsInput struct { // One or more filters. // - // * tag: - The key/value combination of a tag assigned - // to the resource. Use the tag key in the filter name and the tag value as the - // filter value. For example, to find all resources that have a tag with the key - // Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for - // the filter value. + // * tag: - The key/value combination of a tag assigned to + // the resource. Use the tag key in the filter name and the tag value as the filter + // value. For example, to find all resources that have a tag with the key Owner and + // the value TeamA, specify tag:Owner for the filter name and TeamA for the filter + // value. 
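Another illustrative sketch, this time combining two of the DescribeNetworkInterfaces filters listed above to find the interfaces attached to a given instance; the instance ID is hypothetical.

package example

import (
    "context"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/ec2"
    "github.com/aws/aws-sdk-go-v2/service/ec2/types"
)

// interfacesAttachedTo returns the in-use network interfaces attached to an instance.
func interfacesAttachedTo(ctx context.Context, client *ec2.Client, instanceID string) (*ec2.DescribeNetworkInterfacesOutput, error) {
    return client.DescribeNetworkInterfaces(ctx, &ec2.DescribeNetworkInterfacesInput{
        Filters: []*types.Filter{
            {Name: aws.String("attachment.instance-id"), Values: []*string{aws.String(instanceID)}},
            {Name: aws.String("status"), Values: []*string{aws.String("in-use")}},
        },
    })
}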
// - // * tag-key - The key of a tag assigned to the resource. - // Use this filter to find all resources assigned a tag with a specific key, - // regardless of the tag value. + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of the tag + // value. Filters []*types.Filter // The maximum number of results to return with a single call. To retrieve the diff --git a/service/ec2/api_op_DescribeRegions.go b/service/ec2/api_op_DescribeRegions.go index 563d2ec6836..0d1699a8f49 100644 --- a/service/ec2/api_op_DescribeRegions.go +++ b/service/ec2/api_op_DescribeRegions.go @@ -46,14 +46,14 @@ type DescribeRegionsInput struct { // The filters. // - // * endpoint - The endpoint of the Region (for example, + // * endpoint - The endpoint of the Region (for example, // ec2.us-east-1.amazonaws.com). // - // * opt-in-status - The opt-in status of the - // Region (opt-in-not-required | opted-in | not-opted-in). + // * opt-in-status - The opt-in status of the Region + // (opt-in-not-required | opted-in | not-opted-in). // - // * region-name - The - // name of the Region (for example, us-east-1). + // * region-name - The name of + // the Region (for example, us-east-1). Filters []*types.Filter // The names of the Regions. You can specify any Regions, whether they are enabled diff --git a/service/ec2/api_op_DescribeReservedInstances.go b/service/ec2/api_op_DescribeReservedInstances.go index c0f5a57488c..7b407260d64 100644 --- a/service/ec2/api_op_DescribeReservedInstances.go +++ b/service/ec2/api_op_DescribeReservedInstances.go @@ -41,58 +41,57 @@ type DescribeReservedInstancesInput struct { // One or more filters. // - // * availability-zone - The Availability Zone where the + // * availability-zone - The Availability Zone where the // Reserved Instance can be used. // - // * duration - The duration of the Reserved + // * duration - The duration of the Reserved // Instance (one year or three years), in seconds (31536000 | 94608000). // - // * end - // - The time when the Reserved Instance expires (for example, + // * end - + // The time when the Reserved Instance expires (for example, // 2015-08-07T11:54:42.000Z). // - // * fixed-price - The purchase price of the - // Reserved Instance (for example, 9800.0). + // * fixed-price - The purchase price of the Reserved + // Instance (for example, 9800.0). // - // * instance-type - The instance - // type that is covered by the reservation. + // * instance-type - The instance type that is + // covered by the reservation. // - // * scope - The scope of the - // Reserved Instance (Region or Availability Zone). + // * scope - The scope of the Reserved Instance + // (Region or Availability Zone). // - // * product-description - - // The Reserved Instance product platform description. Instances that include - // (Amazon VPC) in the product platform description will only be displayed to - // EC2-Classic account holders and are for use with Amazon VPC (Linux/UNIX | - // Linux/UNIX (Amazon VPC) | SUSE Linux | SUSE Linux (Amazon VPC) | Red Hat - // Enterprise Linux | Red Hat Enterprise Linux (Amazon VPC) | Windows | Windows - // (Amazon VPC) | Windows with SQL Server Standard | Windows with SQL Server - // Standard (Amazon VPC) | Windows with SQL Server Web | Windows with SQL Server - // Web (Amazon VPC) | Windows with SQL Server Enterprise | Windows with SQL Server - // Enterprise (Amazon VPC)). 
+ // * product-description - The Reserved Instance + // product platform description. Instances that include (Amazon VPC) in the product + // platform description will only be displayed to EC2-Classic account holders and + // are for use with Amazon VPC (Linux/UNIX | Linux/UNIX (Amazon VPC) | SUSE Linux | + // SUSE Linux (Amazon VPC) | Red Hat Enterprise Linux | Red Hat Enterprise Linux + // (Amazon VPC) | Windows | Windows (Amazon VPC) | Windows with SQL Server Standard + // | Windows with SQL Server Standard (Amazon VPC) | Windows with SQL Server Web | + // Windows with SQL Server Web (Amazon VPC) | Windows with SQL Server Enterprise | + // Windows with SQL Server Enterprise (Amazon VPC)). // - // * reserved-instances-id - The ID of the Reserved - // Instance. + // * reserved-instances-id - The + // ID of the Reserved Instance. // - // * start - The time at which the Reserved Instance purchase - // request was placed (for example, 2014-08-07T11:54:42.000Z). + // * start - The time at which the Reserved Instance + // purchase request was placed (for example, 2014-08-07T11:54:42.000Z). // - // * state - The - // state of the Reserved Instance (payment-pending | active | payment-failed | + // * state - + // The state of the Reserved Instance (payment-pending | active | payment-failed | // retired). // - // * tag: - The key/value combination of a tag assigned to the - // resource. Use the tag key in the filter name and the tag value as the filter - // value. For example, to find all resources that have a tag with the key Owner and - // the value TeamA, specify tag:Owner for the filter name and TeamA for the filter - // value. + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. For + // example, to find all resources that have a tag with the key Owner and the value + // TeamA, specify tag:Owner for the filter name and TeamA for the filter value. // - // * tag-key - The key of a tag assigned to the resource. Use this - // filter to find all resources assigned a tag with a specific key, regardless of - // the tag value. + // * + // tag-key - The key of a tag assigned to the resource. Use this filter to find all + // resources assigned a tag with a specific key, regardless of the tag value. // - // * usage-price - The usage price of the Reserved Instance, - // per hour (for example, 0.84). + // * + // usage-price - The usage price of the Reserved Instance, per hour (for example, + // 0.84). Filters []*types.Filter // Describes whether the Reserved Instance is Standard or Convertible. diff --git a/service/ec2/api_op_DescribeReservedInstancesListings.go b/service/ec2/api_op_DescribeReservedInstancesListings.go index f9353d26ea6..d5ca441e3a9 100644 --- a/service/ec2/api_op_DescribeReservedInstancesListings.go +++ b/service/ec2/api_op_DescribeReservedInstancesListings.go @@ -47,17 +47,16 @@ type DescribeReservedInstancesListingsInput struct { // One or more filters. // - // * reserved-instances-id - The ID of the Reserved + // * reserved-instances-id - The ID of the Reserved // Instances. // - // * reserved-instances-listing-id - The ID of the Reserved - // Instances listing. + // * reserved-instances-listing-id - The ID of the Reserved Instances + // listing. // - // * status - The status of the Reserved Instance listing - // (pending | active | cancelled | closed). + // * status - The status of the Reserved Instance listing (pending | + // active | cancelled | closed). 
// - // * status-message - The reason for - // the status. + // * status-message - The reason for the status. Filters []*types.Filter // One or more Reserved Instance IDs. diff --git a/service/ec2/api_op_DescribeReservedInstancesModifications.go b/service/ec2/api_op_DescribeReservedInstancesModifications.go index 413b5a47be3..3986d99f2b7 100644 --- a/service/ec2/api_op_DescribeReservedInstancesModifications.go +++ b/service/ec2/api_op_DescribeReservedInstancesModifications.go @@ -38,49 +38,48 @@ type DescribeReservedInstancesModificationsInput struct { // One or more filters. // - // * client-token - The idempotency token for the + // * client-token - The idempotency token for the // modification request. // - // * create-date - The time when the modification - // request was created. + // * create-date - The time when the modification request + // was created. // - // * effective-date - The time when the modification - // becomes effective. + // * effective-date - The time when the modification becomes + // effective. // - // * modification-result.reserved-instances-id - The ID for - // the Reserved Instances created as part of the modification request. This ID is - // only available when the status of the modification is fulfilled. + // * modification-result.reserved-instances-id - The ID for the + // Reserved Instances created as part of the modification request. This ID is only + // available when the status of the modification is fulfilled. // - // * + // * // modification-result.target-configuration.availability-zone - The Availability // Zone for the new Reserved Instances. // - // * + // * // modification-result.target-configuration.instance-count - The number of new // Reserved Instances. // - // * - // modification-result.target-configuration.instance-type - The instance type of - // the new Reserved Instances. + // * modification-result.target-configuration.instance-type - + // The instance type of the new Reserved Instances. // - // * + // * // modification-result.target-configuration.platform - The network platform of the // new Reserved Instances (EC2-Classic | EC2-VPC). // - // * reserved-instances-id - - // The ID of the Reserved Instances modified. + // * reserved-instances-id - The + // ID of the Reserved Instances modified. // - // * - // reserved-instances-modification-id - The ID of the modification request. + // * reserved-instances-modification-id - + // The ID of the modification request. // - // * - // status - The status of the Reserved Instances modification request (processing | - // fulfilled | failed). + // * status - The status of the Reserved + // Instances modification request (processing | fulfilled | failed). // - // * status-message - The reason for the status. + // * + // status-message - The reason for the status. // - // * - // update-date - The time when the modification request was last updated. + // * update-date - The time when the + // modification request was last updated. Filters []*types.Filter // The token to retrieve the next page of results. diff --git a/service/ec2/api_op_DescribeReservedInstancesOfferings.go b/service/ec2/api_op_DescribeReservedInstancesOfferings.go index 30cec604888..049f55702f0 100644 --- a/service/ec2/api_op_DescribeReservedInstancesOfferings.go +++ b/service/ec2/api_op_DescribeReservedInstancesOfferings.go @@ -50,25 +50,25 @@ type DescribeReservedInstancesOfferingsInput struct { // One or more filters. 
// - // * availability-zone - The Availability Zone where the + // * availability-zone - The Availability Zone where the // Reserved Instance can be used. // - // * duration - The duration of the Reserved + // * duration - The duration of the Reserved // Instance (for example, one year or three years), in seconds (31536000 | // 94608000). // - // * fixed-price - The purchase price of the Reserved Instance (for + // * fixed-price - The purchase price of the Reserved Instance (for // example, 9800.0). // - // * instance-type - The instance type that is covered by - // the reservation. + // * instance-type - The instance type that is covered by the + // reservation. // - // * marketplace - Set to true to show only Reserved Instance + // * marketplace - Set to true to show only Reserved Instance // Marketplace offerings. When this filter is not used, which is the default // behavior, all offerings from both AWS and the Reserved Instance Marketplace are // listed. // - // * product-description - The Reserved Instance product platform + // * product-description - The Reserved Instance product platform // description. Instances that include (Amazon VPC) in the product platform // description will only be displayed to EC2-Classic account holders and are for // use with Amazon VPC. (Linux/UNIX | Linux/UNIX (Amazon VPC) | SUSE Linux | SUSE @@ -78,15 +78,14 @@ type DescribeReservedInstancesOfferingsInput struct { // Windows with SQL Server Web (Amazon VPC) | Windows with SQL Server Enterprise | // Windows with SQL Server Enterprise (Amazon VPC)) // - // * + // * // reserved-instances-offering-id - The Reserved Instances offering ID. // - // * - // scope - The scope of the Reserved Instance (Availability Zone or Region). + // * scope - + // The scope of the Reserved Instance (Availability Zone or Region). // - // * - // usage-price - The usage price of the Reserved Instance, per hour (for example, - // 0.84). + // * usage-price + // - The usage price of the Reserved Instance, per hour (for example, 0.84). Filters []*types.Filter // Include Reserved Instance Marketplace offerings in the response. diff --git a/service/ec2/api_op_DescribeRouteTables.go b/service/ec2/api_op_DescribeRouteTables.go index 6d96db845b5..b2f72224335 100644 --- a/service/ec2/api_op_DescribeRouteTables.go +++ b/service/ec2/api_op_DescribeRouteTables.go @@ -43,78 +43,77 @@ type DescribeRouteTablesInput struct { // One or more filters. // - // * association.route-table-association-id - The ID of - // an association ID for the route table. + // * association.route-table-association-id - The ID of an + // association ID for the route table. // - // * association.route-table-id - The - // ID of the route table involved in the association. + // * association.route-table-id - The ID of + // the route table involved in the association. // - // * association.subnet-id - // - The ID of the subnet involved in the association. + // * association.subnet-id - The ID + // of the subnet involved in the association. // - // * association.main - - // Indicates whether the route table is the main route table for the VPC (true | - // false). Route tables that do not have an association ID are not returned in the + // * association.main - Indicates + // whether the route table is the main route table for the VPC (true | false). + // Route tables that do not have an association ID are not returned in the // response. // - // * owner-id - The ID of the AWS account that owns the route - // table. 
+ // * owner-id - The ID of the AWS account that owns the route table. // - // * route-table-id - The ID of the route table. + // * + // route-table-id - The ID of the route table. // - // * - // route.destination-cidr-block - The IPv4 CIDR range specified in a route in the - // table. + // * route.destination-cidr-block - + // The IPv4 CIDR range specified in a route in the table. // - // * route.destination-ipv6-cidr-block - The IPv6 CIDR range specified - // in a route in the route table. + // * + // route.destination-ipv6-cidr-block - The IPv6 CIDR range specified in a route in + // the route table. // - // * route.destination-prefix-list-id - The ID - // (prefix) of the AWS service specified in a route in the table. + // * route.destination-prefix-list-id - The ID (prefix) of the + // AWS service specified in a route in the table. // - // * + // * // route.egress-only-internet-gateway-id - The ID of an egress-only Internet // gateway specified in a route in the route table. // - // * route.gateway-id - The - // ID of a gateway specified in a route in the table. + // * route.gateway-id - The ID of + // a gateway specified in a route in the table. // - // * route.instance-id - - // The ID of an instance specified in a route in the table. + // * route.instance-id - The ID of an + // instance specified in a route in the table. // - // * - // route.nat-gateway-id - The ID of a NAT gateway. + // * route.nat-gateway-id - The ID of + // a NAT gateway. // - // * route.transit-gateway-id - // - The ID of a transit gateway. + // * route.transit-gateway-id - The ID of a transit gateway. // - // * route.origin - Describes how the route was - // created. CreateRouteTable indicates that the route was automatically created - // when the route table was created; CreateRoute indicates that the route was - // manually added to the route table; EnableVgwRoutePropagation indicates that the - // route was propagated by route propagation. + // * + // route.origin - Describes how the route was created. CreateRouteTable indicates + // that the route was automatically created when the route table was created; + // CreateRoute indicates that the route was manually added to the route table; + // EnableVgwRoutePropagation indicates that the route was propagated by route + // propagation. // - // * route.state - The state of a - // route in the route table (active | blackhole). The blackhole state indicates - // that the route's target isn't available (for example, the specified gateway - // isn't attached to the VPC, the specified NAT instance has been terminated, and - // so on). + // * route.state - The state of a route in the route table (active | + // blackhole). The blackhole state indicates that the route's target isn't + // available (for example, the specified gateway isn't attached to the VPC, the + // specified NAT instance has been terminated, and so on). // - // * route.vpc-peering-connection-id - The ID of a VPC peering - // connection specified in a route in the table. + // * + // route.vpc-peering-connection-id - The ID of a VPC peering connection specified + // in a route in the table. // - // * tag: - The key/value - // combination of a tag assigned to the resource. Use the tag key in the filter - // name and the tag value as the filter value. For example, to find all resources - // that have a tag with the key Owner and the value TeamA, specify tag:Owner for - // the filter name and TeamA for the filter value. + // * tag: - The key/value combination of a tag assigned + // to the resource. 
Use the tag key in the filter name and the tag value as the + // filter value. For example, to find all resources that have a tag with the key + // Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. // - // * tag-key - The key of a - // tag assigned to the resource. Use this filter to find all resources assigned a - // tag with a specific key, regardless of the tag value. + // * tag-key - The key of a tag assigned to the resource. Use + // this filter to find all resources assigned a tag with a specific key, regardless + // of the tag value. // - // * vpc-id - The ID of - // the VPC for the route table. + // * vpc-id - The ID of the VPC for the route table. Filters []*types.Filter // The maximum number of results to return with a single call. To retrieve the diff --git a/service/ec2/api_op_DescribeScheduledInstanceAvailability.go b/service/ec2/api_op_DescribeScheduledInstanceAvailability.go index 171f5247e84..523646fa617 100644 --- a/service/ec2/api_op_DescribeScheduledInstanceAvailability.go +++ b/service/ec2/api_op_DescribeScheduledInstanceAvailability.go @@ -54,16 +54,16 @@ type DescribeScheduledInstanceAvailabilityInput struct { // The filters. // - // * availability-zone - The Availability Zone (for example, + // * availability-zone - The Availability Zone (for example, // us-west-2a). // - // * instance-type - The instance type (for example, c4.large). + // * instance-type - The instance type (for example, c4.large). // + // * + // network-platform - The network platform (EC2-Classic or EC2-VPC). // - // * network-platform - The network platform (EC2-Classic or EC2-VPC). - // - // * - // platform - The platform (Linux/UNIX or Windows). + // * platform - + // The platform (Linux/UNIX or Windows). Filters []*types.Filter // The maximum number of results to return in a single call. This value can be diff --git a/service/ec2/api_op_DescribeScheduledInstances.go b/service/ec2/api_op_DescribeScheduledInstances.go index f40800321ab..6d55d23cb2a 100644 --- a/service/ec2/api_op_DescribeScheduledInstances.go +++ b/service/ec2/api_op_DescribeScheduledInstances.go @@ -38,16 +38,16 @@ type DescribeScheduledInstancesInput struct { // The filters. // - // * availability-zone - The Availability Zone (for example, + // * availability-zone - The Availability Zone (for example, // us-west-2a). // - // * instance-type - The instance type (for example, c4.large). + // * instance-type - The instance type (for example, c4.large). // + // * + // network-platform - The network platform (EC2-Classic or EC2-VPC). // - // * network-platform - The network platform (EC2-Classic or EC2-VPC). - // - // * - // platform - The platform (Linux/UNIX or Windows). + // * platform - + // The platform (Linux/UNIX or Windows). Filters []*types.Filter // The maximum number of results to return in a single call. This value can be diff --git a/service/ec2/api_op_DescribeSecurityGroups.go b/service/ec2/api_op_DescribeSecurityGroups.go index 9ea19be4af5..dd54d349a68 100644 --- a/service/ec2/api_op_DescribeSecurityGroups.go +++ b/service/ec2/api_op_DescribeSecurityGroups.go @@ -45,93 +45,92 @@ type DescribeSecurityGroupsInput struct { // groups for which any combination of rules - not necessarily a single rule - // match all filters. // - // * description - The description of the security group. + // * description - The description of the security group. 
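The DescribeRouteTables filters above pair with the MaxResults and NextToken members mentioned alongside them. The sketch below pages manually through results for the association.main filter, assuming the output exposes the usual RouteTables and NextToken members; it is not part of the generated change.

package example

import (
    "context"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/ec2"
    "github.com/aws/aws-sdk-go-v2/service/ec2/types"
)

// mainRouteTables collects the main route table of every VPC in the Region,
// following NextToken until the listing is exhausted.
func mainRouteTables(ctx context.Context, client *ec2.Client) ([]*types.RouteTable, error) {
    var tables []*types.RouteTable
    var token *string
    for {
        out, err := client.DescribeRouteTables(ctx, &ec2.DescribeRouteTablesInput{
            Filters: []*types.Filter{
                // association.main is boolean-valued; pass it as the string "true".
                {Name: aws.String("association.main"), Values: []*string{aws.String("true")}},
            },
            NextToken: token,
        })
        if err != nil {
            return nil, err
        }
        tables = append(tables, out.RouteTables...)
        if out.NextToken == nil || *out.NextToken == "" {
            return tables, nil
        }
        token = out.NextToken
    }
}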
// - // - // * egress.ip-permission.cidr - An IPv4 CIDR block for an outbound security group + // * + // egress.ip-permission.cidr - An IPv4 CIDR block for an outbound security group // rule. // - // * egress.ip-permission.from-port - For an outbound rule, the start of + // * egress.ip-permission.from-port - For an outbound rule, the start of // port range for the TCP and UDP protocols, or an ICMP type number. // - // * + // * // egress.ip-permission.group-id - The ID of a security group that has been // referenced in an outbound security group rule. // - // * + // * // egress.ip-permission.group-name - The name of a security group that has been // referenced in an outbound security group rule. // - // * - // egress.ip-permission.ipv6-cidr - An IPv6 CIDR block for an outbound security - // group rule. + // * egress.ip-permission.ipv6-cidr + // - An IPv6 CIDR block for an outbound security group rule. // - // * egress.ip-permission.prefix-list-id - The ID of a prefix list - // to which a security group rule allows outbound access. + // * + // egress.ip-permission.prefix-list-id - The ID of a prefix list to which a + // security group rule allows outbound access. // - // * - // egress.ip-permission.protocol - The IP protocol for an outbound security group - // rule (tcp | udp | icmp or a protocol number). + // * egress.ip-permission.protocol - + // The IP protocol for an outbound security group rule (tcp | udp | icmp or a + // protocol number). // - // * - // egress.ip-permission.to-port - For an outbound rule, the end of port range for - // the TCP and UDP protocols, or an ICMP code. + // * egress.ip-permission.to-port - For an outbound rule, the + // end of port range for the TCP and UDP protocols, or an ICMP code. // - // * egress.ip-permission.user-id - // - The ID of an AWS account that has been referenced in an outbound security - // group rule. + // * + // egress.ip-permission.user-id - The ID of an AWS account that has been referenced + // in an outbound security group rule. // - // * group-id - The ID of the security group. + // * group-id - The ID of the security + // group. // - // * group-name - - // The name of the security group. + // * group-name - The name of the security group. // - // * ip-permission.cidr - An IPv4 CIDR block - // for an inbound security group rule. + // * ip-permission.cidr - + // An IPv4 CIDR block for an inbound security group rule. // - // * ip-permission.from-port - For an - // inbound rule, the start of port range for the TCP and UDP protocols, or an ICMP - // type number. + // * + // ip-permission.from-port - For an inbound rule, the start of port range for the + // TCP and UDP protocols, or an ICMP type number. // - // * ip-permission.group-id - The ID of a security group that has - // been referenced in an inbound security group rule. + // * ip-permission.group-id - The + // ID of a security group that has been referenced in an inbound security group + // rule. // - // * - // ip-permission.group-name - The name of a security group that has been referenced - // in an inbound security group rule. + // * ip-permission.group-name - The name of a security group that has been + // referenced in an inbound security group rule. // - // * ip-permission.ipv6-cidr - An IPv6 CIDR - // block for an inbound security group rule. + // * ip-permission.ipv6-cidr - An + // IPv6 CIDR block for an inbound security group rule. // - // * ip-permission.prefix-list-id - - // The ID of a prefix list from which a security group rule allows inbound - // access. 
+ // * + // ip-permission.prefix-list-id - The ID of a prefix list from which a security + // group rule allows inbound access. // - // * ip-permission.protocol - The IP protocol for an inbound security - // group rule (tcp | udp | icmp or a protocol number). + // * ip-permission.protocol - The IP protocol + // for an inbound security group rule (tcp | udp | icmp or a protocol number). // - // * ip-permission.to-port - // - For an inbound rule, the end of port range for the TCP and UDP protocols, or - // an ICMP code. + // * + // ip-permission.to-port - For an inbound rule, the end of port range for the TCP + // and UDP protocols, or an ICMP code. // - // * ip-permission.user-id - The ID of an AWS account that has - // been referenced in an inbound security group rule. + // * ip-permission.user-id - The ID of an AWS + // account that has been referenced in an inbound security group rule. // - // * owner-id - The AWS - // account ID of the owner of the security group. + // * owner-id + // - The AWS account ID of the owner of the security group. // - // * tag: - The key/value + // * tag: - The key/value // combination of a tag assigned to the resource. Use the tag key in the filter // name and the tag value as the filter value. For example, to find all resources // that have a tag with the key Owner and the value TeamA, specify tag:Owner for // the filter name and TeamA for the filter value. // - // * tag-key - The key of a - // tag assigned to the resource. Use this filter to find all resources assigned a - // tag with a specific key, regardless of the tag value. + // * tag-key - The key of a tag + // assigned to the resource. Use this filter to find all resources assigned a tag + // with a specific key, regardless of the tag value. // - // * vpc-id - The ID of - // the VPC specified when the security group was created. + // * vpc-id - The ID of the VPC + // specified when the security group was created. Filters []*types.Filter // The IDs of the security groups. Required for security groups in a nondefault diff --git a/service/ec2/api_op_DescribeSnapshots.go b/service/ec2/api_op_DescribeSnapshots.go index 25192387af6..06dad478913 100644 --- a/service/ec2/api_op_DescribeSnapshots.go +++ b/service/ec2/api_op_DescribeSnapshots.go @@ -17,15 +17,15 @@ import ( // AWS accounts for which you have explicit create volume permissions. The create // volume permissions fall into the following categories: // -// * public: The owner -// of the snapshot granted create volume permissions for the snapshot to the all +// * public: The owner of +// the snapshot granted create volume permissions for the snapshot to the all // group. All AWS accounts have create volume permissions for these snapshots. // -// -// * explicit: The owner of the snapshot granted create volume permissions to a +// * +// explicit: The owner of the snapshot granted create volume permissions to a // specific AWS account. // -// * implicit: An AWS account has implicit create volume +// * implicit: An AWS account has implicit create volume // permissions for all snapshots it owns. // // The list of snapshots returned can be @@ -78,46 +78,45 @@ type DescribeSnapshotsInput struct { // The filters. // - // * description - A description of the snapshot. + // * description - A description of the snapshot. 
// - // * - // encrypted - Indicates whether the snapshot is encrypted (true | false) + // * encrypted - + // Indicates whether the snapshot is encrypted (true | false) // - // * - // owner-alias - The owner alias, from an Amazon-maintained list (amazon). This is - // not the user-configured AWS account alias set using the IAM console. We - // recommend that you use the related parameter instead of this filter. + // * owner-alias - The + // owner alias, from an Amazon-maintained list (amazon). This is not the + // user-configured AWS account alias set using the IAM console. We recommend that + // you use the related parameter instead of this filter. // - // * - // owner-id - The AWS account ID of the owner. We recommend that you use the - // related parameter instead of this filter. + // * owner-id - The AWS + // account ID of the owner. We recommend that you use the related parameter instead + // of this filter. // - // * progress - The progress of the - // snapshot, as a percentage (for example, 80%). + // * progress - The progress of the snapshot, as a percentage (for + // example, 80%). // - // * snapshot-id - The snapshot - // ID. + // * snapshot-id - The snapshot ID. // - // * start-time - The time stamp when the snapshot was initiated. + // * start-time - The time stamp + // when the snapshot was initiated. // - // * - // status - The status of the snapshot (pending | completed | error). + // * status - The status of the snapshot (pending + // | completed | error). // - // * tag: - - // The key/value combination of a tag assigned to the resource. Use the tag key in - // the filter name and the tag value as the filter value. For example, to find all - // resources that have a tag with the key Owner and the value TeamA, specify - // tag:Owner for the filter name and TeamA for the filter value. + // * tag: - The key/value combination of a tag assigned to + // the resource. Use the tag key in the filter name and the tag value as the filter + // value. For example, to find all resources that have a tag with the key Owner and + // the value TeamA, specify tag:Owner for the filter name and TeamA for the filter + // value. // - // * tag-key - - // The key of a tag assigned to the resource. Use this filter to find all resources - // assigned a tag with a specific key, regardless of the tag value. + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of the tag + // value. // - // * - // volume-id - The ID of the volume the snapshot is for. + // * volume-id - The ID of the volume the snapshot is for. // - // * volume-size - The - // size of the volume, in GiB. + // * volume-size - + // The size of the volume, in GiB. Filters []*types.Filter // The maximum number of snapshot results returned by DescribeSnapshots in diff --git a/service/ec2/api_op_DescribeSpotInstanceRequests.go b/service/ec2/api_op_DescribeSpotInstanceRequests.go index 1d5a3719134..6b0b2b919c9 100644 --- a/service/ec2/api_op_DescribeSpotInstanceRequests.go +++ b/service/ec2/api_op_DescribeSpotInstanceRequests.go @@ -51,136 +51,133 @@ type DescribeSpotInstanceRequestsInput struct { // One or more filters. // - // * availability-zone-group - The Availability Zone + // * availability-zone-group - The Availability Zone // group. // - // * create-time - The time stamp when the Spot Instance request was + // * create-time - The time stamp when the Spot Instance request was // created. 
// - // * fault-code - The fault code related to the request. + // * fault-code - The fault code related to the request. // - // * - // fault-message - The fault message related to the request. + // * fault-message + // - The fault message related to the request. // - // * instance-id - - // The ID of the instance that fulfilled the request. + // * instance-id - The ID of the + // instance that fulfilled the request. // - // * launch-group - The - // Spot Instance launch group. + // * launch-group - The Spot Instance launch + // group. // - // * - // launch.block-device-mapping.delete-on-termination - Indicates whether the EBS - // volume is deleted on instance termination. + // * launch.block-device-mapping.delete-on-termination - Indicates whether + // the EBS volume is deleted on instance termination. // - // * + // * // launch.block-device-mapping.device-name - The device name for the volume in the // block device mapping (for example, /dev/sdh or xvdh). // - // * + // * // launch.block-device-mapping.snapshot-id - The ID of the snapshot for the EBS // volume. // - // * launch.block-device-mapping.volume-size - The size of the EBS - // volume, in GiB. - // - // * launch.block-device-mapping.volume-type - The type of EBS - // volume: gp2 for General Purpose SSD, io1 or io2 for Provisioned IOPS SSD, st1 - // for Throughput Optimized HDD, sc1for Cold HDD, or standard for Magnetic. + // * launch.block-device-mapping.volume-size - The size of the EBS volume, + // in GiB. // - // * - // launch.group-id - The ID of the security group for the instance. + // * launch.block-device-mapping.volume-type - The type of EBS volume: gp2 + // for General Purpose SSD, io1 or io2 for Provisioned IOPS SSD, st1 for Throughput + // Optimized HDD, sc1for Cold HDD, or standard for Magnetic. // - // * - // launch.group-name - The name of the security group for the instance. + // * launch.group-id - + // The ID of the security group for the instance. // - // * - // launch.image-id - The ID of the AMI. + // * launch.group-name - The name + // of the security group for the instance. // - // * launch.instance-type - The type of - // instance (for example, m3.medium). + // * launch.image-id - The ID of the + // AMI. // - // * launch.kernel-id - The kernel ID. + // * launch.instance-type - The type of instance (for example, m3.medium). // + // * + // launch.kernel-id - The kernel ID. // - // * launch.key-name - The name of the key pair the instance launched with. + // * launch.key-name - The name of the key pair + // the instance launched with. // - // * - // launch.monitoring-enabled - Whether detailed monitoring is enabled for the Spot - // Instance. + // * launch.monitoring-enabled - Whether detailed + // monitoring is enabled for the Spot Instance. // - // * launch.ramdisk-id - The RAM disk ID. + // * launch.ramdisk-id - The RAM disk + // ID. // - // * - // launched-availability-zone - The Availability Zone in which the request is - // launched. + // * launched-availability-zone - The Availability Zone in which the request + // is launched. // - // * network-interface.addresses.primary - Indicates whether the IP + // * network-interface.addresses.primary - Indicates whether the IP // address is the primary private IP address. // - // * + // * // network-interface.delete-on-termination - Indicates whether the network // interface is deleted when the instance is terminated. // - // * + // * // network-interface.description - A description of the network interface. 
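Finally, a hypothetical sketch for the Spot Instance request filters in this hunk: several values inside one filter are ORed, so open and active requests can be retrieved in a single call, while separate filters are ANDed. The same configured-client assumption applies.

package example

import (
    "context"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/ec2"
    "github.com/aws/aws-sdk-go-v2/service/ec2/types"
)

// outstandingSpotRequests lists Spot Instance requests that are still open or active.
func outstandingSpotRequests(ctx context.Context, client *ec2.Client) (*ec2.DescribeSpotInstanceRequestsOutput, error) {
    return client.DescribeSpotInstanceRequests(ctx, &ec2.DescribeSpotInstanceRequestsInput{
        Filters: []*types.Filter{
            {Name: aws.String("state"), Values: []*string{aws.String("open"), aws.String("active")}},
        },
    })
}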
// - // * + // * // network-interface.device-index - The index of the device for the network // interface attachment on the instance. // - // * network-interface.group-id - The ID - // of the security group associated with the network interface. + // * network-interface.group-id - The ID of + // the security group associated with the network interface. // - // * + // * // network-interface.network-interface-id - The ID of the network interface. // - // * + // * // network-interface.private-ip-address - The primary private IP address of the // network interface. // - // * network-interface.subnet-id - The ID of the subnet for - // the instance. + // * network-interface.subnet-id - The ID of the subnet for the + // instance. // - // * product-description - The product description associated - // with the instance (Linux/UNIX | Windows). + // * product-description - The product description associated with the + // instance (Linux/UNIX | Windows). // - // * spot-instance-request-id - The - // Spot Instance request ID. + // * spot-instance-request-id - The Spot Instance + // request ID. // - // * spot-price - The maximum hourly price for any - // Spot Instance launched to fulfill the request. + // * spot-price - The maximum hourly price for any Spot Instance + // launched to fulfill the request. // - // * state - The state of the - // Spot Instance request (open | active | closed | cancelled | failed). Spot - // request status information can help you track your Amazon EC2 Spot Instance - // requests. For more information, see Spot request status + // * state - The state of the Spot Instance + // request (open | active | closed | cancelled | failed). Spot request status + // information can help you track your Amazon EC2 Spot Instance requests. For more + // information, see Spot request status // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html) in // the Amazon EC2 User Guide for Linux Instances. // - // * status-code - The short - // code describing the most recent evaluation of your Spot Instance request. + // * status-code - The short code + // describing the most recent evaluation of your Spot Instance request. // - // * + // * // status-message - The message explaining the status of the Spot Instance // request. // - // * tag: - The key/value combination of a tag assigned to the - // resource. Use the tag key in the filter name and the tag value as the filter - // value. For example, to find all resources that have a tag with the key Owner and - // the value TeamA, specify tag:Owner for the filter name and TeamA for the filter - // value. + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. For + // example, to find all resources that have a tag with the key Owner and the value + // TeamA, specify tag:Owner for the filter name and TeamA for the filter value. // - // * tag-key - The key of a tag assigned to the resource. Use this - // filter to find all resources assigned a tag with a specific key, regardless of - // the tag value. + // * + // tag-key - The key of a tag assigned to the resource. Use this filter to find all + // resources assigned a tag with a specific key, regardless of the tag value. // - // * type - The type of Spot Instance request (one-time | - // persistent). + // * + // type - The type of Spot Instance request (one-time | persistent). // - // * valid-from - The start date of the request. + // * valid-from + // - The start date of the request. 
// - // * - // valid-until - The end date of the request. + // * valid-until - The end date of the request. Filters []*types.Filter // The maximum number of results to return in a single call. Specify a value diff --git a/service/ec2/api_op_DescribeSpotPriceHistory.go b/service/ec2/api_op_DescribeSpotPriceHistory.go index a327faa6164..ec399884a0d 100644 --- a/service/ec2/api_op_DescribeSpotPriceHistory.go +++ b/service/ec2/api_op_DescribeSpotPriceHistory.go @@ -53,24 +53,24 @@ type DescribeSpotPriceHistoryInput struct { // One or more filters. // - // * availability-zone - The Availability Zone for which + // * availability-zone - The Availability Zone for which // prices should be returned. // - // * instance-type - The type of instance (for - // example, m3.medium). + // * instance-type - The type of instance (for example, + // m3.medium). // - // * product-description - The product description for - // the Spot price (Linux/UNIX | Red Hat Enterprise Linux | SUSE Linux | Windows | - // Linux/UNIX (Amazon VPC) | Red Hat Enterprise Linux (Amazon VPC) | SUSE Linux - // (Amazon VPC) | Windows (Amazon VPC)). + // * product-description - The product description for the Spot price + // (Linux/UNIX | Red Hat Enterprise Linux | SUSE Linux | Windows | Linux/UNIX + // (Amazon VPC) | Red Hat Enterprise Linux (Amazon VPC) | SUSE Linux (Amazon VPC) | + // Windows (Amazon VPC)). // - // * spot-price - The Spot price. The - // value must match exactly (or use wildcards; greater than or less than comparison - // is not supported). + // * spot-price - The Spot price. The value must match + // exactly (or use wildcards; greater than or less than comparison is not + // supported). // - // * timestamp - The time stamp of the Spot price history, - // in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). You can use wildcards (* and - // ?). Greater than or less than comparison is not supported. + // * timestamp - The time stamp of the Spot price history, in UTC + // format (for example, YYYY-MM-DDTHH:MM:SSZ). You can use wildcards (* and ?). + // Greater than or less than comparison is not supported. Filters []*types.Filter // Filters the results by the specified instance types. diff --git a/service/ec2/api_op_DescribeSubnets.go b/service/ec2/api_op_DescribeSubnets.go index 57f4def7121..798b4157339 100644 --- a/service/ec2/api_op_DescribeSubnets.go +++ b/service/ec2/api_op_DescribeSubnets.go @@ -39,58 +39,58 @@ type DescribeSubnetsInput struct { // One or more filters. // - // * availability-zone - The Availability Zone for the + // * availability-zone - The Availability Zone for the // subnet. You can also use availabilityZone as the filter name. // - // * + // * // availability-zone-id - The ID of the Availability Zone for the subnet. You can // also use availabilityZoneId as the filter name. // - // * - // available-ip-address-count - The number of IPv4 addresses in the subnet that are - // available. + // * available-ip-address-count - + // The number of IPv4 addresses in the subnet that are available. // - // * cidr-block - The IPv4 CIDR block of the subnet. The CIDR block - // you specify must exactly match the subnet's CIDR block for information to be - // returned for the subnet. You can also use cidr or cidrBlock as the filter - // names. + // * cidr-block - + // The IPv4 CIDR block of the subnet. The CIDR block you specify must exactly match + // the subnet's CIDR block for information to be returned for the subnet. You can + // also use cidr or cidrBlock as the filter names. 
// - // * default-for-az - Indicates whether this is the default subnet for - // the Availability Zone. You can also use defaultForAz as the filter name. + // * default-for-az - Indicates + // whether this is the default subnet for the Availability Zone. You can also use + // defaultForAz as the filter name. // - // * - // ipv6-cidr-block-association.ipv6-cidr-block - An IPv6 CIDR block associated with - // the subnet. + // * ipv6-cidr-block-association.ipv6-cidr-block + // - An IPv6 CIDR block associated with the subnet. // - // * ipv6-cidr-block-association.association-id - An association - // ID for an IPv6 CIDR block associated with the subnet. + // * + // ipv6-cidr-block-association.association-id - An association ID for an IPv6 CIDR + // block associated with the subnet. // - // * - // ipv6-cidr-block-association.state - The state of an IPv6 CIDR block associated - // with the subnet. + // * ipv6-cidr-block-association.state - The + // state of an IPv6 CIDR block associated with the subnet. // - // * owner-id - The ID of the AWS account that owns the - // subnet. + // * owner-id - The ID of + // the AWS account that owns the subnet. // - // * state - The state of the subnet (pending | available). + // * state - The state of the subnet + // (pending | available). // - // * - // subnet-arn - The Amazon Resource Name (ARN) of the subnet. + // * subnet-arn - The Amazon Resource Name (ARN) of the + // subnet. // - // * subnet-id - - // The ID of the subnet. + // * subnet-id - The ID of the subnet. // - // * tag: - The key/value combination of a tag assigned - // to the resource. Use the tag key in the filter name and the tag value as the - // filter value. For example, to find all resources that have a tag with the key - // Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for - // the filter value. + // * tag: - The key/value combination + // of a tag assigned to the resource. Use the tag key in the filter name and the + // tag value as the filter value. For example, to find all resources that have a + // tag with the key Owner and the value TeamA, specify tag:Owner for the filter + // name and TeamA for the filter value. // - // * tag-key - The key of a tag assigned to the resource. - // Use this filter to find all resources assigned a tag with a specific key, - // regardless of the tag value. + // * tag-key - The key of a tag assigned to + // the resource. Use this filter to find all resources assigned a tag with a + // specific key, regardless of the tag value. // - // * vpc-id - The ID of the VPC for the subnet. + // * vpc-id - The ID of the VPC for the + // subnet. Filters []*types.Filter // The maximum number of results to return with a single call. To retrieve the diff --git a/service/ec2/api_op_DescribeTags.go b/service/ec2/api_op_DescribeTags.go index facfd3aa4d9..8dae0bedbbe 100644 --- a/service/ec2/api_op_DescribeTags.go +++ b/service/ec2/api_op_DescribeTags.go @@ -40,25 +40,24 @@ type DescribeTagsInput struct { // The filters. // - // * key - The tag key. + // * key - The tag key. // - // * resource-id - The ID of the - // resource. + // * resource-id - The ID of the resource. 
// - // * resource-type - The resource type (customer-gateway | - // dedicated-host | dhcp-options | elastic-ip | fleet | fpga-image | - // host-reservation | image | instance | internet-gateway | key-pair | - // launch-template | natgateway | network-acl | network-interface | placement-group - // | reserved-instances | route-table | security-group | snapshot | - // spot-instances-request | subnet | volume | vpc | vpc-endpoint | - // vpc-endpoint-service | vpc-peering-connection | vpn-connection | vpn-gateway). + // * + // resource-type - The resource type (customer-gateway | dedicated-host | + // dhcp-options | elastic-ip | fleet | fpga-image | host-reservation | image | + // instance | internet-gateway | key-pair | launch-template | natgateway | + // network-acl | network-interface | placement-group | reserved-instances | + // route-table | security-group | snapshot | spot-instances-request | subnet | + // volume | vpc | vpc-endpoint | vpc-endpoint-service | vpc-peering-connection | + // vpn-connection | vpn-gateway). // + // * tag: - The key/value combination of the tag. + // For example, specify "tag:Owner" for the filter name and "TeamA" for the filter + // value to find resources with the tag "Owner=TeamA". // - // * tag: - The key/value combination of the tag. For example, specify "tag:Owner" - // for the filter name and "TeamA" for the filter value to find resources with the - // tag "Owner=TeamA". - // - // * value - The tag value. + // * value - The tag value. Filters []*types.Filter // The maximum number of results to return in a single call. This value can be diff --git a/service/ec2/api_op_DescribeTrafficMirrorFilters.go b/service/ec2/api_op_DescribeTrafficMirrorFilters.go index 91d993e09ce..98d867b3251 100644 --- a/service/ec2/api_op_DescribeTrafficMirrorFilters.go +++ b/service/ec2/api_op_DescribeTrafficMirrorFilters.go @@ -37,11 +37,11 @@ type DescribeTrafficMirrorFiltersInput struct { // One or more filters. The possible values are: // - // * description: The Traffic - // Mirror filter description. + // * description: The Traffic Mirror + // filter description. // - // * traffic-mirror-filter-id: The ID of the - // Traffic Mirror filter. + // * traffic-mirror-filter-id: The ID of the Traffic Mirror + // filter. Filters []*types.Filter // The maximum number of results to return with a single call. To retrieve the diff --git a/service/ec2/api_op_DescribeTrafficMirrorSessions.go b/service/ec2/api_op_DescribeTrafficMirrorSessions.go index 32aa502ca09..39818cccfcf 100644 --- a/service/ec2/api_op_DescribeTrafficMirrorSessions.go +++ b/service/ec2/api_op_DescribeTrafficMirrorSessions.go @@ -38,30 +38,30 @@ type DescribeTrafficMirrorSessionsInput struct { // One or more filters. The possible values are: // - // * description: The Traffic - // Mirror session description. + // * description: The Traffic Mirror + // session description. // - // * network-interface-id: The ID of the Traffic - // Mirror session network interface. + // * network-interface-id: The ID of the Traffic Mirror + // session network interface. // - // * owner-id: The ID of the account that - // owns the Traffic Mirror session. + // * owner-id: The ID of the account that owns the + // Traffic Mirror session. // - // * packet-length: The assigned number of - // packets to mirror. + // * packet-length: The assigned number of packets to + // mirror. // - // * session-number: The assigned session number. + // * session-number: The assigned session number. 
// - // * + // * // traffic-mirror-filter-id: The ID of the Traffic Mirror filter. // - // * + // * // traffic-mirror-session-id: The ID of the Traffic Mirror session. // - // * + // * // traffic-mirror-target-id: The ID of the Traffic Mirror target. // - // * + // * // virtual-network-id: The virtual network ID of the Traffic Mirror session. Filters []*types.Filter diff --git a/service/ec2/api_op_DescribeTrafficMirrorTargets.go b/service/ec2/api_op_DescribeTrafficMirrorTargets.go index aad6efc93ac..1066d1be1a8 100644 --- a/service/ec2/api_op_DescribeTrafficMirrorTargets.go +++ b/service/ec2/api_op_DescribeTrafficMirrorTargets.go @@ -37,20 +37,20 @@ type DescribeTrafficMirrorTargetsInput struct { // One or more filters. The possible values are: // - // * description: The Traffic - // Mirror target description. + // * description: The Traffic Mirror + // target description. // - // * network-interface-id: The ID of the Traffic - // Mirror session network interface. + // * network-interface-id: The ID of the Traffic Mirror + // session network interface. // - // * network-load-balancer-arn: The Amazon - // Resource Name (ARN) of the Network Load Balancer that is associated with the - // session. + // * network-load-balancer-arn: The Amazon Resource + // Name (ARN) of the Network Load Balancer that is associated with the session. // - // * owner-id: The ID of the account that owns the Traffic Mirror - // session. + // * + // owner-id: The ID of the account that owns the Traffic Mirror session. // - // * traffic-mirror-target-id: The ID of the Traffic Mirror target. + // * + // traffic-mirror-target-id: The ID of the Traffic Mirror target. Filters []*types.Filter // The maximum number of results to return with a single call. To retrieve the diff --git a/service/ec2/api_op_DescribeTransitGatewayAttachments.go b/service/ec2/api_op_DescribeTransitGatewayAttachments.go index 76e2540c1d9..ce47bb1a13e 100644 --- a/service/ec2/api_op_DescribeTransitGatewayAttachments.go +++ b/service/ec2/api_op_DescribeTransitGatewayAttachments.go @@ -39,35 +39,34 @@ type DescribeTransitGatewayAttachmentsInput struct { // One or more filters. The possible values are: // - // * association.state - The - // state of the association (associating | associated | disassociating). + // * association.state - The state + // of the association (associating | associated | disassociating). // - // * + // * // association.transit-gateway-route-table-id - The ID of the route table for the // transit gateway. // - // * resource-id - The ID of the resource. + // * resource-id - The ID of the resource. // - // * - // resource-owner-id - The ID of the AWS account that owns the resource. + // * resource-owner-id - + // The ID of the AWS account that owns the resource. // - // * - // resource-type - The resource type. Valid values are vpc | vpn | - // direct-connect-gateway | peering. + // * resource-type - The + // resource type. Valid values are vpc | vpn | direct-connect-gateway | peering. // - // * state - The state of the attachment. - // Valid values are available | deleted | deleting | failed | failing | - // initiatingRequest | modifying | pendingAcceptance | pending | rollingBack | - // rejected | rejecting. + // * + // state - The state of the attachment. Valid values are available | deleted | + // deleting | failed | failing | initiatingRequest | modifying | pendingAcceptance + // | pending | rollingBack | rejected | rejecting. // - // * transit-gateway-attachment-id - The ID of the - // attachment. 
+ // * transit-gateway-attachment-id + // - The ID of the attachment. // - // * transit-gateway-id - The ID of the transit gateway. - // - // * - // transit-gateway-owner-id - The ID of the AWS account that owns the transit + // * transit-gateway-id - The ID of the transit // gateway. + // + // * transit-gateway-owner-id - The ID of the AWS account that owns the + // transit gateway. Filters []*types.Filter // The maximum number of results to return with a single call. To retrieve the diff --git a/service/ec2/api_op_DescribeTransitGatewayMulticastDomains.go b/service/ec2/api_op_DescribeTransitGatewayMulticastDomains.go index c190ca9cc65..e79d885fb4b 100644 --- a/service/ec2/api_op_DescribeTransitGatewayMulticastDomains.go +++ b/service/ec2/api_op_DescribeTransitGatewayMulticastDomains.go @@ -37,14 +37,14 @@ type DescribeTransitGatewayMulticastDomainsInput struct { // One or more filters. The possible values are: // - // * state - The state of the + // * state - The state of the // transit gateway multicast domain. Valid values are pending | available | // deleting | deleted. // - // * transit-gateway-id - The ID of the transit gateway. + // * transit-gateway-id - The ID of the transit gateway. // - // - // * transit-gateway-multicast-domain-id - The ID of the transit gateway multicast + // * + // transit-gateway-multicast-domain-id - The ID of the transit gateway multicast // domain. Filters []*types.Filter diff --git a/service/ec2/api_op_DescribeTransitGatewayPeeringAttachments.go b/service/ec2/api_op_DescribeTransitGatewayPeeringAttachments.go index 492495146a8..f0f45f98d36 100644 --- a/service/ec2/api_op_DescribeTransitGatewayPeeringAttachments.go +++ b/service/ec2/api_op_DescribeTransitGatewayPeeringAttachments.go @@ -37,22 +37,21 @@ type DescribeTransitGatewayPeeringAttachmentsInput struct { // One or more filters. The possible values are: // - // * - // transit-gateway-attachment-id - The ID of the transit gateway attachment. + // * transit-gateway-attachment-id - + // The ID of the transit gateway attachment. // - // * - // local-owner-id - The ID of your AWS account. + // * local-owner-id - The ID of your AWS + // account. // - // * remote-owner-id - The ID of - // the AWS account in the remote Region that owns the transit gateway. + // * remote-owner-id - The ID of the AWS account in the remote Region + // that owns the transit gateway. // - // * state - // - The state of the peering attachment. Valid values are available | deleted | - // deleting | failed | failing | initiatingRequest | modifying | pendingAcceptance - // | pending | rollingBack | rejected | rejecting). + // * state - The state of the peering attachment. + // Valid values are available | deleted | deleting | failed | failing | + // initiatingRequest | modifying | pendingAcceptance | pending | rollingBack | + // rejected | rejecting). // - // * transit-gateway-id - The - // ID of the transit gateway. + // * transit-gateway-id - The ID of the transit gateway. Filters []*types.Filter // The maximum number of results to return with a single call. To retrieve the diff --git a/service/ec2/api_op_DescribeTransitGatewayRouteTables.go b/service/ec2/api_op_DescribeTransitGatewayRouteTables.go index 9d68885b8f7..80dddc33c37 100644 --- a/service/ec2/api_op_DescribeTransitGatewayRouteTables.go +++ b/service/ec2/api_op_DescribeTransitGatewayRouteTables.go @@ -38,21 +38,20 @@ type DescribeTransitGatewayRouteTablesInput struct { // One or more filters. 
The possible values are: // - // * - // default-association-route-table - Indicates whether this is the default - // association route table for the transit gateway (true | false). + // * default-association-route-table + // - Indicates whether this is the default association route table for the transit + // gateway (true | false). // - // * - // default-propagation-route-table - Indicates whether this is the default - // propagation route table for the transit gateway (true | false). + // * default-propagation-route-table - Indicates whether + // this is the default propagation route table for the transit gateway (true | + // false). // - // * state - - // The state of the route table (available | deleting | deleted | pending). + // * state - The state of the route table (available | deleting | deleted + // | pending). // - // * - // transit-gateway-id - The ID of the transit gateway. + // * transit-gateway-id - The ID of the transit gateway. // - // * + // * // transit-gateway-route-table-id - The ID of the transit gateway route table. Filters []*types.Filter diff --git a/service/ec2/api_op_DescribeTransitGatewayVpcAttachments.go b/service/ec2/api_op_DescribeTransitGatewayVpcAttachments.go index cd79f53ae4e..7b431b5a861 100644 --- a/service/ec2/api_op_DescribeTransitGatewayVpcAttachments.go +++ b/service/ec2/api_op_DescribeTransitGatewayVpcAttachments.go @@ -38,18 +38,18 @@ type DescribeTransitGatewayVpcAttachmentsInput struct { // One or more filters. The possible values are: // - // * state - The state of the + // * state - The state of the // attachment. Valid values are available | deleted | deleting | failed | failing | // initiatingRequest | modifying | pendingAcceptance | pending | rollingBack | // rejected | rejecting. // - // * transit-gateway-attachment-id - The ID of the + // * transit-gateway-attachment-id - The ID of the // attachment. // - // * transit-gateway-id - The ID of the transit gateway. + // * transit-gateway-id - The ID of the transit gateway. // - // * - // vpc-id - The ID of the VPC. + // * vpc-id - + // The ID of the VPC. Filters []*types.Filter // The maximum number of results to return with a single call. To retrieve the diff --git a/service/ec2/api_op_DescribeTransitGateways.go b/service/ec2/api_op_DescribeTransitGateways.go index 13a9926be39..95e8c0cf6df 100644 --- a/service/ec2/api_op_DescribeTransitGateways.go +++ b/service/ec2/api_op_DescribeTransitGateways.go @@ -38,44 +38,43 @@ type DescribeTransitGatewaysInput struct { // One or more filters. The possible values are: // - // * + // * // options.propagation-default-route-table-id - The ID of the default propagation // route table. // - // * options.amazon-side-asn - The private ASN for the Amazon - // side of a BGP session. + // * options.amazon-side-asn - The private ASN for the Amazon side of + // a BGP session. // - // * options.association-default-route-table-id - The - // ID of the default association route table. + // * options.association-default-route-table-id - The ID of the + // default association route table. // - // * - // options.auto-accept-shared-attachments - Indicates whether there is automatic - // acceptance of attachment requests (enable | disable). - // - // * - // options.default-route-table-association - Indicates whether resource attachments - // are automatically associated with the default association route table (enable | + // * options.auto-accept-shared-attachments - + // Indicates whether there is automatic acceptance of attachment requests (enable | // disable). 
// - // * options.default-route-table-propagation - Indicates whether - // resource attachments automatically propagate routes to the default propagation + // * options.default-route-table-association - Indicates whether + // resource attachments are automatically associated with the default association // route table (enable | disable). // - // * options.dns-support - Indicates whether - // DNS support is enabled (enable | disable). + // * options.default-route-table-propagation - + // Indicates whether resource attachments automatically propagate routes to the + // default propagation route table (enable | disable). // - // * options.vpn-ecmp-support - - // Indicates whether Equal Cost Multipath Protocol support is enabled (enable | - // disable). + // * options.dns-support - + // Indicates whether DNS support is enabled (enable | disable). + // + // * + // options.vpn-ecmp-support - Indicates whether Equal Cost Multipath Protocol + // support is enabled (enable | disable). // - // * owner-id - The ID of the AWS account that owns the transit - // gateway. + // * owner-id - The ID of the AWS account + // that owns the transit gateway. // - // * state - The state of the transit gateway (available | deleted | - // deleting | modifying | pending). + // * state - The state of the transit gateway + // (available | deleted | deleting | modifying | pending). // - // * transit-gateway-id - The ID of the - // transit gateway. + // * transit-gateway-id - + // The ID of the transit gateway. Filters []*types.Filter // The maximum number of results to return with a single call. To retrieve the diff --git a/service/ec2/api_op_DescribeVolumeStatus.go b/service/ec2/api_op_DescribeVolumeStatus.go index 41e8bc900ad..0cf75d85f30 100644 --- a/service/ec2/api_op_DescribeVolumeStatus.go +++ b/service/ec2/api_op_DescribeVolumeStatus.go @@ -65,43 +65,42 @@ type DescribeVolumeStatusInput struct { // The filters. // - // * action.code - The action code for the event (for example, + // * action.code - The action code for the event (for example, // enable-volume-io). // - // * action.description - A description of the action. + // * action.description - A description of the action. // + // * + // action.event-id - The event ID associated with the action. // - // * action.event-id - The event ID associated with the action. + // * availability-zone + // - The Availability Zone of the instance. // - // * - // availability-zone - The Availability Zone of the instance. + // * event.description - A description of + // the event. // - // * - // event.description - A description of the event. + // * event.event-id - The event ID. // - // * event.event-id - The - // event ID. + // * event.event-type - The event + // type (for io-enabled: passed | failed; for io-performance: + // io-performance:degraded | io-performance:severely-degraded | + // io-performance:stalled). // - // * event.event-type - The event type (for io-enabled: passed | - // failed; for io-performance: io-performance:degraded | - // io-performance:severely-degraded | io-performance:stalled). + // * event.not-after - The latest end time for the + // event. // - // * - // event.not-after - The latest end time for the event. + // * event.not-before - The earliest start time for the event. // - // * event.not-before - - // The earliest start time for the event. + // * + // volume-status.details-name - The cause for volume-status.status (io-enabled | + // io-performance). 
// - // * volume-status.details-name - The - // cause for volume-status.status (io-enabled | io-performance). + // * volume-status.details-status - The status of + // volume-status.details-name (for io-enabled: passed | failed; for io-performance: + // normal | degraded | severely-degraded | stalled). // - // * - // volume-status.details-status - The status of volume-status.details-name (for - // io-enabled: passed | failed; for io-performance: normal | degraded | - // severely-degraded | stalled). - // - // * volume-status.status - The status of the - // volume (ok | impaired | warning | insufficient-data). + // * volume-status.status - The + // status of the volume (ok | impaired | warning | insufficient-data). Filters []*types.Filter // The maximum number of volume results returned by DescribeVolumeStatus in diff --git a/service/ec2/api_op_DescribeVolumes.go b/service/ec2/api_op_DescribeVolumes.go index d89ce823f95..53e07c501af 100644 --- a/service/ec2/api_op_DescribeVolumes.go +++ b/service/ec2/api_op_DescribeVolumes.go @@ -45,60 +45,59 @@ type DescribeVolumesInput struct { // The filters. // - // * attachment.attach-time - The time stamp when the attachment + // * attachment.attach-time - The time stamp when the attachment // initiated. // - // * attachment.delete-on-termination - Whether the volume is - // deleted on instance termination. + // * attachment.delete-on-termination - Whether the volume is deleted + // on instance termination. // - // * attachment.device - The device name - // specified in the block device mapping (for example, /dev/sda1). + // * attachment.device - The device name specified in the + // block device mapping (for example, /dev/sda1). // - // * - // attachment.instance-id - The ID of the instance the volume is attached to. + // * attachment.instance-id - The + // ID of the instance the volume is attached to. // + // * attachment.status - The + // attachment state (attaching | attached | detaching). // - // * attachment.status - The attachment state (attaching | attached | detaching). + // * availability-zone - The + // Availability Zone in which the volume was created. // + // * create-time - The time + // stamp when the volume was created. // - // * availability-zone - The Availability Zone in which the volume was created. + // * encrypted - Indicates whether the volume + // is encrypted (true | false) // + // * multi-attach-enabled - Indicates whether the + // volume is enabled for Multi-Attach (true | false) // - // * create-time - The time stamp when the volume was created. + // * fast-restored - Indicates + // whether the volume was created from a snapshot that is enabled for fast snapshot + // restore (true | false). // - // * encrypted - - // Indicates whether the volume is encrypted (true | false) + // * size - The size of the volume, in GiB. // - // * - // multi-attach-enabled - Indicates whether the volume is enabled for Multi-Attach - // (true | false) + // * snapshot-id + // - The snapshot from which the volume was created. // - // * fast-restored - Indicates whether the volume was created - // from a snapshot that is enabled for fast snapshot restore (true | false). + // * status - The state of the + // volume (creating | available | in-use | deleting | deleted | error). // - // * - // size - The size of the volume, in GiB. + // * tag: - + // The key/value combination of a tag assigned to the resource. Use the tag key in + // the filter name and the tag value as the filter value. 
For example, to find all + // resources that have a tag with the key Owner and the value TeamA, specify + // tag:Owner for the filter name and TeamA for the filter value. // - // * snapshot-id - The snapshot from - // which the volume was created. + // * tag-key - The + // key of a tag assigned to the resource. Use this filter to find all resources + // assigned a tag with a specific key, regardless of the tag value. // - // * status - The state of the volume (creating - // | available | in-use | deleting | deleted | error). + // * volume-id - + // The volume ID. // - // * tag: - The key/value - // combination of a tag assigned to the resource. Use the tag key in the filter - // name and the tag value as the filter value. For example, to find all resources - // that have a tag with the key Owner and the value TeamA, specify tag:Owner for - // the filter name and TeamA for the filter value. - // - // * tag-key - The key of a - // tag assigned to the resource. Use this filter to find all resources assigned a - // tag with a specific key, regardless of the tag value. - // - // * volume-id - The - // volume ID. - // - // * volume-type - The Amazon EBS volume type. This can be gp2 for + // * volume-type - The Amazon EBS volume type. This can be gp2 for // General Purpose SSD, io1 or io2 for Provisioned IOPS SSD, st1 for Throughput // Optimized HDD, sc1 for Cold HDD, or standard for Magnetic volumes. Filters []*types.Filter diff --git a/service/ec2/api_op_DescribeVolumesModifications.go b/service/ec2/api_op_DescribeVolumesModifications.go index 951cfbdd014..16828ca8e4d 100644 --- a/service/ec2/api_op_DescribeVolumesModifications.go +++ b/service/ec2/api_op_DescribeVolumesModifications.go @@ -46,38 +46,37 @@ type DescribeVolumesModificationsInput struct { // The filters. // - // * modification-state - The current modification state - // (modifying | optimizing | completed | failed). + // * modification-state - The current modification state (modifying | + // optimizing | completed | failed). // - // * original-iops - The - // original IOPS rate of the volume. + // * original-iops - The original IOPS rate of + // the volume. // - // * original-size - The original size of - // the volume, in GiB. + // * original-size - The original size of the volume, in GiB. // - // * original-volume-type - The original volume type of - // the volume (standard | io1 | io2 | gp2 | sc1 | st1). + // * + // original-volume-type - The original volume type of the volume (standard | io1 | + // io2 | gp2 | sc1 | st1). // - // * - // originalMultiAttachEnabled - Indicates whether Multi-Attach support was enabled - // (true | false). + // * originalMultiAttachEnabled - Indicates whether + // Multi-Attach support was enabled (true | false). // - // * start-time - The modification start time. + // * start-time - The + // modification start time. // - // * - // target-iops - The target IOPS rate of the volume. + // * target-iops - The target IOPS rate of the volume. // - // * target-size - The - // target size of the volume, in GiB. + // * + // target-size - The target size of the volume, in GiB. // - // * target-volume-type - The target volume - // type of the volume (standard | io1 | io2 | gp2 | sc1 | st1). + // * target-volume-type - The + // target volume type of the volume (standard | io1 | io2 | gp2 | sc1 | st1). // - // * + // * // targetMultiAttachEnabled - Indicates whether Multi-Attach support is to be // enabled (true | false). // - // * volume-id - The ID of the volume. + // * volume-id - The ID of the volume. 
Filters []*types.Filter // The maximum number of results (up to a limit of 500) to be returned in a diff --git a/service/ec2/api_op_DescribeVpcClassicLink.go b/service/ec2/api_op_DescribeVpcClassicLink.go index 7b47a83b276..d61841c5595 100644 --- a/service/ec2/api_op_DescribeVpcClassicLink.go +++ b/service/ec2/api_op_DescribeVpcClassicLink.go @@ -37,18 +37,18 @@ type DescribeVpcClassicLinkInput struct { // One or more filters. // - // * is-classic-link-enabled - Whether the VPC is enabled - // for ClassicLink (true | false). + // * is-classic-link-enabled - Whether the VPC is enabled for + // ClassicLink (true | false). // - // * tag: - The key/value combination of a tag + // * tag: - The key/value combination of a tag // assigned to the resource. Use the tag key in the filter name and the tag value // as the filter value. For example, to find all resources that have a tag with the // key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA // for the filter value. // - // * tag-key - The key of a tag assigned to the - // resource. Use this filter to find all resources assigned a tag with a specific - // key, regardless of the tag value. + // * tag-key - The key of a tag assigned to the resource. + // Use this filter to find all resources assigned a tag with a specific key, + // regardless of the tag value. Filters []*types.Filter // One or more VPCs for which you want to describe the ClassicLink status. diff --git a/service/ec2/api_op_DescribeVpcEndpointConnectionNotifications.go b/service/ec2/api_op_DescribeVpcEndpointConnectionNotifications.go index 1da919d465b..696c2f237c0 100644 --- a/service/ec2/api_op_DescribeVpcEndpointConnectionNotifications.go +++ b/service/ec2/api_op_DescribeVpcEndpointConnectionNotifications.go @@ -41,22 +41,22 @@ type DescribeVpcEndpointConnectionNotificationsInput struct { // One or more filters. // - // * connection-notification-arn - The ARN of the SNS - // topic for the notification. + // * connection-notification-arn - The ARN of the SNS topic + // for the notification. // - // * connection-notification-id - The ID of the + // * connection-notification-id - The ID of the // notification. // - // * connection-notification-state - The state of the - // notification (Enabled | Disabled). + // * connection-notification-state - The state of the notification + // (Enabled | Disabled). // - // * connection-notification-type - The - // type of notification (Topic). + // * connection-notification-type - The type of notification + // (Topic). // - // * service-id - The ID of the endpoint - // service. + // * service-id - The ID of the endpoint service. // - // * vpc-endpoint-id - The ID of the VPC endpoint. + // * vpc-endpoint-id - + // The ID of the VPC endpoint. Filters []*types.Filter // The maximum number of results to return in a single call. To retrieve the diff --git a/service/ec2/api_op_DescribeVpcEndpointConnections.go b/service/ec2/api_op_DescribeVpcEndpointConnections.go index ea0c14037bb..ab3c3286161 100644 --- a/service/ec2/api_op_DescribeVpcEndpointConnections.go +++ b/service/ec2/api_op_DescribeVpcEndpointConnections.go @@ -38,17 +38,17 @@ type DescribeVpcEndpointConnectionsInput struct { // One or more filters. // - // * service-id - The ID of the service. + // * service-id - The ID of the service. // - // * + // * // vpc-endpoint-owner - The AWS account number of the owner of the endpoint. 
// - // * + // * // vpc-endpoint-state - The state of the endpoint (pendingAcceptance | pending | // available | deleting | deleted | rejected | failed). // - // * vpc-endpoint-id - - // The ID of the endpoint. + // * vpc-endpoint-id - The ID + // of the endpoint. Filters []*types.Filter // The maximum number of results to return for the request in a single page. The diff --git a/service/ec2/api_op_DescribeVpcEndpointServiceConfigurations.go b/service/ec2/api_op_DescribeVpcEndpointServiceConfigurations.go index 8bafe42d630..05c86da3759 100644 --- a/service/ec2/api_op_DescribeVpcEndpointServiceConfigurations.go +++ b/service/ec2/api_op_DescribeVpcEndpointServiceConfigurations.go @@ -38,23 +38,23 @@ type DescribeVpcEndpointServiceConfigurationsInput struct { // One or more filters. // - // * service-name - The name of the service. + // * service-name - The name of the service. // - // * - // service-id - The ID of the service. + // * service-id - + // The ID of the service. // - // * service-state - The state of the - // service (Pending | Available | Deleting | Deleted | Failed). + // * service-state - The state of the service (Pending | + // Available | Deleting | Deleted | Failed). // - // * tag: - The - // key/value combination of a tag assigned to the resource. Use the tag key in the - // filter name and the tag value as the filter value. For example, to find all - // resources that have a tag with the key Owner and the value TeamA, specify - // tag:Owner for the filter name and TeamA for the filter value. + // * tag: - The key/value combination of + // a tag assigned to the resource. Use the tag key in the filter name and the tag + // value as the filter value. For example, to find all resources that have a tag + // with the key Owner and the value TeamA, specify tag:Owner for the filter name + // and TeamA for the filter value. // - // * tag-key - - // The key of a tag assigned to the resource. Use this filter to find all resources - // assigned a tag with a specific key, regardless of the tag value. + // * tag-key - The key of a tag assigned to the + // resource. Use this filter to find all resources assigned a tag with a specific + // key, regardless of the tag value. Filters []*types.Filter // The maximum number of results to return for the request in a single page. The diff --git a/service/ec2/api_op_DescribeVpcEndpointServicePermissions.go b/service/ec2/api_op_DescribeVpcEndpointServicePermissions.go index 31c7df1873d..5dfef3cb5ae 100644 --- a/service/ec2/api_op_DescribeVpcEndpointServicePermissions.go +++ b/service/ec2/api_op_DescribeVpcEndpointServicePermissions.go @@ -43,11 +43,10 @@ type DescribeVpcEndpointServicePermissionsInput struct { // One or more filters. // - // * principal - The ARN of the principal. + // * principal - The ARN of the principal. // - // * - // principal-type - The principal type (All | Service | OrganizationUnit | Account - // | User | Role). + // * principal-type + // - The principal type (All | Service | OrganizationUnit | Account | User | Role). Filters []*types.Filter // The maximum number of results to return for the request in a single page. The diff --git a/service/ec2/api_op_DescribeVpcEndpointServices.go b/service/ec2/api_op_DescribeVpcEndpointServices.go index bca68f62461..bb1ea6a9e5b 100644 --- a/service/ec2/api_op_DescribeVpcEndpointServices.go +++ b/service/ec2/api_op_DescribeVpcEndpointServices.go @@ -38,16 +38,16 @@ type DescribeVpcEndpointServicesInput struct { // One or more filters. // - // * service-name - The name of the service. 
+ // * service-name - The name of the service. // - // * tag: - // - The key/value combination of a tag assigned to the resource. Use the tag key - // in the filter name and the tag value as the filter value. For example, to find - // all resources that have a tag with the key Owner and the value TeamA, specify + // * tag: - The + // key/value combination of a tag assigned to the resource. Use the tag key in the + // filter name and the tag value as the filter value. For example, to find all + // resources that have a tag with the key Owner and the value TeamA, specify // tag:Owner for the filter name and TeamA for the filter value. // - // * tag-key - - // The key of a tag assigned to the resource. Use this filter to find all resources + // * tag-key - The + // key of a tag assigned to the resource. Use this filter to find all resources // assigned a tag with a specific key, regardless of the tag value. Filters []*types.Filter diff --git a/service/ec2/api_op_DescribeVpcEndpoints.go b/service/ec2/api_op_DescribeVpcEndpoints.go index 5a859b3e9fb..20b629d5dbe 100644 --- a/service/ec2/api_op_DescribeVpcEndpoints.go +++ b/service/ec2/api_op_DescribeVpcEndpoints.go @@ -38,27 +38,26 @@ type DescribeVpcEndpointsInput struct { // One or more filters. // - // * service-name - The name of the service. + // * service-name - The name of the service. // - // * - // vpc-id - The ID of the VPC in which the endpoint resides. + // * vpc-id - The + // ID of the VPC in which the endpoint resides. // - // * vpc-endpoint-id - // - The ID of the endpoint. + // * vpc-endpoint-id - The ID of the + // endpoint. // - // * vpc-endpoint-state - The state of the endpoint - // (pendingAcceptance | pending | available | deleting | deleted | rejected | - // failed). + // * vpc-endpoint-state - The state of the endpoint (pendingAcceptance | + // pending | available | deleting | deleted | rejected | failed). // - // * tag: - The key/value combination of a tag assigned to the - // resource. Use the tag key in the filter name and the tag value as the filter - // value. For example, to find all resources that have a tag with the key Owner and - // the value TeamA, specify tag:Owner for the filter name and TeamA for the filter - // value. + // * tag: - The + // key/value combination of a tag assigned to the resource. Use the tag key in the + // filter name and the tag value as the filter value. For example, to find all + // resources that have a tag with the key Owner and the value TeamA, specify + // tag:Owner for the filter name and TeamA for the filter value. // - // * tag-key - The key of a tag assigned to the resource. Use this - // filter to find all resources assigned a tag with a specific key, regardless of - // the tag value. + // * tag-key - The + // key of a tag assigned to the resource. Use this filter to find all resources + // assigned a tag with a specific key, regardless of the tag value. Filters []*types.Filter // The maximum number of items to return for this request. The request returns a diff --git a/service/ec2/api_op_DescribeVpcPeeringConnections.go b/service/ec2/api_op_DescribeVpcPeeringConnections.go index a1d05afa9c3..fbbc0b72342 100644 --- a/service/ec2/api_op_DescribeVpcPeeringConnections.go +++ b/service/ec2/api_op_DescribeVpcPeeringConnections.go @@ -37,46 +37,46 @@ type DescribeVpcPeeringConnectionsInput struct { // One or more filters. // - // * accepter-vpc-info.cidr-block - The IPv4 CIDR block - // of the accepter VPC. 
+ // * accepter-vpc-info.cidr-block - The IPv4 CIDR block of + // the accepter VPC. // - // * accepter-vpc-info.owner-id - The AWS account ID of - // the owner of the accepter VPC. + // * accepter-vpc-info.owner-id - The AWS account ID of the + // owner of the accepter VPC. // - // * accepter-vpc-info.vpc-id - The ID of the - // accepter VPC. + // * accepter-vpc-info.vpc-id - The ID of the accepter + // VPC. // - // * expiration-time - The expiration date and time for the VPC - // peering connection. + // * expiration-time - The expiration date and time for the VPC peering + // connection. // - // * requester-vpc-info.cidr-block - The IPv4 CIDR block - // of the requester's VPC. + // * requester-vpc-info.cidr-block - The IPv4 CIDR block of the + // requester's VPC. // - // * requester-vpc-info.owner-id - The AWS account ID - // of the owner of the requester VPC. + // * requester-vpc-info.owner-id - The AWS account ID of the + // owner of the requester VPC. // - // * requester-vpc-info.vpc-id - The ID of - // the requester VPC. + // * requester-vpc-info.vpc-id - The ID of the + // requester VPC. // - // * status-code - The status of the VPC peering connection + // * status-code - The status of the VPC peering connection // (pending-acceptance | failed | expired | provisioning | active | deleting | // deleted | rejected). // - // * status-message - A message that provides more + // * status-message - A message that provides more // information about the status of the VPC peering connection, if applicable. // - // - // * tag: - The key/value combination of a tag assigned to the resource. Use the - // tag key in the filter name and the tag value as the filter value. For example, - // to find all resources that have a tag with the key Owner and the value TeamA, + // * + // tag: - The key/value combination of a tag assigned to the resource. Use the tag + // key in the filter name and the tag value as the filter value. For example, to + // find all resources that have a tag with the key Owner and the value TeamA, // specify tag:Owner for the filter name and TeamA for the filter value. // - // * - // tag-key - The key of a tag assigned to the resource. Use this filter to find all + // * tag-key + // - The key of a tag assigned to the resource. Use this filter to find all // resources assigned a tag with a specific key, regardless of the tag value. // - // - // * vpc-peering-connection-id - The ID of the VPC peering connection. + // * + // vpc-peering-connection-id - The ID of the VPC peering connection. Filters []*types.Filter // The maximum number of results to return with a single call. To retrieve the diff --git a/service/ec2/api_op_DescribeVpcs.go b/service/ec2/api_op_DescribeVpcs.go index e86ce57224e..7a2ccde3401 100644 --- a/service/ec2/api_op_DescribeVpcs.go +++ b/service/ec2/api_op_DescribeVpcs.go @@ -37,57 +37,57 @@ type DescribeVpcsInput struct { // One or more filters. // - // * cidr - The primary IPv4 CIDR block of the VPC. The - // CIDR block you specify must exactly match the VPC's CIDR block for information - // to be returned for the VPC. Must contain the slash followed by one or two digits - // (for example, /28). + // * cidr - The primary IPv4 CIDR block of the VPC. The CIDR + // block you specify must exactly match the VPC's CIDR block for information to be + // returned for the VPC. Must contain the slash followed by one or two digits (for + // example, /28). // - // * cidr-block-association.cidr-block - An IPv4 CIDR - // block associated with the VPC. 
+ // * cidr-block-association.cidr-block - An IPv4 CIDR block + // associated with the VPC. // - // * cidr-block-association.association-id - - // The association ID for an IPv4 CIDR block associated with the VPC. + // * cidr-block-association.association-id - The + // association ID for an IPv4 CIDR block associated with the VPC. // - // * + // * // cidr-block-association.state - The state of an IPv4 CIDR block associated with // the VPC. // - // * dhcp-options-id - The ID of a set of DHCP options. + // * dhcp-options-id - The ID of a set of DHCP options. // - // * + // * // ipv6-cidr-block-association.ipv6-cidr-block - An IPv6 CIDR block associated with // the VPC. // - // * ipv6-cidr-block-association.ipv6-pool - The ID of the IPv6 - // address pool from which the IPv6 CIDR block is allocated. + // * ipv6-cidr-block-association.ipv6-pool - The ID of the IPv6 address + // pool from which the IPv6 CIDR block is allocated. // - // * + // * // ipv6-cidr-block-association.association-id - The association ID for an IPv6 CIDR // block associated with the VPC. // - // * ipv6-cidr-block-association.state - The - // state of an IPv6 CIDR block associated with the VPC. + // * ipv6-cidr-block-association.state - The state + // of an IPv6 CIDR block associated with the VPC. // - // * isDefault - - // Indicates whether the VPC is the default VPC. + // * isDefault - Indicates whether + // the VPC is the default VPC. // - // * owner-id - The ID of the - // AWS account that owns the VPC. + // * owner-id - The ID of the AWS account that owns + // the VPC. // - // * state - The state of the VPC (pending | - // available). + // * state - The state of the VPC (pending | available). // - // * tag: - The key/value combination of a tag assigned to the - // resource. Use the tag key in the filter name and the tag value as the filter - // value. For example, to find all resources that have a tag with the key Owner and - // the value TeamA, specify tag:Owner for the filter name and TeamA for the filter - // value. + // * tag: - The + // key/value combination of a tag assigned to the resource. Use the tag key in the + // filter name and the tag value as the filter value. For example, to find all + // resources that have a tag with the key Owner and the value TeamA, specify + // tag:Owner for the filter name and TeamA for the filter value. // - // * tag-key - The key of a tag assigned to the resource. Use this - // filter to find all resources assigned a tag with a specific key, regardless of - // the tag value. + // * tag-key - The + // key of a tag assigned to the resource. Use this filter to find all resources + // assigned a tag with a specific key, regardless of the tag value. // - // * vpc-id - The ID of the VPC. + // * vpc-id - The + // ID of the VPC. Filters []*types.Filter // The maximum number of results to return with a single call. To retrieve the diff --git a/service/ec2/api_op_DescribeVpnConnections.go b/service/ec2/api_op_DescribeVpnConnections.go index a73085b1345..aa4a862f74c 100644 --- a/service/ec2/api_op_DescribeVpnConnections.go +++ b/service/ec2/api_op_DescribeVpnConnections.go @@ -40,48 +40,47 @@ type DescribeVpnConnectionsInput struct { // One or more filters. // - // * customer-gateway-configuration - The configuration + // * customer-gateway-configuration - The configuration // information for the customer gateway. // - // * customer-gateway-id - The ID of a + // * customer-gateway-id - The ID of a // customer gateway associated with the VPN connection. 
// - // * state - The state of - // the VPN connection (pending | available | deleting | deleted). + // * state - The state of the + // VPN connection (pending | available | deleting | deleted). // - // * + // * // option.static-routes-only - Indicates whether the connection has static routes // only. Used for devices that do not support Border Gateway Protocol (BGP). // - // * + // * // route.destination-cidr-block - The destination CIDR block. This corresponds to // the subnet used in a customer data center. // - // * bgp-asn - The BGP Autonomous + // * bgp-asn - The BGP Autonomous // System Number (ASN) associated with a BGP device. // - // * tag: - The key/value + // * tag: - The key/value // combination of a tag assigned to the resource. Use the tag key in the filter // name and the tag value as the filter value. For example, to find all resources // that have a tag with the key Owner and the value TeamA, specify tag:Owner for // the filter name and TeamA for the filter value. // - // * tag-key - The key of a - // tag assigned to the resource. Use this filter to find all resources assigned a - // tag with a specific key, regardless of the tag value. + // * tag-key - The key of a tag + // assigned to the resource. Use this filter to find all resources assigned a tag + // with a specific key, regardless of the tag value. // - // * type - The type of - // VPN connection. Currently the only supported type is ipsec.1. + // * type - The type of VPN + // connection. Currently the only supported type is ipsec.1. // - // * - // vpn-connection-id - The ID of the VPN connection. + // * vpn-connection-id - + // The ID of the VPN connection. // - // * vpn-gateway-id - The ID - // of a virtual private gateway associated with the VPN connection. + // * vpn-gateway-id - The ID of a virtual private + // gateway associated with the VPN connection. // - // * - // transit-gateway-id - The ID of a transit gateway associated with the VPN - // connection. + // * transit-gateway-id - The ID of a + // transit gateway associated with the VPN connection. Filters []*types.Filter // One or more VPN connection IDs. Default: Describes your VPN connections. diff --git a/service/ec2/api_op_DescribeVpnGateways.go b/service/ec2/api_op_DescribeVpnGateways.go index a2bac7faf7c..58b627f9192 100644 --- a/service/ec2/api_op_DescribeVpnGateways.go +++ b/service/ec2/api_op_DescribeVpnGateways.go @@ -41,37 +41,37 @@ type DescribeVpnGatewaysInput struct { // One or more filters. // - // * amazon-side-asn - The Autonomous System Number (ASN) - // for the Amazon side of the gateway. + // * amazon-side-asn - The Autonomous System Number (ASN) for + // the Amazon side of the gateway. // - // * attachment.state - The current state - // of the attachment between the gateway and the VPC (attaching | attached | - // detaching | detached). + // * attachment.state - The current state of the + // attachment between the gateway and the VPC (attaching | attached | detaching | + // detached). // - // * attachment.vpc-id - The ID of an attached VPC. + // * attachment.vpc-id - The ID of an attached VPC. // - // - // * availability-zone - The Availability Zone for the virtual private gateway (if + // * + // availability-zone - The Availability Zone for the virtual private gateway (if // applicable). // - // * state - The state of the virtual private gateway (pending | + // * state - The state of the virtual private gateway (pending | // available | deleting | deleted). 
// - // * tag: - The key/value combination of a - // tag assigned to the resource. Use the tag key in the filter name and the tag - // value as the filter value. For example, to find all resources that have a tag - // with the key Owner and the value TeamA, specify tag:Owner for the filter name - // and TeamA for the filter value. + // * tag: - The key/value combination of a tag + // assigned to the resource. Use the tag key in the filter name and the tag value + // as the filter value. For example, to find all resources that have a tag with the + // key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA + // for the filter value. // - // * tag-key - The key of a tag assigned to - // the resource. Use this filter to find all resources assigned a tag with a - // specific key, regardless of the tag value. + // * tag-key - The key of a tag assigned to the resource. + // Use this filter to find all resources assigned a tag with a specific key, + // regardless of the tag value. // - // * type - The type of virtual - // private gateway. Currently the only supported type is ipsec.1. + // * type - The type of virtual private gateway. + // Currently the only supported type is ipsec.1. // - // * - // vpn-gateway-id - The ID of the virtual private gateway. + // * vpn-gateway-id - The ID of the + // virtual private gateway. Filters []*types.Filter // One or more virtual private gateway IDs. Default: Describes all your virtual diff --git a/service/ec2/api_op_DetachNetworkInterface.go b/service/ec2/api_op_DetachNetworkInterface.go index b38ba22623f..239729a2bc9 100644 --- a/service/ec2/api_op_DetachNetworkInterface.go +++ b/service/ec2/api_op_DetachNetworkInterface.go @@ -42,16 +42,16 @@ type DetachNetworkInterfaceInput struct { // Specifies whether to force a detachment. // - // * Use the Force parameter only as - // a last resort to detach a network interface from a failed instance. + // * Use the Force parameter only as a + // last resort to detach a network interface from a failed instance. // - // * If - // you use the Force parameter to detach a network interface, you might not be able - // to attach a different network interface to the same index on the instance - // without first stopping and starting the instance. + // * If you use + // the Force parameter to detach a network interface, you might not be able to + // attach a different network interface to the same index on the instance without + // first stopping and starting the instance. // - // * If you force the - // detachment of a network interface, the instance metadata + // * If you force the detachment of a + // network interface, the instance metadata // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html) // might not get updated. This means that the attributes associated with the // detached network interface might still be visible. The instance metadata will diff --git a/service/ec2/api_op_DisassociateClientVpnTargetNetwork.go b/service/ec2/api_op_DisassociateClientVpnTargetNetwork.go index 97e1cb399ac..82873a95c58 100644 --- a/service/ec2/api_op_DisassociateClientVpnTargetNetwork.go +++ b/service/ec2/api_op_DisassociateClientVpnTargetNetwork.go @@ -12,18 +12,18 @@ import ( ) // Disassociates a target network from the specified Client VPN endpoint. 
When you -// disassociate the last target network from a Client VPN, the following happens: -// +// disassociate the last target network from a Client VPN, the following +// happens: // // * The route that was automatically added for the VPC is deleted // -// * All +// * All // active client connections are terminated // -// * New client connections are +// * New client connections are // disallowed // -// * The Client VPN endpoint's status changes to pending-associate +// * The Client VPN endpoint's status changes to pending-associate func (c *Client) DisassociateClientVpnTargetNetwork(ctx context.Context, params *DisassociateClientVpnTargetNetworkInput, optFns ...func(*Options)) (*DisassociateClientVpnTargetNetworkOutput, error) { if params == nil { params = &DisassociateClientVpnTargetNetworkInput{} diff --git a/service/ec2/api_op_ExportTransitGatewayRoutes.go b/service/ec2/api_op_ExportTransitGatewayRoutes.go index e86ab99da96..ddf1c83efe9 100644 --- a/service/ec2/api_op_ExportTransitGatewayRoutes.go +++ b/service/ec2/api_op_ExportTransitGatewayRoutes.go @@ -52,35 +52,34 @@ type ExportTransitGatewayRoutesInput struct { // One or more filters. The possible values are: // - // * + // * // attachment.transit-gateway-attachment-id - The id of the transit gateway // attachment. // - // * attachment.resource-id - The resource id of the transit - // gateway attachment. + // * attachment.resource-id - The resource id of the transit gateway + // attachment. // - // * route-search.exact-match - The exact match of the - // specified filter. + // * route-search.exact-match - The exact match of the specified + // filter. // - // * route-search.longest-prefix-match - The longest prefix - // that matches the route. + // * route-search.longest-prefix-match - The longest prefix that matches + // the route. // - // * route-search.subnet-of-match - The routes with a - // subnet that match the specified CIDR filter. + // * route-search.subnet-of-match - The routes with a subnet that match + // the specified CIDR filter. // - // * - // route-search.supernet-of-match - The routes with a CIDR that encompass the CIDR - // filter. For example, if you have 10.0.1.0/29 and 10.0.1.0/31 routes in your - // route table and you specify supernet-of-match as 10.0.1.0/30, then the result - // returns 10.0.1.0/29. + // * route-search.supernet-of-match - The routes with a + // CIDR that encompass the CIDR filter. For example, if you have 10.0.1.0/29 and + // 10.0.1.0/31 routes in your route table and you specify supernet-of-match as + // 10.0.1.0/30, then the result returns 10.0.1.0/29. // - // * state - The state of the route (active | - // blackhole). + // * state - The state of the + // route (active | blackhole). // - // * transit-gateway-route-destination-cidr-block - The CIDR - // range. + // * transit-gateway-route-destination-cidr-block - + // The CIDR range. // - // * type - The type of route (propagated | static). + // * type - The type of route (propagated | static). Filters []*types.Filter } diff --git a/service/ec2/api_op_GetCapacityReservationUsage.go b/service/ec2/api_op_GetCapacityReservationUsage.go index 5fa00c3a0ec..db9d7a71249 100644 --- a/service/ec2/api_op_GetCapacityReservationUsage.go +++ b/service/ec2/api_op_GetCapacityReservationUsage.go @@ -76,24 +76,24 @@ type GetCapacityReservationUsageOutput struct { // The current state of the Capacity Reservation. 
A Capacity Reservation can be in // one of the following states: // - // * active - The Capacity Reservation is active - // and the capacity is available for your use. + // * active - The Capacity Reservation is active and + // the capacity is available for your use. // - // * expired - The Capacity - // Reservation expired automatically at the date and time specified in your - // request. The reserved capacity is no longer available for your use. + // * expired - The Capacity Reservation + // expired automatically at the date and time specified in your request. The + // reserved capacity is no longer available for your use. // - // * - // cancelled - The Capacity Reservation was manually cancelled. The reserved - // capacity is no longer available for your use. + // * cancelled - The + // Capacity Reservation was manually cancelled. The reserved capacity is no longer + // available for your use. // - // * pending - The Capacity - // Reservation request was successful but the capacity provisioning is still - // pending. + // * pending - The Capacity Reservation request was + // successful but the capacity provisioning is still pending. // - // * failed - The Capacity Reservation request has failed. A request - // might fail due to invalid request parameters, capacity constraints, or instance - // limit constraints. Failed requests are retained for 60 minutes. + // * failed - The + // Capacity Reservation request has failed. A request might fail due to invalid + // request parameters, capacity constraints, or instance limit constraints. Failed + // requests are retained for 60 minutes. State types.CapacityReservationState // The number of instances for which the Capacity Reservation reserves capacity. diff --git a/service/ec2/api_op_GetCoipPoolUsage.go b/service/ec2/api_op_GetCoipPoolUsage.go index eee08bb4e8d..dfe9f0b5af6 100644 --- a/service/ec2/api_op_GetCoipPoolUsage.go +++ b/service/ec2/api_op_GetCoipPoolUsage.go @@ -42,15 +42,15 @@ type GetCoipPoolUsageInput struct { // The filters. The following are the possible values: // - // * + // * // coip-address-usage.allocation-id // - // * coip-address-usage.aws-account-id + // * coip-address-usage.aws-account-id // - // * + // * // coip-address-usage.aws-service // - // * coip-address-usage.co-ip + // * coip-address-usage.co-ip Filters []*types.Filter // The maximum number of results to return with a single call. To retrieve the diff --git a/service/ec2/api_op_GetTransitGatewayAttachmentPropagations.go b/service/ec2/api_op_GetTransitGatewayAttachmentPropagations.go index 02da5482503..3d9c0ecbacc 100644 --- a/service/ec2/api_op_GetTransitGatewayAttachmentPropagations.go +++ b/service/ec2/api_op_GetTransitGatewayAttachmentPropagations.go @@ -43,8 +43,8 @@ type GetTransitGatewayAttachmentPropagationsInput struct { // One or more filters. The possible values are: // - // * - // transit-gateway-route-table-id - The ID of the transit gateway route table. + // * transit-gateway-route-table-id + // - The ID of the transit gateway route table. Filters []*types.Filter // The maximum number of results to return with a single call. 
To retrieve the diff --git a/service/ec2/api_op_GetTransitGatewayMulticastDomainAssociations.go b/service/ec2/api_op_GetTransitGatewayMulticastDomainAssociations.go index 92b5d0f60da..a58099be2f7 100644 --- a/service/ec2/api_op_GetTransitGatewayMulticastDomainAssociations.go +++ b/service/ec2/api_op_GetTransitGatewayMulticastDomainAssociations.go @@ -38,20 +38,20 @@ type GetTransitGatewayMulticastDomainAssociationsInput struct { // One or more filters. The possible values are: // - // * resource-id - The ID of the + // * resource-id - The ID of the // resource. // - // * resource-type - The type of resource. The valid value is: - // vpc. + // * resource-type - The type of resource. The valid value is: vpc. // - // * state - The state of the subnet association. Valid values are - // associated | associating | disassociated | disassociating. + // * + // state - The state of the subnet association. Valid values are associated | + // associating | disassociated | disassociating. // - // * subnet-id - - // The ID of the subnet. + // * subnet-id - The ID of the + // subnet. // - // * transit-gateway-attachment-id - The id of the - // transit gateway attachment. + // * transit-gateway-attachment-id - The id of the transit gateway + // attachment. Filters []*types.Filter // The maximum number of results to return with a single call. To retrieve the diff --git a/service/ec2/api_op_GetTransitGatewayPrefixListReferences.go b/service/ec2/api_op_GetTransitGatewayPrefixListReferences.go index a596bb3f540..44303cb0b12 100644 --- a/service/ec2/api_op_GetTransitGatewayPrefixListReferences.go +++ b/service/ec2/api_op_GetTransitGatewayPrefixListReferences.go @@ -43,27 +43,26 @@ type GetTransitGatewayPrefixListReferencesInput struct { // One or more filters. The possible values are: // - // * attachment.resource-id - - // The ID of the resource for the attachment. + // * attachment.resource-id - The ID + // of the resource for the attachment. // - // * attachment.resource-type - The - // type of resource for the attachment. Valid values are vpc | vpn | - // direct-connect-gateway | peering. + // * attachment.resource-type - The type of + // resource for the attachment. Valid values are vpc | vpn | direct-connect-gateway + // | peering. // - // * - // attachment.transit-gateway-attachment-id - The ID of the attachment. - // - // * - // is-blackhole - Whether traffic matching the route is blocked (true | false). + // * attachment.transit-gateway-attachment-id - The ID of the + // attachment. // + // * is-blackhole - Whether traffic matching the route is blocked + // (true | false). // // * prefix-list-id - The ID of the prefix list. // - // * prefix-list-owner-id - The - // ID of the owner of the prefix list. + // * + // prefix-list-owner-id - The ID of the owner of the prefix list. // - // * state - The state of the prefix list - // reference (pending | available | modifying | deleting). + // * state - The + // state of the prefix list reference (pending | available | modifying | deleting). Filters []*types.Filter // The maximum number of results to return with a single call. To retrieve the diff --git a/service/ec2/api_op_GetTransitGatewayRouteTableAssociations.go b/service/ec2/api_op_GetTransitGatewayRouteTableAssociations.go index f470e7e5d92..024b04f3951 100644 --- a/service/ec2/api_op_GetTransitGatewayRouteTableAssociations.go +++ b/service/ec2/api_op_GetTransitGatewayRouteTableAssociations.go @@ -43,14 +43,14 @@ type GetTransitGatewayRouteTableAssociationsInput struct { // One or more filters. 
The possible values are: // - // * resource-id - The ID of the + // * resource-id - The ID of the // resource. // - // * resource-type - The resource type. Valid values are vpc | vpn | + // * resource-type - The resource type. Valid values are vpc | vpn | // direct-connect-gateway | peering. // - // * transit-gateway-attachment-id - The ID - // of the attachment. + // * transit-gateway-attachment-id - The ID of + // the attachment. Filters []*types.Filter // The maximum number of results to return with a single call. To retrieve the diff --git a/service/ec2/api_op_GetTransitGatewayRouteTablePropagations.go b/service/ec2/api_op_GetTransitGatewayRouteTablePropagations.go index 8ba89a758f0..a5dd424119d 100644 --- a/service/ec2/api_op_GetTransitGatewayRouteTablePropagations.go +++ b/service/ec2/api_op_GetTransitGatewayRouteTablePropagations.go @@ -43,14 +43,14 @@ type GetTransitGatewayRouteTablePropagationsInput struct { // One or more filters. The possible values are: // - // * resource-id - The ID of the + // * resource-id - The ID of the // resource. // - // * resource-type - The resource type. Valid values are vpc | vpn | + // * resource-type - The resource type. Valid values are vpc | vpn | // direct-connect-gateway | peering. // - // * transit-gateway-attachment-id - The ID - // of the attachment. + // * transit-gateway-attachment-id - The ID of + // the attachment. Filters []*types.Filter // The maximum number of results to return with a single call. To retrieve the diff --git a/service/ec2/api_op_ImportImage.go b/service/ec2/api_op_ImportImage.go index bafb4b2119c..1ffab92dd4f 100644 --- a/service/ec2/api_op_ImportImage.go +++ b/service/ec2/api_op_ImportImage.go @@ -72,23 +72,23 @@ type ImportImageInput struct { // Encrypted flag must also be set. The CMK identifier may be provided in any of // the following formats: // - // * Key ID + // * Key ID // - // * Key alias. The alias ARN contains - // the arn:aws:kms namespace, followed by the Region of the CMK, the AWS account ID - // of the CMK owner, the alias namespace, and then the CMK alias. For example, + // * Key alias. The alias ARN contains the + // arn:aws:kms namespace, followed by the Region of the CMK, the AWS account ID of + // the CMK owner, the alias namespace, and then the CMK alias. For example, // arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. // - // * ARN using key ID. - // The ID ARN contains the arn:aws:kms namespace, followed by the Region of the - // CMK, the AWS account ID of the CMK owner, the key namespace, and then the CMK - // ID. For example, + // * ARN using key ID. The + // ID ARN contains the arn:aws:kms namespace, followed by the Region of the CMK, + // the AWS account ID of the CMK owner, the key namespace, and then the CMK ID. For + // example, // arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. // - // - // * ARN using key alias. The alias ARN contains the arn:aws:kms namespace, - // followed by the Region of the CMK, the AWS account ID of the CMK owner, the - // alias namespace, and then the CMK alias. For example, + // * + // ARN using key alias. The alias ARN contains the arn:aws:kms namespace, followed + // by the Region of the CMK, the AWS account ID of the CMK owner, the alias + // namespace, and then the CMK alias. For example, // arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. 
// // AWS parses KmsKeyId diff --git a/service/ec2/api_op_ImportSnapshot.go b/service/ec2/api_op_ImportSnapshot.go index e23b6d99b15..8cd2118ea90 100644 --- a/service/ec2/api_op_ImportSnapshot.go +++ b/service/ec2/api_op_ImportSnapshot.go @@ -62,23 +62,23 @@ type ImportSnapshotInput struct { // Encrypted flag must also be set. The CMK identifier may be provided in any of // the following formats: // - // * Key ID + // * Key ID // - // * Key alias. The alias ARN contains - // the arn:aws:kms namespace, followed by the Region of the CMK, the AWS account ID - // of the CMK owner, the alias namespace, and then the CMK alias. For example, + // * Key alias. The alias ARN contains the + // arn:aws:kms namespace, followed by the Region of the CMK, the AWS account ID of + // the CMK owner, the alias namespace, and then the CMK alias. For example, // arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. // - // * ARN using key ID. - // The ID ARN contains the arn:aws:kms namespace, followed by the Region of the - // CMK, the AWS account ID of the CMK owner, the key namespace, and then the CMK - // ID. For example, + // * ARN using key ID. The + // ID ARN contains the arn:aws:kms namespace, followed by the Region of the CMK, + // the AWS account ID of the CMK owner, the key namespace, and then the CMK ID. For + // example, // arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. // - // - // * ARN using key alias. The alias ARN contains the arn:aws:kms namespace, - // followed by the Region of the CMK, the AWS account ID of the CMK owner, the - // alias namespace, and then the CMK alias. For example, + // * + // ARN using key alias. The alias ARN contains the arn:aws:kms namespace, followed + // by the Region of the CMK, the AWS account ID of the CMK owner, the alias + // namespace, and then the CMK alias. For example, // arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. // // AWS parses KmsKeyId diff --git a/service/ec2/api_op_ModifyCapacityReservation.go b/service/ec2/api_op_ModifyCapacityReservation.go index 03559d6a8ef..424a582fda0 100644 --- a/service/ec2/api_op_ModifyCapacityReservation.go +++ b/service/ec2/api_op_ModifyCapacityReservation.go @@ -59,13 +59,13 @@ type ModifyCapacityReservationInput struct { // Indicates the way in which the Capacity Reservation ends. A Capacity Reservation // can have one of the following end types: // - // * unlimited - The Capacity - // Reservation remains active until you explicitly cancel it. Do not provide an - // EndDate value if EndDateType is unlimited. + // * unlimited - The Capacity Reservation + // remains active until you explicitly cancel it. Do not provide an EndDate value + // if EndDateType is unlimited. // - // * limited - The Capacity - // Reservation expires automatically at a specified date and time. You must provide - // an EndDate value if EndDateType is limited. + // * limited - The Capacity Reservation expires + // automatically at a specified date and time. You must provide an EndDate value if + // EndDateType is limited. EndDateType types.EndDateType // The number of instances for which to reserve capacity. diff --git a/service/ec2/api_op_ModifyClientVpnEndpoint.go b/service/ec2/api_op_ModifyClientVpnEndpoint.go index 6dece56c453..c084ea3bf8d 100644 --- a/service/ec2/api_op_ModifyClientVpnEndpoint.go +++ b/service/ec2/api_op_ModifyClientVpnEndpoint.go @@ -39,16 +39,16 @@ type ModifyClientVpnEndpointInput struct { // connection logging, data about client connections is sent to a Cloudwatch Logs // log stream. 
The following information is logged: // - // * Client connection + // * Client connection // requests // - // * Client connection results (successful and unsuccessful) + // * Client connection results (successful and unsuccessful) // - // * - // Reasons for unsuccessful client connection requests + // * Reasons + // for unsuccessful client connection requests // - // * Client connection - // termination time + // * Client connection termination + // time ConnectionLogOptions *types.ConnectionLogOptions // A brief description of the Client VPN endpoint. diff --git a/service/ec2/api_op_ModifyEbsDefaultKmsKeyId.go b/service/ec2/api_op_ModifyEbsDefaultKmsKeyId.go index 7d394338621..25420bb2057 100644 --- a/service/ec2/api_op_ModifyEbsDefaultKmsKeyId.go +++ b/service/ec2/api_op_ModifyEbsDefaultKmsKeyId.go @@ -42,17 +42,17 @@ type ModifyEbsDefaultKmsKeyIdInput struct { // AWS managed CMK for EBS is used. If KmsKeyId is specified, the encrypted state // must be true. You can specify the CMK using any of the following: // - // * Key ID. - // For example, key/1234abcd-12ab-34cd-56ef-1234567890ab. + // * Key ID. For + // example, key/1234abcd-12ab-34cd-56ef-1234567890ab. // - // * Key alias. For - // example, alias/ExampleAlias. + // * Key alias. For example, + // alias/ExampleAlias. // - // * Key ARN. For example, + // * Key ARN. For example, // arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. // - // - // * Alias ARN. For example, + // * + // Alias ARN. For example, // arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. // // AWS authenticates the diff --git a/service/ec2/api_op_ModifyInstancePlacement.go b/service/ec2/api_op_ModifyInstancePlacement.go index fde5e09a075..d049c979db4 100644 --- a/service/ec2/api_op_ModifyInstancePlacement.go +++ b/service/ec2/api_op_ModifyInstancePlacement.go @@ -14,21 +14,21 @@ import ( // Modifies the placement attributes for a specified instance. You can do the // following: // -// * Modify the affinity between an instance and a Dedicated Host +// * Modify the affinity between an instance and a Dedicated Host // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/dedicated-hosts-overview.html). // When affinity is set to host and the instance is not associated with a specific // Dedicated Host, the next time the instance is launched, it is automatically // associated with the host on which it lands. If the instance is restarted or // rebooted, this relationship persists. // -// * Change the Dedicated Host with -// which an instance is associated. +// * Change the Dedicated Host with which an +// instance is associated. // -// * Change the instance tenancy of an -// instance from host to dedicated, or from dedicated to host. +// * Change the instance tenancy of an instance from host +// to dedicated, or from dedicated to host. // -// * Move an -// instance to or from a placement group +// * Move an instance to or from a +// placement group // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html). // // At diff --git a/service/ec2/api_op_ModifyVpcPeeringConnectionOptions.go b/service/ec2/api_op_ModifyVpcPeeringConnectionOptions.go index 424750e16c5..d825c7c62e2 100644 --- a/service/ec2/api_op_ModifyVpcPeeringConnectionOptions.go +++ b/service/ec2/api_op_ModifyVpcPeeringConnectionOptions.go @@ -14,15 +14,15 @@ import ( // Modifies the VPC peering connection options on one side of a VPC peering // connection. 
You can do the following: // -// * Enable/disable communication over -// the peering connection between an EC2-Classic instance that's linked to your VPC +// * Enable/disable communication over the +// peering connection between an EC2-Classic instance that's linked to your VPC // (using ClassicLink) and instances in the peer VPC. // -// * Enable/disable +// * Enable/disable // communication over the peering connection between instances in your VPC and an // EC2-Classic instance that's linked to the peer VPC. // -// * Enable/disable the +// * Enable/disable the // ability to resolve public DNS hostnames to private IP addresses when queried // from instances in the peer VPC. // diff --git a/service/ec2/api_op_ModifyVpnConnection.go b/service/ec2/api_op_ModifyVpnConnection.go index eaeaad680a6..7064660e1b0 100644 --- a/service/ec2/api_op_ModifyVpnConnection.go +++ b/service/ec2/api_op_ModifyVpnConnection.go @@ -15,20 +15,20 @@ import ( // connection. To modify the target gateway, the following migration options are // available: // -// * An existing virtual private gateway to a new virtual private +// * An existing virtual private gateway to a new virtual private // gateway // -// * An existing virtual private gateway to a transit gateway +// * An existing virtual private gateway to a transit gateway // -// * -// An existing transit gateway to a new transit gateway +// * An +// existing transit gateway to a new transit gateway // -// * An existing transit -// gateway to a virtual private gateway +// * An existing transit gateway +// to a virtual private gateway // -// Before you perform the migration to the -// new gateway, you must configure the new gateway. Use CreateVpnGateway to create -// a virtual private gateway, or CreateTransitGateway to create a transit gateway. +// Before you perform the migration to the new +// gateway, you must configure the new gateway. Use CreateVpnGateway to create a +// virtual private gateway, or CreateTransitGateway to create a transit gateway. // This step is required when you migrate from a virtual private gateway with // static routes to a transit gateway. You must delete the static routes before you // migrate to the new gateway. Keep a copy of the static route before you delete diff --git a/service/ec2/api_op_RegisterImage.go b/service/ec2/api_op_RegisterImage.go index f43ba6a2d2e..50fc7c6435f 100644 --- a/service/ec2/api_op_RegisterImage.go +++ b/service/ec2/api_op_RegisterImage.go @@ -31,18 +31,18 @@ import ( // registering the AMI, do the following to preserve the billing product code // association: // -// * Launch an instance from an existing AMI with that billing +// * Launch an instance from an existing AMI with that billing // product code. // -// * Customize the instance. +// * Customize the instance. // -// * Create an AMI from the -// instance using CreateImage. +// * Create an AMI from the instance +// using CreateImage. // -// If you purchase a Reserved Instance to apply to an -// On-Demand Instance that was launched from an AMI with a billing product code, -// make sure that the Reserved Instance has the matching billing product code. If -// you purchase a Reserved Instance without the matching billing product code, the +// If you purchase a Reserved Instance to apply to an On-Demand +// Instance that was launched from an AMI with a billing product code, make sure +// that the Reserved Instance has the matching billing product code. 
If you +// purchase a Reserved Instance without the matching billing product code, the // Reserved Instance will not be applied to the On-Demand Instance. For information // about how to obtain the platform details and billing information of an AMI, see // Obtaining billing information diff --git a/service/ec2/api_op_ReportInstanceStatus.go b/service/ec2/api_op_ReportInstanceStatus.go index bec2ea2ba73..86683db559f 100644 --- a/service/ec2/api_op_ReportInstanceStatus.go +++ b/service/ec2/api_op_ReportInstanceStatus.go @@ -42,34 +42,33 @@ type ReportInstanceStatusInput struct { // The reason codes that describe the health state of your instance. // - // * + // * // instance-stuck-in-state: My instance is stuck in a state. // - // * unresponsive: - // My instance is unresponsive. + // * unresponsive: My + // instance is unresponsive. // - // * not-accepting-credentials: My instance is - // not accepting my credentials. + // * not-accepting-credentials: My instance is not + // accepting my credentials. // - // * password-not-available: A password is not - // available for my instance. + // * password-not-available: A password is not available + // for my instance. // - // * performance-network: My instance is - // experiencing performance problems that I believe are network related. + // * performance-network: My instance is experiencing performance + // problems that I believe are network related. // - // * - // performance-instance-store: My instance is experiencing performance problems - // that I believe are related to the instance stores. + // * performance-instance-store: My + // instance is experiencing performance problems that I believe are related to the + // instance stores. // - // * - // performance-ebs-volume: My instance is experiencing performance problems that I - // believe are related to an EBS volume. + // * performance-ebs-volume: My instance is experiencing + // performance problems that I believe are related to an EBS volume. // - // * performance-other: My instance is - // experiencing performance problems. + // * + // performance-other: My instance is experiencing performance problems. // - // * other: [explain using the description - // parameter] + // * other: + // [explain using the description parameter] // // This member is required. ReasonCodes []types.ReportInstanceReasonCodes diff --git a/service/ec2/api_op_RequestSpotInstances.go b/service/ec2/api_op_RequestSpotInstances.go index 6364c322f14..7a705aca996 100644 --- a/service/ec2/api_op_RequestSpotInstances.go +++ b/service/ec2/api_op_RequestSpotInstances.go @@ -114,12 +114,12 @@ type RequestSpotInstancesInput struct { // The end date of the request, in UTC format (YYYY-MM-DDTHH:MM:SSZ). // - // * For a + // * For a // persistent request, the request remains active until the ValidUntil date and // time is reached. Otherwise, the request remains active until you cancel it. // - // - // * For a one-time request, the request remains active until all instances launch, + // * + // For a one-time request, the request remains active until all instances launch, // the request is canceled, or the ValidUntil date and time is reached. By default, // the request is valid for 7 days from the date the request was created. ValidUntil *time.Time diff --git a/service/ec2/api_op_RunInstances.go b/service/ec2/api_op_RunInstances.go index 2a6e1f21fba..ece39896d99 100644 --- a/service/ec2/api_op_RunInstances.go +++ b/service/ec2/api_op_RunInstances.go @@ -16,35 +16,35 @@ import ( // permissions. 
You can specify a number of options, or leave the default options. // The following rules apply: // -// * [EC2-VPC] If you don't specify a subnet ID, we +// * [EC2-VPC] If you don't specify a subnet ID, we // choose a default subnet from your default VPC for you. If you don't have a // default VPC, you must specify a subnet ID in the request. // -// * [EC2-Classic] -// If don't specify an Availability Zone, we choose one for you. +// * [EC2-Classic] If +// don't specify an Availability Zone, we choose one for you. // -// * Some -// instance types must be launched into a VPC. If you do not have a default VPC, or -// if you do not specify a subnet ID, the request fails. For more information, see +// * Some instance +// types must be launched into a VPC. If you do not have a default VPC, or if you +// do not specify a subnet ID, the request fails. For more information, see // Instance types available only in a VPC // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-vpc.html#vpc-only-instance-types). // -// -// * [EC2-VPC] All instances have a network interface with a primary private IPv4 +// * +// [EC2-VPC] All instances have a network interface with a primary private IPv4 // address. If you don't specify this address, we choose one from the IPv4 range of // your subnet. // -// * Not all instance types support IPv6 addresses. For more +// * Not all instance types support IPv6 addresses. For more // information, see Instance types // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html). // -// -// * If you don't specify a security group ID, we use the default security group. -// For more information, see Security groups +// * If +// you don't specify a security group ID, we use the default security group. For +// more information, see Security groups // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html). // -// -// * If any of the AMIs have a product code attached for which the user has not +// * +// If any of the AMIs have a product code attached for which the user has not // subscribed, the request fails. // // You can create a launch template diff --git a/service/ec2/api_op_SearchTransitGatewayMulticastGroups.go b/service/ec2/api_op_SearchTransitGatewayMulticastGroups.go index 8b1ccc8e248..353a40df0c8 100644 --- a/service/ec2/api_op_SearchTransitGatewayMulticastGroups.go +++ b/service/ec2/api_op_SearchTransitGatewayMulticastGroups.go @@ -38,35 +38,35 @@ type SearchTransitGatewayMulticastGroupsInput struct { // One or more filters. The possible values are: // - // * group-ip-address - The IP + // * group-ip-address - The IP // address of the transit gateway multicast group. // - // * is-group-member - The + // * is-group-member - The // resource is a group member. Valid values are true | false. // - // * - // is-group-source - The resource is a group source. Valid values are true | - // false. + // * is-group-source - + // The resource is a group source. Valid values are true | false. // - // * member-type - The member type. Valid values are igmp | static. + // * member-type - + // The member type. Valid values are igmp | static. // + // * resource-id - The ID of the + // resource. // - // * resource-id - The ID of the resource. + // * resource-type - The type of resource. Valid values are vpc | vpn | + // direct-connect-gateway | tgw-peering. // - // * resource-type - The type of - // resource. Valid values are vpc | vpn | direct-connect-gateway | tgw-peering. + // * source-type - The source type. 
Valid + // values are igmp | static. // + // * state - The state of the subnet association. Valid + // values are associated | associated | disassociated | disassociating. // - // * source-type - The source type. Valid values are igmp | static. + // * + // subnet-id - The ID of the subnet. // - // * state - - // The state of the subnet association. Valid values are associated | associated | - // disassociated | disassociating. - // - // * subnet-id - The ID of the subnet. - // - // * - // transit-gateway-attachment-id - The id of the transit gateway attachment. + // * transit-gateway-attachment-id - The id of + // the transit gateway attachment. Filters []*types.Filter // The maximum number of results to return with a single call. To retrieve the diff --git a/service/ec2/api_op_SearchTransitGatewayRoutes.go b/service/ec2/api_op_SearchTransitGatewayRoutes.go index adeacbfa764..7dd95f823fa 100644 --- a/service/ec2/api_op_SearchTransitGatewayRoutes.go +++ b/service/ec2/api_op_SearchTransitGatewayRoutes.go @@ -31,39 +31,38 @@ type SearchTransitGatewayRoutesInput struct { // One or more filters. The possible values are: // - // * + // * // attachment.transit-gateway-attachment-id- The id of the transit gateway // attachment. // - // * attachment.resource-id - The resource id of the transit - // gateway attachment. - // - // * attachment.resource-type - The attachment resource - // type. Valid values are vpc | vpn | direct-connect-gateway | peering. + // * attachment.resource-id - The resource id of the transit gateway + // attachment. // - // * - // prefix-list-id - The ID of the prefix list. + // * attachment.resource-type - The attachment resource type. Valid + // values are vpc | vpn | direct-connect-gateway | peering. // - // * route-search.exact-match - - // The exact match of the specified filter. + // * prefix-list-id - The + // ID of the prefix list. // - // * - // route-search.longest-prefix-match - The longest prefix that matches the route. + // * route-search.exact-match - The exact match of the + // specified filter. // + // * route-search.longest-prefix-match - The longest prefix that + // matches the route. // - // * route-search.subnet-of-match - The routes with a subnet that match the - // specified CIDR filter. + // * route-search.subnet-of-match - The routes with a subnet + // that match the specified CIDR filter. // - // * route-search.supernet-of-match - The routes with a - // CIDR that encompass the CIDR filter. For example, if you have 10.0.1.0/29 and - // 10.0.1.0/31 routes in your route table and you specify supernet-of-match as - // 10.0.1.0/30, then the result returns 10.0.1.0/29. + // * route-search.supernet-of-match - The + // routes with a CIDR that encompass the CIDR filter. For example, if you have + // 10.0.1.0/29 and 10.0.1.0/31 routes in your route table and you specify + // supernet-of-match as 10.0.1.0/30, then the result returns 10.0.1.0/29. // - // * state - The state of - // the route (active | blackhole). + // * state + // - The state of the route (active | blackhole). // - // * type - The type of route (propagated | - // static). + // * type - The type of route + // (propagated | static). // // This member is required. Filters []*types.Filter diff --git a/service/ec2/doc.go b/service/ec2/doc.go index f27a4080e17..340fc34eff8 100644 --- a/service/ec2/doc.go +++ b/service/ec2/doc.go @@ -8,19 +8,19 @@ // eliminates the need to invest in hardware up front, so you can develop and // deploy applications faster. 
To learn more, see the following resources: // -// * +// * // Amazon EC2: AmazonEC2 product page (http://aws.amazon.com/ec2), Amazon EC2 // documentation (http://aws.amazon.com/documentation/ec2) // -// * Amazon EBS: -// Amazon EBS product page (http://aws.amazon.com/ebs), Amazon EBS documentation +// * Amazon EBS: Amazon +// EBS product page (http://aws.amazon.com/ebs), Amazon EBS documentation // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AmazonEBS.html) // -// * -// Amazon VPC: Amazon VPC product page (http://aws.amazon.com/vpc), Amazon VPC +// * Amazon +// VPC: Amazon VPC product page (http://aws.amazon.com/vpc), Amazon VPC // documentation (http://aws.amazon.com/documentation/vpc) // -// * AWS VPN: AWS VPN +// * AWS VPN: AWS VPN // product page (http://aws.amazon.com/vpn), AWS VPN documentation // (http://aws.amazon.com/documentation/vpn) package ec2 diff --git a/service/ec2/types/enums.go b/service/ec2/types/enums.go index 7114fe9a5e6..5f8c7512afd 100644 --- a/service/ec2/types/enums.go +++ b/service/ec2/types/enums.go @@ -24,10 +24,10 @@ type ActivityStatus string // Enum values for ActivityStatus const ( - ActivityStatusError ActivityStatus = "error" - ActivityStatusPending_fulfillment ActivityStatus = "pending_fulfillment" - ActivityStatusPending_termination ActivityStatus = "pending_termination" - ActivityStatusFulfilled ActivityStatus = "fulfilled" + ActivityStatusError ActivityStatus = "error" + ActivityStatusPendingFulfillment ActivityStatus = "pending_fulfillment" + ActivityStatusPendingTermination ActivityStatus = "pending_termination" + ActivityStatusFulfilled ActivityStatus = "fulfilled" ) // Values returns all known values for ActivityStatus. Note that this can be @@ -90,9 +90,9 @@ type AllocationStrategy string // Enum values for AllocationStrategy const ( - AllocationStrategyLowest_price AllocationStrategy = "lowestPrice" - AllocationStrategyDiversified AllocationStrategy = "diversified" - AllocationStrategyCapacity_optimized AllocationStrategy = "capacityOptimized" + AllocationStrategyLowestPrice AllocationStrategy = "lowestPrice" + AllocationStrategyDiversified AllocationStrategy = "diversified" + AllocationStrategyCapacityOptimized AllocationStrategy = "capacityOptimized" ) // Values returns all known values for AllocationStrategy. Note that this can be @@ -128,9 +128,9 @@ type ArchitectureType string // Enum values for ArchitectureType const ( - ArchitectureTypeI386 ArchitectureType = "i386" - ArchitectureTypeX86_64 ArchitectureType = "x86_64" - ArchitectureTypeArm64 ArchitectureType = "arm64" + ArchitectureTypeI386 ArchitectureType = "i386" + ArchitectureTypeX8664 ArchitectureType = "x86_64" + ArchitectureTypeArm64 ArchitectureType = "arm64" ) // Values returns all known values for ArchitectureType. Note that this can be @@ -148,9 +148,9 @@ type ArchitectureValues string // Enum values for ArchitectureValues const ( - ArchitectureValuesI386 ArchitectureValues = "i386" - ArchitectureValuesX86_64 ArchitectureValues = "x86_64" - ArchitectureValuesArm64 ArchitectureValues = "arm64" + ArchitectureValuesI386 ArchitectureValues = "i386" + ArchitectureValuesX8664 ArchitectureValues = "x86_64" + ArchitectureValuesArm64 ArchitectureValues = "arm64" ) // Values returns all known values for ArchitectureValues. 
Note that this can be @@ -309,13 +309,13 @@ type BatchState string // Enum values for BatchState const ( - BatchStateSubmitted BatchState = "submitted" - BatchStateActive BatchState = "active" - BatchStateCancelled BatchState = "cancelled" - BatchStateFailed BatchState = "failed" - BatchStateCancelled_running BatchState = "cancelled_running" - BatchStateCancelled_terminating_instances BatchState = "cancelled_terminating" - BatchStateModifying BatchState = "modifying" + BatchStateSubmitted BatchState = "submitted" + BatchStateActive BatchState = "active" + BatchStateCancelled BatchState = "cancelled" + BatchStateFailed BatchState = "failed" + BatchStateCancelledRunning BatchState = "cancelled_running" + BatchStateCancelledTerminatingInstances BatchState = "cancelled_terminating" + BatchStateModifying BatchState = "modifying" ) // Values returns all known values for BatchState. Note that this can be expanded @@ -395,10 +395,10 @@ type CancelBatchErrorCode string // Enum values for CancelBatchErrorCode const ( - CancelBatchErrorCodeFleet_request_id_does_not_exist CancelBatchErrorCode = "fleetRequestIdDoesNotExist" - CancelBatchErrorCodeFleet_request_id_malformed CancelBatchErrorCode = "fleetRequestIdMalformed" - CancelBatchErrorCodeFleet_request_not_in_cancellable_state CancelBatchErrorCode = "fleetRequestNotInCancellableState" - CancelBatchErrorCodeUnexpected_error CancelBatchErrorCode = "unexpectedError" + CancelBatchErrorCodeFleetRequestIdDoesNotExist CancelBatchErrorCode = "fleetRequestIdDoesNotExist" + CancelBatchErrorCodeFleetRequestIdMalformed CancelBatchErrorCode = "fleetRequestIdMalformed" + CancelBatchErrorCodeFleetRequestNotInCancellableState CancelBatchErrorCode = "fleetRequestNotInCancellableState" + CancelBatchErrorCodeUnexpectedError CancelBatchErrorCode = "unexpectedError" ) // Values returns all known values for CancelBatchErrorCode. 
Note that this can be @@ -442,17 +442,17 @@ type CapacityReservationInstancePlatform string // Enum values for CapacityReservationInstancePlatform const ( - CapacityReservationInstancePlatformLinux_unix CapacityReservationInstancePlatform = "Linux/UNIX" - CapacityReservationInstancePlatformRed_hat_enterprise_linux CapacityReservationInstancePlatform = "Red Hat Enterprise Linux" - CapacityReservationInstancePlatformSuse_linux CapacityReservationInstancePlatform = "SUSE Linux" - CapacityReservationInstancePlatformWindows CapacityReservationInstancePlatform = "Windows" - CapacityReservationInstancePlatformWindows_with_sql_server CapacityReservationInstancePlatform = "Windows with SQL Server" - CapacityReservationInstancePlatformWindows_with_sql_server_enterprise CapacityReservationInstancePlatform = "Windows with SQL Server Enterprise" - CapacityReservationInstancePlatformWindows_with_sql_server_standard CapacityReservationInstancePlatform = "Windows with SQL Server Standard" - CapacityReservationInstancePlatformWindows_with_sql_server_web CapacityReservationInstancePlatform = "Windows with SQL Server Web" - CapacityReservationInstancePlatformLinux_with_sql_server_standard CapacityReservationInstancePlatform = "Linux with SQL Server Standard" - CapacityReservationInstancePlatformLinux_with_sql_server_web CapacityReservationInstancePlatform = "Linux with SQL Server Web" - CapacityReservationInstancePlatformLinux_with_sql_server_enterprise CapacityReservationInstancePlatform = "Linux with SQL Server Enterprise" + CapacityReservationInstancePlatformLinuxUnix CapacityReservationInstancePlatform = "Linux/UNIX" + CapacityReservationInstancePlatformRedHatEnterpriseLinux CapacityReservationInstancePlatform = "Red Hat Enterprise Linux" + CapacityReservationInstancePlatformSuseLinux CapacityReservationInstancePlatform = "SUSE Linux" + CapacityReservationInstancePlatformWindows CapacityReservationInstancePlatform = "Windows" + CapacityReservationInstancePlatformWindowsWithSqlServer CapacityReservationInstancePlatform = "Windows with SQL Server" + CapacityReservationInstancePlatformWindowsWithSqlServerEnterprise CapacityReservationInstancePlatform = "Windows with SQL Server Enterprise" + CapacityReservationInstancePlatformWindowsWithSqlServerStandard CapacityReservationInstancePlatform = "Windows with SQL Server Standard" + CapacityReservationInstancePlatformWindowsWithSqlServerWeb CapacityReservationInstancePlatform = "Windows with SQL Server Web" + CapacityReservationInstancePlatformLinuxWithSqlServerStandard CapacityReservationInstancePlatform = "Linux with SQL Server Standard" + CapacityReservationInstancePlatformLinuxWithSqlServerWeb CapacityReservationInstancePlatform = "Linux with SQL Server Web" + CapacityReservationInstancePlatformLinuxWithSqlServerEnterprise CapacityReservationInstancePlatform = "Linux with SQL Server Enterprise" ) // Values returns all known values for CapacityReservationInstancePlatform. Note @@ -851,8 +851,8 @@ type DefaultTargetCapacityType string // Enum values for DefaultTargetCapacityType const ( - DefaultTargetCapacityTypeSpot DefaultTargetCapacityType = "spot" - DefaultTargetCapacityTypeOn_demand DefaultTargetCapacityType = "on-demand" + DefaultTargetCapacityTypeSpot DefaultTargetCapacityType = "spot" + DefaultTargetCapacityTypeOnDemand DefaultTargetCapacityType = "on-demand" ) // Values returns all known values for DefaultTargetCapacityType. 
Note that this @@ -869,10 +869,10 @@ type DeleteFleetErrorCode string // Enum values for DeleteFleetErrorCode const ( - DeleteFleetErrorCodeFleet_id_does_not_exist DeleteFleetErrorCode = "fleetIdDoesNotExist" - DeleteFleetErrorCodeFleet_id_malformed DeleteFleetErrorCode = "fleetIdMalformed" - DeleteFleetErrorCodeFleet_not_in_deletable_state DeleteFleetErrorCode = "fleetNotInDeletableState" - DeleteFleetErrorCodeUnexpected_error DeleteFleetErrorCode = "unexpectedError" + DeleteFleetErrorCodeFleetIdDoesNotExist DeleteFleetErrorCode = "fleetIdDoesNotExist" + DeleteFleetErrorCodeFleetIdMalformed DeleteFleetErrorCode = "fleetIdMalformed" + DeleteFleetErrorCodeFleetNotInDeletableState DeleteFleetErrorCode = "fleetNotInDeletableState" + DeleteFleetErrorCodeUnexpectedError DeleteFleetErrorCode = "unexpectedError" ) // Values returns all known values for DeleteFleetErrorCode. Note that this can be @@ -891,9 +891,9 @@ type DeleteQueuedReservedInstancesErrorCode string // Enum values for DeleteQueuedReservedInstancesErrorCode const ( - DeleteQueuedReservedInstancesErrorCodeReserved_instances_id_invalid DeleteQueuedReservedInstancesErrorCode = "reserved-instances-id-invalid" - DeleteQueuedReservedInstancesErrorCodeReserved_instances_not_in_queued_state DeleteQueuedReservedInstancesErrorCode = "reserved-instances-not-in-queued-state" - DeleteQueuedReservedInstancesErrorCodeUnexpected_error DeleteQueuedReservedInstancesErrorCode = "unexpected-error" + DeleteQueuedReservedInstancesErrorCodeReservedInstancesIdInvalid DeleteQueuedReservedInstancesErrorCode = "reserved-instances-id-invalid" + DeleteQueuedReservedInstancesErrorCodeReservedInstancesNotInQueuedState DeleteQueuedReservedInstancesErrorCode = "reserved-instances-not-in-queued-state" + DeleteQueuedReservedInstancesErrorCodeUnexpectedError DeleteQueuedReservedInstancesErrorCode = "unexpected-error" ) // Values returns all known values for DeleteQueuedReservedInstancesErrorCode. Note @@ -1198,10 +1198,10 @@ type EventType string // Enum values for EventType const ( - EventTypeInstance_change EventType = "instanceChange" - EventTypeBatch_change EventType = "fleetRequestChange" - EventTypeError EventType = "error" - EventTypeInformation EventType = "information" + EventTypeInstanceChange EventType = "instanceChange" + EventTypeBatchChange EventType = "fleetRequestChange" + EventTypeError EventType = "error" + EventTypeInformation EventType = "information" ) // Values returns all known values for EventType. Note that this can be expanded in @@ -1220,8 +1220,8 @@ type ExcessCapacityTerminationPolicy string // Enum values for ExcessCapacityTerminationPolicy const ( - ExcessCapacityTerminationPolicyNo_termination ExcessCapacityTerminationPolicy = "noTermination" - ExcessCapacityTerminationPolicyDefault ExcessCapacityTerminationPolicy = "default" + ExcessCapacityTerminationPolicyNoTermination ExcessCapacityTerminationPolicy = "noTermination" + ExcessCapacityTerminationPolicyDefault ExcessCapacityTerminationPolicy = "default" ) // Values returns all known values for ExcessCapacityTerminationPolicy. 
Note that @@ -1305,10 +1305,10 @@ type FleetActivityStatus string // Enum values for FleetActivityStatus const ( - FleetActivityStatusError FleetActivityStatus = "error" - FleetActivityStatusPending_fulfillment FleetActivityStatus = "pending_fulfillment" - FleetActivityStatusPending_termination FleetActivityStatus = "pending_termination" - FleetActivityStatusFulfilled FleetActivityStatus = "fulfilled" + FleetActivityStatusError FleetActivityStatus = "error" + FleetActivityStatusPendingFulfillment FleetActivityStatus = "pending_fulfillment" + FleetActivityStatusPendingTermination FleetActivityStatus = "pending_termination" + FleetActivityStatusFulfilled FleetActivityStatus = "fulfilled" ) // Values returns all known values for FleetActivityStatus. Note that this can be @@ -1327,7 +1327,7 @@ type FleetCapacityReservationUsageStrategy string // Enum values for FleetCapacityReservationUsageStrategy const ( - FleetCapacityReservationUsageStrategyUse_capacity_reservations_first FleetCapacityReservationUsageStrategy = "use-capacity-reservations-first" + FleetCapacityReservationUsageStrategyUseCapacityReservationsFirst FleetCapacityReservationUsageStrategy = "use-capacity-reservations-first" ) // Values returns all known values for FleetCapacityReservationUsageStrategy. Note @@ -1344,9 +1344,9 @@ type FleetEventType string // Enum values for FleetEventType const ( - FleetEventTypeInstance_change FleetEventType = "instance-change" - FleetEventTypeFleet_change FleetEventType = "fleet-change" - FleetEventTypeService_error FleetEventType = "service-error" + FleetEventTypeInstanceChange FleetEventType = "instance-change" + FleetEventTypeFleetChange FleetEventType = "fleet-change" + FleetEventTypeServiceError FleetEventType = "service-error" ) // Values returns all known values for FleetEventType. Note that this can be @@ -1364,8 +1364,8 @@ type FleetExcessCapacityTerminationPolicy string // Enum values for FleetExcessCapacityTerminationPolicy const ( - FleetExcessCapacityTerminationPolicyNo_termination FleetExcessCapacityTerminationPolicy = "no-termination" - FleetExcessCapacityTerminationPolicyTermination FleetExcessCapacityTerminationPolicy = "termination" + FleetExcessCapacityTerminationPolicyNoTermination FleetExcessCapacityTerminationPolicy = "no-termination" + FleetExcessCapacityTerminationPolicyTermination FleetExcessCapacityTerminationPolicy = "termination" ) // Values returns all known values for FleetExcessCapacityTerminationPolicy. Note @@ -1383,8 +1383,8 @@ type FleetOnDemandAllocationStrategy string // Enum values for FleetOnDemandAllocationStrategy const ( - FleetOnDemandAllocationStrategyLowest_price FleetOnDemandAllocationStrategy = "lowest-price" - FleetOnDemandAllocationStrategyPrioritized FleetOnDemandAllocationStrategy = "prioritized" + FleetOnDemandAllocationStrategyLowestPrice FleetOnDemandAllocationStrategy = "lowest-price" + FleetOnDemandAllocationStrategyPrioritized FleetOnDemandAllocationStrategy = "prioritized" ) // Values returns all known values for FleetOnDemandAllocationStrategy. 
Note that @@ -1402,13 +1402,13 @@ type FleetStateCode string // Enum values for FleetStateCode const ( - FleetStateCodeSubmitted FleetStateCode = "submitted" - FleetStateCodeActive FleetStateCode = "active" - FleetStateCodeDeleted FleetStateCode = "deleted" - FleetStateCodeFailed FleetStateCode = "failed" - FleetStateCodeDeleted_running FleetStateCode = "deleted_running" - FleetStateCodeDeleted_terminating_instances FleetStateCode = "deleted_terminating" - FleetStateCodeModifying FleetStateCode = "modifying" + FleetStateCodeSubmitted FleetStateCode = "submitted" + FleetStateCodeActive FleetStateCode = "active" + FleetStateCodeDeleted FleetStateCode = "deleted" + FleetStateCodeFailed FleetStateCode = "failed" + FleetStateCodeDeletedRunning FleetStateCode = "deleted_running" + FleetStateCodeDeletedTerminatingInstances FleetStateCode = "deleted_terminating" + FleetStateCodeModifying FleetStateCode = "modifying" ) // Values returns all known values for FleetStateCode. Note that this can be @@ -1743,8 +1743,8 @@ type InstanceHealthStatus string // Enum values for InstanceHealthStatus const ( - InstanceHealthStatusHealthy_status InstanceHealthStatus = "healthy" - InstanceHealthStatusUnhealthy_status InstanceHealthStatus = "unhealthy" + InstanceHealthStatusHealthyStatus InstanceHealthStatus = "healthy" + InstanceHealthStatusUnhealthyStatus InstanceHealthStatus = "unhealthy" ) // Values returns all known values for InstanceHealthStatus. Note that this can be @@ -1781,8 +1781,8 @@ type InstanceLifecycle string // Enum values for InstanceLifecycle const ( - InstanceLifecycleSpot InstanceLifecycle = "spot" - InstanceLifecycleOn_demand InstanceLifecycle = "on-demand" + InstanceLifecycleSpot InstanceLifecycle = "spot" + InstanceLifecycleOnDemand InstanceLifecycle = "on-demand" ) // Values returns all known values for InstanceLifecycle. Note that this can be @@ -2664,12 +2664,12 @@ type LaunchTemplateErrorCode string // Enum values for LaunchTemplateErrorCode const ( - LaunchTemplateErrorCodeLaunch_template_id_does_not_exist LaunchTemplateErrorCode = "launchTemplateIdDoesNotExist" - LaunchTemplateErrorCodeLaunch_template_id_malformed LaunchTemplateErrorCode = "launchTemplateIdMalformed" - LaunchTemplateErrorCodeLaunch_template_name_does_not_exist LaunchTemplateErrorCode = "launchTemplateNameDoesNotExist" - LaunchTemplateErrorCodeLaunch_template_name_malformed LaunchTemplateErrorCode = "launchTemplateNameMalformed" - LaunchTemplateErrorCodeLaunch_template_version_does_not_exist LaunchTemplateErrorCode = "launchTemplateVersionDoesNotExist" - LaunchTemplateErrorCodeUnexpected_error LaunchTemplateErrorCode = "unexpectedError" + LaunchTemplateErrorCodeLaunchTemplateIdDoesNotExist LaunchTemplateErrorCode = "launchTemplateIdDoesNotExist" + LaunchTemplateErrorCodeLaunchTemplateIdMalformed LaunchTemplateErrorCode = "launchTemplateIdMalformed" + LaunchTemplateErrorCodeLaunchTemplateNameDoesNotExist LaunchTemplateErrorCode = "launchTemplateNameDoesNotExist" + LaunchTemplateErrorCodeLaunchTemplateNameMalformed LaunchTemplateErrorCode = "launchTemplateNameMalformed" + LaunchTemplateErrorCodeLaunchTemplateVersionDoesNotExist LaunchTemplateErrorCode = "launchTemplateVersionDoesNotExist" + LaunchTemplateErrorCodeUnexpectedError LaunchTemplateErrorCode = "unexpectedError" ) // Values returns all known values for LaunchTemplateErrorCode. 
Note that this can @@ -3155,8 +3155,8 @@ type OnDemandAllocationStrategy string // Enum values for OnDemandAllocationStrategy const ( - OnDemandAllocationStrategyLowest_price OnDemandAllocationStrategy = "lowestPrice" - OnDemandAllocationStrategyPrioritized OnDemandAllocationStrategy = "prioritized" + OnDemandAllocationStrategyLowestPrice OnDemandAllocationStrategy = "lowestPrice" + OnDemandAllocationStrategyPrioritized OnDemandAllocationStrategy = "prioritized" ) // Values returns all known values for OnDemandAllocationStrategy. Note that this @@ -3191,9 +3191,9 @@ type PaymentOption string // Enum values for PaymentOption const ( - PaymentOptionAll_upfront PaymentOption = "AllUpfront" - PaymentOptionPartial_upfront PaymentOption = "PartialUpfront" - PaymentOptionNo_upfront PaymentOption = "NoUpfront" + PaymentOptionAllUpfront PaymentOption = "AllUpfront" + PaymentOptionPartialUpfront PaymentOption = "PartialUpfront" + PaymentOptionNoUpfront PaymentOption = "NoUpfront" ) // Values returns all known values for PaymentOption. Note that this can be @@ -3453,10 +3453,10 @@ type ReservationState string // Enum values for ReservationState const ( - ReservationStatePayment_pending ReservationState = "payment-pending" - ReservationStatePayment_failed ReservationState = "payment-failed" - ReservationStateActive ReservationState = "active" - ReservationStateRetired ReservationState = "retired" + ReservationStatePaymentPending ReservationState = "payment-pending" + ReservationStatePaymentFailed ReservationState = "payment-failed" + ReservationStateActive ReservationState = "active" + ReservationStateRetired ReservationState = "retired" ) // Values returns all known values for ReservationState. Note that this can be @@ -3756,8 +3756,8 @@ type Scope string // Enum values for Scope const ( - ScopeAvailability_zone Scope = "Availability Zone" - ScopeRegional Scope = "Region" + ScopeAvailabilityZone Scope = "Availability Zone" + ScopeRegional Scope = "Region" ) // Values returns all known values for Scope. Note that this can be expanded in the @@ -3872,9 +3872,9 @@ type SpotAllocationStrategy string // Enum values for SpotAllocationStrategy const ( - SpotAllocationStrategyLowest_price SpotAllocationStrategy = "lowest-price" - SpotAllocationStrategyDiversified SpotAllocationStrategy = "diversified" - SpotAllocationStrategyCapacity_optimized SpotAllocationStrategy = "capacity-optimized" + SpotAllocationStrategyLowestPrice SpotAllocationStrategy = "lowest-price" + SpotAllocationStrategyDiversified SpotAllocationStrategy = "diversified" + SpotAllocationStrategyCapacityOptimized SpotAllocationStrategy = "capacity-optimized" ) // Values returns all known values for SpotAllocationStrategy. 
Note that this can @@ -4610,10 +4610,10 @@ type UnsuccessfulInstanceCreditSpecificationErrorCode string // Enum values for UnsuccessfulInstanceCreditSpecificationErrorCode const ( - UnsuccessfulInstanceCreditSpecificationErrorCodeInvalid_instance_id UnsuccessfulInstanceCreditSpecificationErrorCode = "InvalidInstanceID.Malformed" - UnsuccessfulInstanceCreditSpecificationErrorCodeInstance_not_found UnsuccessfulInstanceCreditSpecificationErrorCode = "InvalidInstanceID.NotFound" - UnsuccessfulInstanceCreditSpecificationErrorCodeIncorrect_instance_state UnsuccessfulInstanceCreditSpecificationErrorCode = "IncorrectInstanceState" - UnsuccessfulInstanceCreditSpecificationErrorCodeInstance_credit_specification_not_supported UnsuccessfulInstanceCreditSpecificationErrorCode = "InstanceCreditSpecification.NotSupported" + UnsuccessfulInstanceCreditSpecificationErrorCodeInvalidInstanceId UnsuccessfulInstanceCreditSpecificationErrorCode = "InvalidInstanceID.Malformed" + UnsuccessfulInstanceCreditSpecificationErrorCodeInstanceNotFound UnsuccessfulInstanceCreditSpecificationErrorCode = "InvalidInstanceID.NotFound" + UnsuccessfulInstanceCreditSpecificationErrorCodeIncorrectInstanceState UnsuccessfulInstanceCreditSpecificationErrorCode = "IncorrectInstanceState" + UnsuccessfulInstanceCreditSpecificationErrorCodeInstanceCreditSpecificationNotSupported UnsuccessfulInstanceCreditSpecificationErrorCode = "InstanceCreditSpecification.NotSupported" ) // Values returns all known values for diff --git a/service/ec2/types/types.go b/service/ec2/types/types.go index 9d452684a19..e0a94ebe817 100644 --- a/service/ec2/types/types.go +++ b/service/ec2/types/types.go @@ -413,11 +413,11 @@ type CapacityReservation struct { // Indicates the way in which the Capacity Reservation ends. A Capacity Reservation // can have one of the following end types: // - // * unlimited - The Capacity - // Reservation remains active until you explicitly cancel it. + // * unlimited - The Capacity Reservation + // remains active until you explicitly cancel it. // - // * limited - The - // Capacity Reservation expires automatically at a specified date and time. + // * limited - The Capacity + // Reservation expires automatically at a specified date and time. EndDateType EndDateType // Indicates whether the Capacity Reservation supports instances with temporary, @@ -427,16 +427,16 @@ type CapacityReservation struct { // Indicates the type of instance launches that the Capacity Reservation accepts. // The options include: // - // * open - The Capacity Reservation accepts all - // instances that have matching attributes (instance type, platform, and - // Availability Zone). Instances that have matching attributes launch into the - // Capacity Reservation automatically without specifying any additional - // parameters. + // * open - The Capacity Reservation accepts all instances + // that have matching attributes (instance type, platform, and Availability Zone). + // Instances that have matching attributes launch into the Capacity Reservation + // automatically without specifying any additional parameters. // - // * targeted - The Capacity Reservation only accepts instances - // that have matching attributes (instance type, platform, and Availability Zone), - // and explicitly target the Capacity Reservation. This ensures that only permitted - // instances can use the reserved capacity. 
+ // * targeted - The + // Capacity Reservation only accepts instances that have matching attributes + // (instance type, platform, and Availability Zone), and explicitly target the + // Capacity Reservation. This ensures that only permitted instances can use the + // reserved capacity. InstanceMatchCriteria InstanceMatchCriteria // The type of operating system for which the Capacity Reservation reserves @@ -452,24 +452,24 @@ type CapacityReservation struct { // The current state of the Capacity Reservation. A Capacity Reservation can be in // one of the following states: // - // * active - The Capacity Reservation is active - // and the capacity is available for your use. + // * active - The Capacity Reservation is active and + // the capacity is available for your use. // - // * expired - The Capacity - // Reservation expired automatically at the date and time specified in your - // request. The reserved capacity is no longer available for your use. + // * expired - The Capacity Reservation + // expired automatically at the date and time specified in your request. The + // reserved capacity is no longer available for your use. // - // * - // cancelled - The Capacity Reservation was manually cancelled. The reserved - // capacity is no longer available for your use. + // * cancelled - The + // Capacity Reservation was manually cancelled. The reserved capacity is no longer + // available for your use. // - // * pending - The Capacity - // Reservation request was successful but the capacity provisioning is still - // pending. + // * pending - The Capacity Reservation request was + // successful but the capacity provisioning is still pending. // - // * failed - The Capacity Reservation request has failed. A request - // might fail due to invalid request parameters, capacity constraints, or instance - // limit constraints. Failed requests are retained for 60 minutes. + // * failed - The + // Capacity Reservation request has failed. A request might fail due to invalid + // request parameters, capacity constraints, or instance limit constraints. Failed + // requests are retained for 60 minutes. State CapacityReservationState // Any tags assigned to the Capacity Reservation. @@ -478,11 +478,11 @@ type CapacityReservation struct { // Indicates the tenancy of the Capacity Reservation. A Capacity Reservation can // have one of the following tenancy settings: // - // * default - The Capacity + // * default - The Capacity // Reservation is created on hardware that is shared with other AWS accounts. // - // - // * dedicated - The Capacity Reservation is created on single-tenant hardware that + // * + // dedicated - The Capacity Reservation is created on single-tenant hardware that // is dedicated to a single AWS account. Tenancy CapacityReservationTenancy @@ -562,12 +562,12 @@ type CapacityReservationSpecification struct { // Indicates the instance's Capacity Reservation preferences. Possible preferences // include: // - // * open - The instance can run in any open Capacity Reservation - // that has matching attributes (instance type, platform, Availability Zone). - // + // * open - The instance can run in any open Capacity Reservation that + // has matching attributes (instance type, platform, Availability Zone). // - // * none - The instance avoids running in a Capacity Reservation even if one is - // available. The instance runs as an On-Demand Instance. + // * none - + // The instance avoids running in a Capacity Reservation even if one is available. + // The instance runs as an On-Demand Instance. 
CapacityReservationPreference CapacityReservationPreference // Information about the target Capacity Reservation or Capacity Reservation group. @@ -586,12 +586,12 @@ type CapacityReservationSpecificationResponse struct { // Describes the instance's Capacity Reservation preferences. Possible preferences // include: // - // * open - The instance can run in any open Capacity Reservation - // that has matching attributes (instance type, platform, Availability Zone). - // + // * open - The instance can run in any open Capacity Reservation that + // has matching attributes (instance type, platform, Availability Zone). // - // * none - The instance avoids running in a Capacity Reservation even if one is - // available. The instance runs in On-Demand capacity. + // * none - + // The instance avoids running in a Capacity Reservation even if one is available. + // The instance runs in On-Demand capacity. CapacityReservationPreference CapacityReservationPreference // Information about the targeted Capacity Reservation or Capacity Reservation @@ -926,20 +926,20 @@ type ClientVpnEndpointStatus struct { // The state of the Client VPN endpoint. Possible states include: // - // * + // * // pending-associate - The Client VPN endpoint has been created but no target // networks have been associated. The Client VPN endpoint cannot accept // connections. // - // * available - The Client VPN endpoint has been created and a + // * available - The Client VPN endpoint has been created and a // target network has been associated. The Client VPN endpoint can accept // connections. // - // * deleting - The Client VPN endpoint is being deleted. The - // Client VPN endpoint cannot accept connections. + // * deleting - The Client VPN endpoint is being deleted. The Client + // VPN endpoint cannot accept connections. // - // * deleted - The Client VPN - // endpoint has been deleted. The Client VPN endpoint cannot accept connections. + // * deleted - The Client VPN endpoint has + // been deleted. The Client VPN endpoint cannot accept connections. Code ClientVpnEndpointStatusCode // A message about the status of the Client VPN endpoint. @@ -1374,11 +1374,11 @@ type DescribeFastSnapshotRestoreSuccessItem struct { // The reason for the state transition. The possible values are as follows: // - // * + // * // Client.UserInitiated - The state successfully transitioned to enabling or // disabling. // - // * Client.UserInitiated - Lifecycle state transition - The state + // * Client.UserInitiated - Lifecycle state transition - The state // successfully transitioned to optimizing, enabled, or disabled. StateTransitionReason *string } @@ -1536,11 +1536,11 @@ type DisableFastSnapshotRestoreSuccessItem struct { // The reason for the state transition. The possible values are as follows: // - // * + // * // Client.UserInitiated - The state successfully transitioned to enabling or // disabling. // - // * Client.UserInitiated - Lifecycle state transition - The state + // * Client.UserInitiated - Lifecycle state transition - The state // successfully transitioned to optimizing, enabled, or disabled. StateTransitionReason *string } @@ -1973,11 +1973,11 @@ type EnableFastSnapshotRestoreSuccessItem struct { // The reason for the state transition. The possible values are as follows: // - // * + // * // Client.UserInitiated - The state successfully transitioned to enabling or // disabling. 
// - // * Client.UserInitiated - Lifecycle state transition - The state + // * Client.UserInitiated - Lifecycle state transition - The state // successfully transitioned to optimizing, enabled, or disabled. StateTransitionReason *string } @@ -1990,77 +1990,77 @@ type EventInformation struct { // The event. The following are the error events: // - // * iamFleetRoleInvalid - The - // EC2 Fleet or Spot Fleet did not have the required permissions either to launch - // or terminate an instance. + // * iamFleetRoleInvalid - The EC2 + // Fleet or Spot Fleet did not have the required permissions either to launch or + // terminate an instance. // - // * spotFleetRequestConfigurationInvalid - The + // * spotFleetRequestConfigurationInvalid - The // configuration is not valid. For more information, see the description of the // event. // - // * spotInstanceCountLimitExceeded - You've reached the limit on the + // * spotInstanceCountLimitExceeded - You've reached the limit on the // number of Spot Instances that you can launch. // // The following are the // fleetRequestChange events: // - // * active - The EC2 Fleet or Spot Fleet request - // has been validated and Amazon EC2 is attempting to maintain the target number of + // * active - The EC2 Fleet or Spot Fleet request has + // been validated and Amazon EC2 is attempting to maintain the target number of // running Spot Instances. // - // * cancelled - The EC2 Fleet or Spot Fleet request - // is canceled and has no running Spot Instances. The EC2 Fleet or Spot Fleet will - // be deleted two days after its instances were terminated. + // * cancelled - The EC2 Fleet or Spot Fleet request is + // canceled and has no running Spot Instances. The EC2 Fleet or Spot Fleet will be + // deleted two days after its instances were terminated. // - // * - // cancelled_running - The EC2 Fleet or Spot Fleet request is canceled and does not - // launch additional Spot Instances. Existing Spot Instances continue to run until - // they are interrupted or terminated. + // * cancelled_running - The + // EC2 Fleet or Spot Fleet request is canceled and does not launch additional Spot + // Instances. Existing Spot Instances continue to run until they are interrupted or + // terminated. // - // * cancelled_terminating - The EC2 Fleet - // or Spot Fleet request is canceled and its Spot Instances are terminating. + // * cancelled_terminating - The EC2 Fleet or Spot Fleet request is + // canceled and its Spot Instances are terminating. // - // * - // expired - The EC2 Fleet or Spot Fleet request has expired. A subsequent event - // indicates that the instances were terminated, if the request was created with + // * expired - The EC2 Fleet or + // Spot Fleet request has expired. A subsequent event indicates that the instances + // were terminated, if the request was created with // TerminateInstancesWithExpiration set. // - // * modify_in_progress - A request to + // * modify_in_progress - A request to // modify the EC2 Fleet or Spot Fleet request was accepted and is in progress. // + // * + // modify_successful - The EC2 Fleet or Spot Fleet request was modified. // - // * modify_successful - The EC2 Fleet or Spot Fleet request was modified. - // - // * + // * // price_update - The price for a launch configuration was adjusted because it was // too high. This change is permanent. // - // * submitted - The EC2 Fleet or Spot - // Fleet request is being evaluated and Amazon EC2 is preparing to launch the - // target number of Spot Instances. 
+ // * submitted - The EC2 Fleet or Spot Fleet + // request is being evaluated and Amazon EC2 is preparing to launch the target + // number of Spot Instances. // // The following are the instanceChange events: // + // * + // launched - A request was fulfilled and a new instance was launched. // - // * launched - A request was fulfilled and a new instance was launched. - // - // * + // * // terminated - An instance was terminated by the user. // // The following are the // Information events: // - // * launchSpecTemporarilyBlacklisted - The configuration - // is not valid and several attempts to launch instances have failed. For more + // * launchSpecTemporarilyBlacklisted - The configuration is + // not valid and several attempts to launch instances have failed. For more // information, see the description of the event. // - // * launchSpecUnusable - The - // price in a launch specification is not valid because it is below the Spot price - // or the Spot price is above the On-Demand price. + // * launchSpecUnusable - The price + // in a launch specification is not valid because it is below the Spot price or the + // Spot price is above the On-Demand price. // - // * fleetProgressHalted - The - // price in every launch specification is not valid. A launch specification might - // become valid if the Spot price changes. + // * fleetProgressHalted - The price in + // every launch specification is not valid. A launch specification might become + // valid if the Spot price changes. EventSubType *string // The ID of the instance. This information is available only for instanceChange @@ -2213,28 +2213,28 @@ type FederatedAuthenticationRequest struct { // supported by a describe operation are documented with the describe operation. // For example: // -// * DescribeAvailabilityZones +// * DescribeAvailabilityZones // -// * DescribeImages +// * DescribeImages // -// * +// * // DescribeInstances // -// * DescribeKeyPairs +// * DescribeKeyPairs // -// * DescribeSecurityGroups +// * DescribeSecurityGroups // -// * +// * // DescribeSnapshots // -// * DescribeSubnets +// * DescribeSubnets // -// * DescribeTags +// * DescribeTags // -// * -// DescribeVolumes +// * DescribeVolumes // -// * DescribeVpcs +// * +// DescribeVpcs type Filter struct { // The name of the filter. Filter names are case-sensitive. @@ -2626,16 +2626,16 @@ type FpgaImageState struct { // The state. The following are the possible values: // - // * pending - AFI bitstream + // * pending - AFI bitstream // generation is in progress. // - // * available - The AFI is available for use. - // + // * available - The AFI is available for use. // - // * failed - AFI bitstream generation failed. + // * + // failed - AFI bitstream generation failed. // - // * unavailable - The AFI is no - // longer available for use. + // * unavailable - The AFI is no longer + // available for use. Code FpgaImageStateCode // If the state is failed, this is the error message. @@ -2729,15 +2729,15 @@ type HistoryRecord struct { // The event type. // - // * error - An error with the Spot Fleet request. + // * error - An error with the Spot Fleet request. // - // * + // * // fleetRequestChange - A change in the status or configuration of the Spot Fleet // request. // - // * instanceChange - An instance was launched or terminated. + // * instanceChange - An instance was launched or terminated. // - // * + // * // Information - An informational event. EventType EventType @@ -3910,22 +3910,22 @@ type InstanceState struct { // decimal values between 0 and 255. 
The valid values for instance-state-code will // all be in the range of the low byte and they are: // - // * 0 : pending + // * 0 : pending // - // * 16 : + // * 16 : // running // - // * 32 : shutting-down + // * 32 : shutting-down // - // * 48 : terminated + // * 48 : terminated // - // * 64 : stopping + // * 64 : stopping // + // * 80 : + // stopped // - // * 80 : stopped - // - // You can ignore the high byte value by zeroing out all of the - // bits above 2^8 or 256 in decimal. + // You can ignore the high byte value by zeroing out all of the bits above + // 2^8 or 256 in decimal. Code *int32 // The current state of the instance. @@ -4462,12 +4462,12 @@ type LaunchTemplateCapacityReservationSpecificationRequest struct { // Indicates the instance's Capacity Reservation preferences. Possible preferences // include: // - // * open - The instance can run in any open Capacity Reservation - // that has matching attributes (instance type, platform, Availability Zone). - // + // * open - The instance can run in any open Capacity Reservation that + // has matching attributes (instance type, platform, Availability Zone). // - // * none - The instance avoids running in a Capacity Reservation even if one is - // available. The instance runs in On-Demand capacity. + // * none - + // The instance avoids running in a Capacity Reservation even if one is available. + // The instance runs in On-Demand capacity. CapacityReservationPreference CapacityReservationPreference // Information about the target Capacity Reservation or Capacity Reservation group. @@ -4480,12 +4480,12 @@ type LaunchTemplateCapacityReservationSpecificationResponse struct { // Indicates the instance's Capacity Reservation preferences. Possible preferences // include: // - // * open - The instance can run in any open Capacity Reservation - // that has matching attributes (instance type, platform, Availability Zone). + // * open - The instance can run in any open Capacity Reservation that + // has matching attributes (instance type, platform, Availability Zone). // - // - // * none - The instance avoids running in a Capacity Reservation even if one is - // available. The instance runs in On-Demand capacity. + // * none - + // The instance avoids running in a Capacity Reservation even if one is available. + // The instance runs in On-Demand capacity. CapacityReservationPreference CapacityReservationPreference // Information about the target Capacity Reservation or Capacity Reservation group. @@ -5477,21 +5477,21 @@ type ModifyVpnTunnelOptionsSpecification struct { // gateway. Constraints: A size /30 CIDR block from the 169.254.0.0/16 range. The // following CIDR blocks are reserved and cannot be used: // - // * 169.254.0.0/30 - // + // * 169.254.0.0/30 // - // * 169.254.1.0/30 + // * + // 169.254.1.0/30 // - // * 169.254.2.0/30 + // * 169.254.2.0/30 // - // * 169.254.3.0/30 + // * 169.254.3.0/30 // - // * - // 169.254.4.0/30 + // * 169.254.4.0/30 // - // * 169.254.5.0/30 + // * + // 169.254.5.0/30 // - // * 169.254.169.252/30 + // * 169.254.169.252/30 TunnelInsideCidr *string // The range of inside IPv6 addresses for the tunnel. Any specified CIDR blocks @@ -5537,26 +5537,26 @@ type NatGateway struct { // If the NAT gateway could not be created, specifies the error message for the // failure, that corresponds to the error code. 
// - // * For + // * For // InsufficientFreeAddressesInSubnet: "Subnet has insufficient free addresses to // create this NAT gateway" // - // * For Gateway.NotAttached: "Network vpc-xxxxxxxx - // has no Internet gateway attached" + // * For Gateway.NotAttached: "Network vpc-xxxxxxxx has + // no Internet gateway attached" // - // * For InvalidAllocationID.NotFound: - // "Elastic IP address eipalloc-xxxxxxxx could not be associated with this NAT - // gateway" + // * For InvalidAllocationID.NotFound: "Elastic IP + // address eipalloc-xxxxxxxx could not be associated with this NAT gateway" // - // * For Resource.AlreadyAssociated: "Elastic IP address - // eipalloc-xxxxxxxx is already associated" + // * For + // Resource.AlreadyAssociated: "Elastic IP address eipalloc-xxxxxxxx is already + // associated" // - // * For InternalError: "Network - // interface eni-xxxxxxxx, created and used internally by this NAT gateway is in an - // invalid state. Please try again." + // * For InternalError: "Network interface eni-xxxxxxxx, created and + // used internally by this NAT gateway is in an invalid state. Please try + // again." // - // * For InvalidSubnetID.NotFound: "The - // specified subnet subnet-xxxxxxxx does not exist or could not be found." + // * For InvalidSubnetID.NotFound: "The specified subnet subnet-xxxxxxxx + // does not exist or could not be found." FailureMessage *string // Information about the IP addresses and network interface associated with the NAT @@ -5573,22 +5573,22 @@ type NatGateway struct { // The state of the NAT gateway. // - // * pending: The NAT gateway is being created - // and is not ready to process traffic. + // * pending: The NAT gateway is being created and + // is not ready to process traffic. // - // * failed: The NAT gateway could not be + // * failed: The NAT gateway could not be // created. Check the failureCode and failureMessage fields for the reason. // - // * + // * // available: The NAT gateway is able to process traffic. This status remains until // you delete the NAT gateway, and does not indicate the health of the NAT // gateway. // - // * deleting: The NAT gateway is in the process of being terminated - // and may still be processing traffic. + // * deleting: The NAT gateway is in the process of being terminated and + // may still be processing traffic. // - // * deleted: The NAT gateway has been - // terminated and is no longer processing traffic. + // * deleted: The NAT gateway has been terminated + // and is no longer processing traffic. State NatGatewayState // The ID of the subnet in which the NAT gateway is located. @@ -7202,14 +7202,14 @@ type Route struct { // Describes how the route was created. // - // * CreateRouteTable - The route was + // * CreateRouteTable - The route was // automatically created when the route table was created. // - // * CreateRoute - The + // * CreateRoute - The // route was manually added to the route table. // - // * EnableVgwRoutePropagation - - // The route was propagated by route propagation. + // * EnableVgwRoutePropagation - The + // route was propagated by route propagation. Origin RouteOrigin // The state of the route. The blackhole state indicates that the route's target @@ -8403,12 +8403,12 @@ type SpotInstanceRequest struct { // The end date of the request, in UTC format (YYYY-MM-DDTHH:MM:SSZ). // - // * For a + // * For a // persistent request, the request remains active until the validUntil date and // time is reached. Otherwise, the request remains active until you cancel it. 
// - // - // * For a one-time request, the request remains active until all instances launch, + // * + // For a one-time request, the request remains active until all instances launch, // the request is canceled, or the validUntil date and time is reached. By default, // the request is valid for 7 days from the date the request was created. ValidUntil *time.Time @@ -8470,13 +8470,13 @@ type SpotMarketOptions struct { // The end date of the request, in UTC format (YYYY-MM-DDTHH:MM:SSZ). Supported // only for persistent requests. // - // * For a persistent request, the request - // remains active until the ValidUntil date and time is reached. Otherwise, the - // request remains active until you cancel it. + // * For a persistent request, the request remains + // active until the ValidUntil date and time is reached. Otherwise, the request + // remains active until you cancel it. // - // * For a one-time request, - // ValidUntil is not supported. The request remains active until all instances - // launch or you cancel the request. + // * For a one-time request, ValidUntil is not + // supported. The request remains active until all instances launch or you cancel + // the request. ValidUntil *time.Time } @@ -8649,48 +8649,48 @@ type StateReason struct { // The message for the state change. // - // * Server.InsufficientInstanceCapacity: - // There was insufficient capacity available to satisfy the launch request. + // * Server.InsufficientInstanceCapacity: There + // was insufficient capacity available to satisfy the launch request. // - // * + // * // Server.InternalError: An internal error caused the instance to terminate during // launch. // - // * Server.ScheduledStop: The instance was stopped due to a scheduled + // * Server.ScheduledStop: The instance was stopped due to a scheduled // retirement. // - // * Server.SpotInstanceShutdown: The instance was stopped because - // the number of Spot requests with a maximum price equal to or higher than the - // Spot price exceeded available capacity or because of an increase in the Spot + // * Server.SpotInstanceShutdown: The instance was stopped because the + // number of Spot requests with a maximum price equal to or higher than the Spot + // price exceeded available capacity or because of an increase in the Spot // price. // - // * Server.SpotInstanceTermination: The instance was terminated - // because the number of Spot requests with a maximum price equal to or higher than - // the Spot price exceeded available capacity or because of an increase in the Spot + // * Server.SpotInstanceTermination: The instance was terminated because + // the number of Spot requests with a maximum price equal to or higher than the + // Spot price exceeded available capacity or because of an increase in the Spot // price. // - // * Client.InstanceInitiatedShutdown: The instance was shut down using - // the shutdown -h command from the instance. + // * Client.InstanceInitiatedShutdown: The instance was shut down using the + // shutdown -h command from the instance. // - // * Client.InstanceTerminated: The + // * Client.InstanceTerminated: The // instance was terminated or rebooted during AMI creation. // - // * + // * // Client.InternalError: A client error caused the instance to terminate during // launch. // - // * Client.InvalidSnapshot.NotFound: The specified snapshot was not + // * Client.InvalidSnapshot.NotFound: The specified snapshot was not // found. 
// - // * Client.UserInitiatedHibernate: Hibernation was initiated on the + // * Client.UserInitiatedHibernate: Hibernation was initiated on the // instance. // - // * Client.UserInitiatedShutdown: The instance was shut down using - // the Amazon EC2 API. + // * Client.UserInitiatedShutdown: The instance was shut down using the + // Amazon EC2 API. // - // * Client.VolumeLimitExceeded: The limit on the number - // of EBS volumes or total storage was exceeded. Decrease usage or request an - // increase in your account limits. + // * Client.VolumeLimitExceeded: The limit on the number of EBS + // volumes or total storage was exceeded. Decrease usage or request an increase in + // your account limits. Message *string } @@ -10659,21 +10659,21 @@ type VpnTunnelOptionsSpecification struct { // gateway. Constraints: A size /30 CIDR block from the 169.254.0.0/16 range. The // following CIDR blocks are reserved and cannot be used: // - // * 169.254.0.0/30 - // + // * 169.254.0.0/30 // - // * 169.254.1.0/30 + // * + // 169.254.1.0/30 // - // * 169.254.2.0/30 + // * 169.254.2.0/30 // - // * 169.254.3.0/30 + // * 169.254.3.0/30 // - // * - // 169.254.4.0/30 + // * 169.254.4.0/30 // - // * 169.254.5.0/30 + // * + // 169.254.5.0/30 // - // * 169.254.169.252/30 + // * 169.254.169.252/30 TunnelInsideCidr *string // The range of inside IPv6 addresses for the tunnel. Any specified CIDR blocks diff --git a/service/ecr/types/enums.go b/service/ecr/types/enums.go index 1dd073f86ee..f9c87a2b415 100644 --- a/service/ecr/types/enums.go +++ b/service/ecr/types/enums.go @@ -148,10 +148,10 @@ type LifecyclePolicyPreviewStatus string // Enum values for LifecyclePolicyPreviewStatus const ( - LifecyclePolicyPreviewStatusIn_progress LifecyclePolicyPreviewStatus = "IN_PROGRESS" - LifecyclePolicyPreviewStatusComplete LifecyclePolicyPreviewStatus = "COMPLETE" - LifecyclePolicyPreviewStatusExpired LifecyclePolicyPreviewStatus = "EXPIRED" - LifecyclePolicyPreviewStatusFailed LifecyclePolicyPreviewStatus = "FAILED" + LifecyclePolicyPreviewStatusInProgress LifecyclePolicyPreviewStatus = "IN_PROGRESS" + LifecyclePolicyPreviewStatusComplete LifecyclePolicyPreviewStatus = "COMPLETE" + LifecyclePolicyPreviewStatusExpired LifecyclePolicyPreviewStatus = "EXPIRED" + LifecyclePolicyPreviewStatusFailed LifecyclePolicyPreviewStatus = "FAILED" ) // Values returns all known values for LifecyclePolicyPreviewStatus. Note that this @@ -170,9 +170,9 @@ type ScanStatus string // Enum values for ScanStatus const ( - ScanStatusIn_progress ScanStatus = "IN_PROGRESS" - ScanStatusComplete ScanStatus = "COMPLETE" - ScanStatusFailed ScanStatus = "FAILED" + ScanStatusInProgress ScanStatus = "IN_PROGRESS" + ScanStatusComplete ScanStatus = "COMPLETE" + ScanStatusFailed ScanStatus = "FAILED" ) // Values returns all known values for ScanStatus. Note that this can be expanded diff --git a/service/ecs/api_op_CreateCapacityProvider.go b/service/ecs/api_op_CreateCapacityProvider.go index ade36222658..d6406854a19 100644 --- a/service/ecs/api_op_CreateCapacityProvider.go +++ b/service/ecs/api_op_CreateCapacityProvider.go @@ -50,31 +50,30 @@ type CreateCapacityProviderInput struct { // organize them. Each tag consists of a key and an optional value, both of which // you define. The following basic restrictions apply to tags: // - // * Maximum - // number of tags per resource - 50 + // * Maximum number of + // tags per resource - 50 // - // * For each resource, each tag key must be - // unique, and each tag key can have only one value. 
+ // * For each resource, each tag key must be unique, and + // each tag key can have only one value. // - // * Maximum key length - - // 128 Unicode characters in UTF-8 - // - // * Maximum value length - 256 Unicode + // * Maximum key length - 128 Unicode // characters in UTF-8 // - // * If your tagging schema is used across multiple - // services and resources, remember that other services may have restrictions on - // allowed characters. Generally allowed characters are: letters, numbers, and - // spaces representable in UTF-8, and the following characters: + - = . _ : / @. + // * Maximum value length - 256 Unicode characters in UTF-8 // + // * + // If your tagging schema is used across multiple services and resources, remember + // that other services may have restrictions on allowed characters. Generally + // allowed characters are: letters, numbers, and spaces representable in UTF-8, and + // the following characters: + - = . _ : / @. // - // * Tag keys and values are case-sensitive. + // * Tag keys and values are + // case-sensitive. // - // * Do not use aws:, AWS:, or any - // upper or lowercase combination of such as a prefix for either keys or values as - // it is reserved for AWS use. You cannot edit or delete tag keys or values with - // this prefix. Tags with this prefix do not count against your tags per resource - // limit. + // * Do not use aws:, AWS:, or any upper or lowercase combination + // of such as a prefix for either keys or values as it is reserved for AWS use. You + // cannot edit or delete tag keys or values with this prefix. Tags with this prefix + // do not count against your tags per resource limit. Tags []*types.Tag } diff --git a/service/ecs/api_op_CreateCluster.go b/service/ecs/api_op_CreateCluster.go index 478fe902963..819bc39b07d 100644 --- a/service/ecs/api_op_CreateCluster.go +++ b/service/ecs/api_op_CreateCluster.go @@ -82,31 +82,30 @@ type CreateClusterInput struct { // them. Each tag consists of a key and an optional value, both of which you // define. The following basic restrictions apply to tags: // - // * Maximum number of + // * Maximum number of // tags per resource - 50 // - // * For each resource, each tag key must be unique, - // and each tag key can have only one value. + // * For each resource, each tag key must be unique, and + // each tag key can have only one value. // - // * Maximum key length - 128 - // Unicode characters in UTF-8 + // * Maximum key length - 128 Unicode + // characters in UTF-8 // - // * Maximum value length - 256 Unicode characters - // in UTF-8 + // * Maximum value length - 256 Unicode characters in UTF-8 // - // * If your tagging schema is used across multiple services and - // resources, remember that other services may have restrictions on allowed - // characters. Generally allowed characters are: letters, numbers, and spaces - // representable in UTF-8, and the following characters: + - = . _ : / @. + // * + // If your tagging schema is used across multiple services and resources, remember + // that other services may have restrictions on allowed characters. Generally + // allowed characters are: letters, numbers, and spaces representable in UTF-8, and + // the following characters: + - = . _ : / @. // - // * - // Tag keys and values are case-sensitive. + // * Tag keys and values are + // case-sensitive. // - // * Do not use aws:, AWS:, or any - // upper or lowercase combination of such as a prefix for either keys or values as - // it is reserved for AWS use. 
You cannot edit or delete tag keys or values with - // this prefix. Tags with this prefix do not count against your tags per resource - // limit. + // * Do not use aws:, AWS:, or any upper or lowercase combination + // of such as a prefix for either keys or values as it is reserved for AWS use. You + // cannot edit or delete tag keys or values with this prefix. Tags with this prefix + // do not count against your tags per resource limit. Tags []*types.Tag } diff --git a/service/ecs/api_op_CreateService.go b/service/ecs/api_op_CreateService.go index 33e21771f36..f69c93b9d96 100644 --- a/service/ecs/api_op_CreateService.go +++ b/service/ecs/api_op_CreateService.go @@ -27,22 +27,22 @@ import ( // is reported as healthy by the load balancer. There are two service scheduler // strategies available: // -// * REPLICA - The replica scheduling strategy places -// and maintains the desired number of tasks across your cluster. By default, the +// * REPLICA - The replica scheduling strategy places and +// maintains the desired number of tasks across your cluster. By default, the // service scheduler spreads tasks across Availability Zones. You can use task // placement strategies and constraints to customize task placement decisions. For // more information, see Service Scheduler Concepts // (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs_services.html) // in the Amazon Elastic Container Service Developer Guide. // -// * DAEMON - The -// daemon scheduling strategy deploys exactly one task on each active container -// instance that meets all of the task placement constraints that you specify in -// your cluster. The service scheduler also evaluates the task placement -// constraints for running tasks and will stop tasks that do not meet the placement -// constraints. When using this strategy, you don't need to specify a desired -// number of tasks, a task placement strategy, or use Service Auto Scaling -// policies. For more information, see Service Scheduler Concepts +// * DAEMON - The daemon +// scheduling strategy deploys exactly one task on each active container instance +// that meets all of the task placement constraints that you specify in your +// cluster. The service scheduler also evaluates the task placement constraints for +// running tasks and will stop tasks that do not meet the placement constraints. +// When using this strategy, you don't need to specify a desired number of tasks, a +// task placement strategy, or use Service Auto Scaling policies. For more +// information, see Service Scheduler Concepts // (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs_services.html) // in the Amazon Elastic Container Service Developer Guide. // @@ -91,25 +91,25 @@ import ( // scheduler launches new tasks, it determines task placement in your cluster using // the following logic: // -// * Determine which of the container instances in your +// * Determine which of the container instances in your // cluster can support your service's task definition (for example, they have the // required CPU, memory, ports, and container instance attributes). 
// -// * By -// default, the service scheduler attempts to balance tasks across Availability -// Zones in this manner (although you can choose a different placement strategy) -// with the placementStrategy parameter): +// * By default, +// the service scheduler attempts to balance tasks across Availability Zones in +// this manner (although you can choose a different placement strategy) with the +// placementStrategy parameter): // -// * Sort the valid container -// instances, giving priority to instances that have the fewest number of running -// tasks for this service in their respective Availability Zone. For example, if -// zone A has one running service task and zones B and C each have zero, valid -// container instances in either zone B or C are considered optimal for -// placement. +// * Sort the valid container instances, giving +// priority to instances that have the fewest number of running tasks for this +// service in their respective Availability Zone. For example, if zone A has one +// running service task and zones B and C each have zero, valid container instances +// in either zone B or C are considered optimal for placement. // -// * Place the new service task on a valid container instance -// in an optimal Availability Zone (based on the previous steps), favoring -// container instances with the fewest number of running tasks for this service. +// * Place the new +// service task on a valid container instance in an optimal Availability Zone +// (based on the previous steps), favoring container instances with the fewest +// number of running tasks for this service. func (c *Client) CreateService(ctx context.Context, params *CreateServiceInput, optFns ...func(*Options)) (*CreateServiceOutput, error) { if params == nil { params = &CreateServiceInput{} @@ -301,22 +301,22 @@ type CreateServiceInput struct { // (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs_services.html). // There are two service scheduler strategies available: // - // * REPLICA-The replica + // * REPLICA-The replica // scheduling strategy places and maintains the desired number of tasks across your // cluster. By default, the service scheduler spreads tasks across Availability // Zones. You can use task placement strategies and constraints to customize task // placement decisions. This scheduler strategy is required if the service is using // the CODE_DEPLOY or EXTERNAL deployment controller types. // - // * DAEMON-The - // daemon scheduling strategy deploys exactly one task on each active container - // instance that meets all of the task placement constraints that you specify in - // your cluster. The service scheduler also evaluates the task placement - // constraints for running tasks and will stop tasks that do not meet the placement - // constraints. When you're using this strategy, you don't need to specify a - // desired number of tasks, a task placement strategy, or use Service Auto Scaling - // policies. Tasks using the Fargate launch type or the CODE_DEPLOY or EXTERNAL - // deployment controller types don't support the DAEMON scheduling strategy. + // * DAEMON-The daemon + // scheduling strategy deploys exactly one task on each active container instance + // that meets all of the task placement constraints that you specify in your + // cluster. The service scheduler also evaluates the task placement constraints for + // running tasks and will stop tasks that do not meet the placement constraints. 
+ // When you're using this strategy, you don't need to specify a desired number of + // tasks, a task placement strategy, or use Service Auto Scaling policies. Tasks + // using the Fargate launch type or the CODE_DEPLOY or EXTERNAL deployment + // controller types don't support the DAEMON scheduling strategy. SchedulingStrategy types.SchedulingStrategy // The details of the service discovery registries to assign to this service. For @@ -332,30 +332,30 @@ type CreateServiceInput struct { // define. When a service is deleted, the tags are deleted as well. The following // basic restrictions apply to tags: // - // * Maximum number of tags per resource - - // 50 + // * Maximum number of tags per resource - 50 // - // * For each resource, each tag key must be unique, and each tag key can - // have only one value. + // * + // For each resource, each tag key must be unique, and each tag key can have only + // one value. // - // * Maximum key length - 128 Unicode characters in - // UTF-8 + // * Maximum key length - 128 Unicode characters in UTF-8 // - // * Maximum value length - 256 Unicode characters in UTF-8 + // * Maximum + // value length - 256 Unicode characters in UTF-8 // - // * If - // your tagging schema is used across multiple services and resources, remember - // that other services may have restrictions on allowed characters. Generally - // allowed characters are: letters, numbers, and spaces representable in UTF-8, and - // the following characters: + - = . _ : / @. + // * If your tagging schema is used + // across multiple services and resources, remember that other services may have + // restrictions on allowed characters. Generally allowed characters are: letters, + // numbers, and spaces representable in UTF-8, and the following characters: + - = + // . _ : / @. // - // * Tag keys and values are - // case-sensitive. + // * Tag keys and values are case-sensitive. // - // * Do not use aws:, AWS:, or any upper or lowercase - // combination of such as a prefix for either keys or values as it is reserved for - // AWS use. You cannot edit or delete tag keys or values with this prefix. Tags - // with this prefix do not count against your tags per resource limit. + // * Do not use aws:, AWS:, + // or any upper or lowercase combination of such as a prefix for either keys or + // values as it is reserved for AWS use. You cannot edit or delete tag keys or + // values with this prefix. Tags with this prefix do not count against your tags + // per resource limit. Tags []*types.Tag // The family and revision (family:revision) or full ARN of the task definition to diff --git a/service/ecs/api_op_CreateTaskSet.go b/service/ecs/api_op_CreateTaskSet.go index 3cdf4912b00..b7b0693c998 100644 --- a/service/ecs/api_op_CreateTaskSet.go +++ b/service/ecs/api_op_CreateTaskSet.go @@ -113,30 +113,30 @@ type CreateTaskSetInput struct { // define. When a service is deleted, the tags are deleted as well. The following // basic restrictions apply to tags: // - // * Maximum number of tags per resource - - // 50 + // * Maximum number of tags per resource - 50 // - // * For each resource, each tag key must be unique, and each tag key can - // have only one value. + // * + // For each resource, each tag key must be unique, and each tag key can have only + // one value. 
// - // * Maximum key length - 128 Unicode characters in - // UTF-8 + // * Maximum key length - 128 Unicode characters in UTF-8 // - // * Maximum value length - 256 Unicode characters in UTF-8 + // * Maximum + // value length - 256 Unicode characters in UTF-8 // - // * If - // your tagging schema is used across multiple services and resources, remember - // that other services may have restrictions on allowed characters. Generally - // allowed characters are: letters, numbers, and spaces representable in UTF-8, and - // the following characters: + - = . _ : / @. + // * If your tagging schema is used + // across multiple services and resources, remember that other services may have + // restrictions on allowed characters. Generally allowed characters are: letters, + // numbers, and spaces representable in UTF-8, and the following characters: + - = + // . _ : / @. // - // * Tag keys and values are - // case-sensitive. + // * Tag keys and values are case-sensitive. // - // * Do not use aws:, AWS:, or any upper or lowercase - // combination of such as a prefix for either keys or values as it is reserved for - // AWS use. You cannot edit or delete tag keys or values with this prefix. Tags - // with this prefix do not count against your tags per resource limit. + // * Do not use aws:, AWS:, + // or any upper or lowercase combination of such as a prefix for either keys or + // values as it is reserved for AWS use. You cannot edit or delete tag keys or + // values with this prefix. Tags with this prefix do not count against your tags + // per resource limit. Tags []*types.Tag } diff --git a/service/ecs/api_op_DescribeClusters.go b/service/ecs/api_op_DescribeClusters.go index 28d7561b7fb..b666cf202c9 100644 --- a/service/ecs/api_op_DescribeClusters.go +++ b/service/ecs/api_op_DescribeClusters.go @@ -40,28 +40,28 @@ type DescribeClustersInput struct { // the settings for the cluster are included. If STATISTICS is specified, the // following additional information, separated by launch type, is included: // - // * + // * // runningEC2TasksCount // - // * runningFargateTasksCount + // * runningFargateTasksCount // - // * - // pendingEC2TasksCount + // * pendingEC2TasksCount // - // * pendingFargateTasksCount + // * + // pendingFargateTasksCount // - // * - // activeEC2ServiceCount + // * activeEC2ServiceCount // - // * activeFargateServiceCount + // * + // activeFargateServiceCount // - // * - // drainingEC2ServiceCount + // * drainingEC2ServiceCount // - // * drainingFargateServiceCount + // * + // drainingFargateServiceCount // - // If TAGS is - // specified, the metadata tags associated with the cluster are included. + // If TAGS is specified, the metadata tags associated + // with the cluster are included. Include []types.ClusterField } diff --git a/service/ecs/api_op_DescribeTaskDefinition.go b/service/ecs/api_op_DescribeTaskDefinition.go index 5b307db1c96..802f7df207d 100644 --- a/service/ecs/api_op_DescribeTaskDefinition.go +++ b/service/ecs/api_op_DescribeTaskDefinition.go @@ -51,31 +51,30 @@ type DescribeTaskDefinitionOutput struct { // organize them. Each tag consists of a key and an optional value, both of which // you define. The following basic restrictions apply to tags: // - // * Maximum - // number of tags per resource - 50 + // * Maximum number of + // tags per resource - 50 // - // * For each resource, each tag key must be - // unique, and each tag key can have only one value. + // * For each resource, each tag key must be unique, and + // each tag key can have only one value. 
// - // * Maximum key length - - // 128 Unicode characters in UTF-8 - // - // * Maximum value length - 256 Unicode + // * Maximum key length - 128 Unicode // characters in UTF-8 // - // * If your tagging schema is used across multiple - // services and resources, remember that other services may have restrictions on - // allowed characters. Generally allowed characters are: letters, numbers, and - // spaces representable in UTF-8, and the following characters: + - = . _ : / @. + // * Maximum value length - 256 Unicode characters in UTF-8 // + // * + // If your tagging schema is used across multiple services and resources, remember + // that other services may have restrictions on allowed characters. Generally + // allowed characters are: letters, numbers, and spaces representable in UTF-8, and + // the following characters: + - = . _ : / @. // - // * Tag keys and values are case-sensitive. + // * Tag keys and values are + // case-sensitive. // - // * Do not use aws:, AWS:, or any - // upper or lowercase combination of such as a prefix for either keys or values as - // it is reserved for AWS use. You cannot edit or delete tag keys or values with - // this prefix. Tags with this prefix do not count against your tags per resource - // limit. + // * Do not use aws:, AWS:, or any upper or lowercase combination + // of such as a prefix for either keys or values as it is reserved for AWS use. You + // cannot edit or delete tag keys or values with this prefix. Tags with this prefix + // do not count against your tags per resource limit. Tags []*types.Tag // The full task definition description. diff --git a/service/ecs/api_op_RegisterContainerInstance.go b/service/ecs/api_op_RegisterContainerInstance.go index 6224907f39a..93ff0f1d6f1 100644 --- a/service/ecs/api_op_RegisterContainerInstance.go +++ b/service/ecs/api_op_RegisterContainerInstance.go @@ -60,31 +60,30 @@ type RegisterContainerInstanceInput struct { // organize them. Each tag consists of a key and an optional value, both of which // you define. The following basic restrictions apply to tags: // - // * Maximum - // number of tags per resource - 50 + // * Maximum number of + // tags per resource - 50 // - // * For each resource, each tag key must be - // unique, and each tag key can have only one value. + // * For each resource, each tag key must be unique, and + // each tag key can have only one value. // - // * Maximum key length - - // 128 Unicode characters in UTF-8 - // - // * Maximum value length - 256 Unicode + // * Maximum key length - 128 Unicode // characters in UTF-8 // - // * If your tagging schema is used across multiple - // services and resources, remember that other services may have restrictions on - // allowed characters. Generally allowed characters are: letters, numbers, and - // spaces representable in UTF-8, and the following characters: + - = . _ : / @. + // * Maximum value length - 256 Unicode characters in UTF-8 // + // * + // If your tagging schema is used across multiple services and resources, remember + // that other services may have restrictions on allowed characters. Generally + // allowed characters are: letters, numbers, and spaces representable in UTF-8, and + // the following characters: + - = . _ : / @. // - // * Tag keys and values are case-sensitive. + // * Tag keys and values are + // case-sensitive. // - // * Do not use aws:, AWS:, or any - // upper or lowercase combination of such as a prefix for either keys or values as - // it is reserved for AWS use. 
You cannot edit or delete tag keys or values with - // this prefix. Tags with this prefix do not count against your tags per resource - // limit. + // * Do not use aws:, AWS:, or any upper or lowercase combination + // of such as a prefix for either keys or values as it is reserved for AWS use. You + // cannot edit or delete tag keys or values with this prefix. Tags with this prefix + // do not count against your tags per resource limit. Tags []*types.Tag // The resources available on the instance. diff --git a/service/ecs/api_op_RegisterTaskDefinition.go b/service/ecs/api_op_RegisterTaskDefinition.go index 8af3945d996..1dc020ae5e2 100644 --- a/service/ecs/api_op_RegisterTaskDefinition.go +++ b/service/ecs/api_op_RegisterTaskDefinition.go @@ -76,22 +76,21 @@ type RegisterTaskDefinitionInput struct { // following values, which determines your range of supported values for the memory // parameter: // - // * 256 (.25 vCPU) - Available memory values: 512 (0.5 GB), 1024 - // (1 GB), 2048 (2 GB) + // * 256 (.25 vCPU) - Available memory values: 512 (0.5 GB), 1024 (1 + // GB), 2048 (2 GB) // - // * 512 (.5 vCPU) - Available memory values: 1024 (1 GB), - // 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) + // * 512 (.5 vCPU) - Available memory values: 1024 (1 GB), 2048 + // (2 GB), 3072 (3 GB), 4096 (4 GB) // - // * 1024 (1 vCPU) - Available memory - // values: 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 - // GB), 8192 (8 GB) + // * 1024 (1 vCPU) - Available memory values: + // 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), + // 8192 (8 GB) // - // * 2048 (2 vCPU) - Available memory values: Between 4096 (4 - // GB) and 16384 (16 GB) in increments of 1024 (1 GB) + // * 2048 (2 vCPU) - Available memory values: Between 4096 (4 GB) and + // 16384 (16 GB) in increments of 1024 (1 GB) // - // * 4096 (4 vCPU) - - // Available memory values: Between 8192 (8 GB) and 30720 (30 GB) in increments of - // 1024 (1 GB) + // * 4096 (4 vCPU) - Available memory + // values: Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) Cpu *string // The Amazon Resource Name (ARN) of the task execution role that grants the Amazon @@ -124,12 +123,12 @@ type RegisterTaskDefinitionInput struct { // (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html) // in the Amazon Elastic Container Service Developer Guide. // - // * For tasks that - // use the host IPC mode, IPC namespace related systemControls are not supported. + // * For tasks that use + // the host IPC mode, IPC namespace related systemControls are not supported. // - // - // * For tasks that use the task IPC mode, IPC namespace related systemControls - // will apply to all containers within a task. + // * + // For tasks that use the task IPC mode, IPC namespace related systemControls will + // apply to all containers within a task. // // This parameter is not supported for // Windows containers or tasks using the Fargate launch type. 
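A minimal usage sketch of the Cpu values documented in the hunk above (the matching Memory ranges appear in the hunk that follows). This is an illustrative aside rather than part of the generated patch; it assumes the pointer-based aws-sdk-go-v2 ECS types shown in this diff, and the family, container, and image names are placeholders:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecs"
	"github.com/aws/aws-sdk-go-v2/service/ecs/types"
)

func main() {
	// Per the table above, 256 CPU units (.25 vCPU) pairs with a task-level
	// memory value of 512, 1024, or 2048 when sizing for the Fargate launch type.
	input := &ecs.RegisterTaskDefinitionInput{
		Family:                  aws.String("example-family"), // placeholder
		Cpu:                     aws.String("256"),
		Memory:                  aws.String("512"),
		RequiresCompatibilities: []types.Compatibility{types.CompatibilityFargate},
		NetworkMode:             types.NetworkModeAwsvpc,
		ContainerDefinitions: []*types.ContainerDefinition{
			{
				Name:      aws.String("web"),                 // placeholder
				Image:     aws.String("nginx:latest"),        // placeholder
				Essential: aws.Bool(true),
			},
		},
	}
	fmt.Println(*input.Family, *input.Cpu, *input.Memory)
}

Only the input struct is built here; passing it to an ECS client's RegisterTaskDefinition call follows the same pattern as the other operations touched by this patch.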
@@ -145,22 +144,21 @@ type RegisterTaskDefinitionInput struct { // and you must use one of the following values, which determines your range of // supported values for the cpu parameter: // - // * 512 (0.5 GB), 1024 (1 GB), 2048 - // (2 GB) - Available cpu values: 256 (.25 vCPU) - // - // * 1024 (1 GB), 2048 (2 GB), - // 3072 (3 GB), 4096 (4 GB) - Available cpu values: 512 (.5 vCPU) + // * 512 (0.5 GB), 1024 (1 GB), 2048 (2 + // GB) - Available cpu values: 256 (.25 vCPU) // - // * 2048 (2 - // GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 - // GB) - Available cpu values: 1024 (1 vCPU) + // * 1024 (1 GB), 2048 (2 GB), 3072 (3 + // GB), 4096 (4 GB) - Available cpu values: 512 (.5 vCPU) // - // * Between 4096 (4 GB) and 16384 - // (16 GB) in increments of 1024 (1 GB) - Available cpu values: 2048 (2 vCPU) + // * 2048 (2 GB), 3072 (3 + // GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) - Available + // cpu values: 1024 (1 vCPU) // + // * Between 4096 (4 GB) and 16384 (16 GB) in increments + // of 1024 (1 GB) - Available cpu values: 2048 (2 vCPU) // - // * Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) - Available - // cpu values: 4096 (4 vCPU) + // * Between 8192 (8 GB) and + // 30720 (30 GB) in increments of 1024 (1 GB) - Available cpu values: 4096 (4 vCPU) Memory *string // The Docker networking mode to use for the containers in the task. The valid @@ -233,31 +231,30 @@ type RegisterTaskDefinitionInput struct { // organize them. Each tag consists of a key and an optional value, both of which // you define. The following basic restrictions apply to tags: // - // * Maximum - // number of tags per resource - 50 - // - // * For each resource, each tag key must be - // unique, and each tag key can have only one value. + // * Maximum number of + // tags per resource - 50 // - // * Maximum key length - - // 128 Unicode characters in UTF-8 + // * For each resource, each tag key must be unique, and + // each tag key can have only one value. // - // * Maximum value length - 256 Unicode + // * Maximum key length - 128 Unicode // characters in UTF-8 // - // * If your tagging schema is used across multiple - // services and resources, remember that other services may have restrictions on - // allowed characters. Generally allowed characters are: letters, numbers, and - // spaces representable in UTF-8, and the following characters: + - = . _ : / @. + // * Maximum value length - 256 Unicode characters in UTF-8 // + // * + // If your tagging schema is used across multiple services and resources, remember + // that other services may have restrictions on allowed characters. Generally + // allowed characters are: letters, numbers, and spaces representable in UTF-8, and + // the following characters: + - = . _ : / @. // - // * Tag keys and values are case-sensitive. + // * Tag keys and values are + // case-sensitive. // - // * Do not use aws:, AWS:, or any - // upper or lowercase combination of such as a prefix for either keys or values as - // it is reserved for AWS use. You cannot edit or delete tag keys or values with - // this prefix. Tags with this prefix do not count against your tags per resource - // limit. + // * Do not use aws:, AWS:, or any upper or lowercase combination + // of such as a prefix for either keys or values as it is reserved for AWS use. You + // cannot edit or delete tag keys or values with this prefix. Tags with this prefix + // do not count against your tags per resource limit. 
Tags []*types.Tag // The short name or full Amazon Resource Name (ARN) of the IAM role that diff --git a/service/ecs/api_op_RunTask.go b/service/ecs/api_op_RunTask.go index 1a3d4f1074d..ca890a0cf3f 100644 --- a/service/ecs/api_op_RunTask.go +++ b/service/ecs/api_op_RunTask.go @@ -25,18 +25,18 @@ import ( // in mind when you carry out an API command that immediately follows a previous // API command. To manage eventual consistency, you can do the following: // -// * +// * // Confirm the state of the resource before you run a command to modify it. Run the // DescribeTasks command using an exponential backoff algorithm to ensure that you // allow enough time for the previous command to propagate through the system. To // do this, run the DescribeTasks command repeatedly, starting with a couple of -// seconds of wait time and increasing gradually up to five minutes of wait time. +// seconds of wait time and increasing gradually up to five minutes of wait +// time. // -// -// * Add wait time between subsequent commands, even if the DescribeTasks command -// returns an accurate response. Apply an exponential backoff algorithm starting -// with a couple of seconds of wait time, and increase gradually up to about five -// minutes of wait time. +// * Add wait time between subsequent commands, even if the DescribeTasks +// command returns an accurate response. Apply an exponential backoff algorithm +// starting with a couple of seconds of wait time, and increase gradually up to +// about five minutes of wait time. func (c *Client) RunTask(ctx context.Context, params *RunTaskInput, optFns ...func(*Options)) (*RunTaskOutput, error) { if params == nil { params = &RunTaskInput{} @@ -163,31 +163,30 @@ type RunTaskInput struct { // them. Each tag consists of a key and an optional value, both of which you // define. The following basic restrictions apply to tags: // - // * Maximum number of + // * Maximum number of // tags per resource - 50 // - // * For each resource, each tag key must be unique, - // and each tag key can have only one value. + // * For each resource, each tag key must be unique, and + // each tag key can have only one value. // - // * Maximum key length - 128 - // Unicode characters in UTF-8 + // * Maximum key length - 128 Unicode + // characters in UTF-8 // - // * Maximum value length - 256 Unicode characters - // in UTF-8 + // * Maximum value length - 256 Unicode characters in UTF-8 // - // * If your tagging schema is used across multiple services and - // resources, remember that other services may have restrictions on allowed - // characters. Generally allowed characters are: letters, numbers, and spaces - // representable in UTF-8, and the following characters: + - = . _ : / @. + // * + // If your tagging schema is used across multiple services and resources, remember + // that other services may have restrictions on allowed characters. Generally + // allowed characters are: letters, numbers, and spaces representable in UTF-8, and + // the following characters: + - = . _ : / @. // - // * - // Tag keys and values are case-sensitive. + // * Tag keys and values are + // case-sensitive. // - // * Do not use aws:, AWS:, or any - // upper or lowercase combination of such as a prefix for either keys or values as - // it is reserved for AWS use. You cannot edit or delete tag keys or values with - // this prefix. Tags with this prefix do not count against your tags per resource - // limit. 
+ // * Do not use aws:, AWS:, or any upper or lowercase combination + // of such as a prefix for either keys or values as it is reserved for AWS use. You + // cannot edit or delete tag keys or values with this prefix. Tags with this prefix + // do not count against your tags per resource limit. Tags []*types.Tag } diff --git a/service/ecs/api_op_StartTask.go b/service/ecs/api_op_StartTask.go index 1e9a0669f31..df4c1db3b69 100644 --- a/service/ecs/api_op_StartTask.go +++ b/service/ecs/api_op_StartTask.go @@ -96,31 +96,30 @@ type StartTaskInput struct { // them. Each tag consists of a key and an optional value, both of which you // define. The following basic restrictions apply to tags: // - // * Maximum number of + // * Maximum number of // tags per resource - 50 // - // * For each resource, each tag key must be unique, - // and each tag key can have only one value. + // * For each resource, each tag key must be unique, and + // each tag key can have only one value. // - // * Maximum key length - 128 - // Unicode characters in UTF-8 + // * Maximum key length - 128 Unicode + // characters in UTF-8 // - // * Maximum value length - 256 Unicode characters - // in UTF-8 + // * Maximum value length - 256 Unicode characters in UTF-8 // - // * If your tagging schema is used across multiple services and - // resources, remember that other services may have restrictions on allowed - // characters. Generally allowed characters are: letters, numbers, and spaces - // representable in UTF-8, and the following characters: + - = . _ : / @. + // * + // If your tagging schema is used across multiple services and resources, remember + // that other services may have restrictions on allowed characters. Generally + // allowed characters are: letters, numbers, and spaces representable in UTF-8, and + // the following characters: + - = . _ : / @. // - // * - // Tag keys and values are case-sensitive. + // * Tag keys and values are + // case-sensitive. // - // * Do not use aws:, AWS:, or any - // upper or lowercase combination of such as a prefix for either keys or values as - // it is reserved for AWS use. You cannot edit or delete tag keys or values with - // this prefix. Tags with this prefix do not count against your tags per resource - // limit. + // * Do not use aws:, AWS:, or any upper or lowercase combination + // of such as a prefix for either keys or values as it is reserved for AWS use. You + // cannot edit or delete tag keys or values with this prefix. Tags with this prefix + // do not count against your tags per resource limit. Tags []*types.Tag } diff --git a/service/ecs/api_op_TagResource.go b/service/ecs/api_op_TagResource.go index 4e6a24e0027..bf9cc3d0200 100644 --- a/service/ecs/api_op_TagResource.go +++ b/service/ecs/api_op_TagResource.go @@ -42,31 +42,30 @@ type TagResourceInput struct { // The tags to add to the resource. A tag is an array of key-value pairs. The // following basic restrictions apply to tags: // - // * Maximum number of tags per + // * Maximum number of tags per // resource - 50 // - // * For each resource, each tag key must be unique, and each - // tag key can have only one value. + // * For each resource, each tag key must be unique, and each tag + // key can have only one value. 
// - // * Maximum key length - 128 Unicode - // characters in UTF-8 - // - // * Maximum value length - 256 Unicode characters in + // * Maximum key length - 128 Unicode characters in // UTF-8 // - // * If your tagging schema is used across multiple services and - // resources, remember that other services may have restrictions on allowed - // characters. Generally allowed characters are: letters, numbers, and spaces - // representable in UTF-8, and the following characters: + - = . _ : / @. + // * Maximum value length - 256 Unicode characters in UTF-8 + // + // * If your + // tagging schema is used across multiple services and resources, remember that + // other services may have restrictions on allowed characters. Generally allowed + // characters are: letters, numbers, and spaces representable in UTF-8, and the + // following characters: + - = . _ : / @. // - // * - // Tag keys and values are case-sensitive. + // * Tag keys and values are + // case-sensitive. // - // * Do not use aws:, AWS:, or any - // upper or lowercase combination of such as a prefix for either keys or values as - // it is reserved for AWS use. You cannot edit or delete tag keys or values with - // this prefix. Tags with this prefix do not count against your tags per resource - // limit. + // * Do not use aws:, AWS:, or any upper or lowercase combination + // of such as a prefix for either keys or values as it is reserved for AWS use. You + // cannot edit or delete tag keys or values with this prefix. Tags with this prefix + // do not count against your tags per resource limit. // // This member is required. Tags []*types.Tag diff --git a/service/ecs/api_op_UpdateContainerInstancesState.go b/service/ecs/api_op_UpdateContainerInstancesState.go index db56e6f71d7..294afea2b22 100644 --- a/service/ecs/api_op_UpdateContainerInstancesState.go +++ b/service/ecs/api_op_UpdateContainerInstancesState.go @@ -27,32 +27,32 @@ import ( // minimumHealthyPercent and maximumPercent. You can change the deployment // configuration of your service using UpdateService. // -// * If -// minimumHealthyPercent is below 100%, the scheduler can ignore desiredCount -// temporarily during task replacement. For example, desiredCount is four tasks, a -// minimum of 50% allows the scheduler to stop two existing tasks before starting -// two new tasks. If the minimum is 100%, the service scheduler can't remove -// existing tasks until the replacement tasks are considered healthy. Tasks for -// services that do not use a load balancer are considered healthy if they are in -// the RUNNING state. Tasks for services that use a load balancer are considered -// healthy if they are in the RUNNING state and the container instance they are -// hosted on is reported as healthy by the load balancer. +// * If minimumHealthyPercent +// is below 100%, the scheduler can ignore desiredCount temporarily during task +// replacement. For example, desiredCount is four tasks, a minimum of 50% allows +// the scheduler to stop two existing tasks before starting two new tasks. If the +// minimum is 100%, the service scheduler can't remove existing tasks until the +// replacement tasks are considered healthy. Tasks for services that do not use a +// load balancer are considered healthy if they are in the RUNNING state. Tasks for +// services that use a load balancer are considered healthy if they are in the +// RUNNING state and the container instance they are hosted on is reported as +// healthy by the load balancer. 
// -// * The maximumPercent -// parameter represents an upper limit on the number of running tasks during task -// replacement, which enables you to define the replacement batch size. For -// example, if desiredCount is four tasks, a maximum of 200% starts four new tasks -// before stopping the four tasks to be drained, provided that the cluster -// resources required to do this are available. If the maximum is 100%, then -// replacement tasks can't start until the draining tasks have stopped. +// * The maximumPercent parameter represents an +// upper limit on the number of running tasks during task replacement, which +// enables you to define the replacement batch size. For example, if desiredCount +// is four tasks, a maximum of 200% starts four new tasks before stopping the four +// tasks to be drained, provided that the cluster resources required to do this are +// available. If the maximum is 100%, then replacement tasks can't start until the +// draining tasks have stopped. // -// Any -// PENDING or RUNNING tasks that do not belong to a service are not affected. You -// must wait for them to finish or stop them manually. A container instance has -// completed draining when it has no more RUNNING tasks. You can verify this using -// ListTasks. When a container instance has been drained, you can set a container -// instance to ACTIVE status and once it has reached that status the Amazon ECS -// scheduler can begin scheduling tasks on the instance again. +// Any PENDING or RUNNING tasks that do not belong to +// a service are not affected. You must wait for them to finish or stop them +// manually. A container instance has completed draining when it has no more +// RUNNING tasks. You can verify this using ListTasks. When a container instance +// has been drained, you can set a container instance to ACTIVE status and once it +// has reached that status the Amazon ECS scheduler can begin scheduling tasks on +// the instance again. func (c *Client) UpdateContainerInstancesState(ctx context.Context, params *UpdateContainerInstancesStateInput, optFns ...func(*Options)) (*UpdateContainerInstancesStateOutput, error) { if params == nil { params = &UpdateContainerInstancesStateInput{} diff --git a/service/ecs/api_op_UpdateService.go b/service/ecs/api_op_UpdateService.go index ed5ab7d379f..503a0cb0d9f 100644 --- a/service/ecs/api_op_UpdateService.go +++ b/service/ecs/api_op_UpdateService.go @@ -48,8 +48,8 @@ import ( // deployment configuration parameters, minimumHealthyPercent and maximumPercent, // to determine the deployment strategy. // -// * If minimumHealthyPercent is below -// 100%, the scheduler can ignore desiredCount temporarily during a deployment. For +// * If minimumHealthyPercent is below 100%, +// the scheduler can ignore desiredCount temporarily during a deployment. For // example, if desiredCount is four tasks, a minimum of 50% allows the scheduler to // stop two existing tasks before starting two new tasks. Tasks for services that // do not use a load balancer are considered healthy if they are in the RUNNING @@ -57,7 +57,7 @@ import ( // they are in the RUNNING state and the container instance they are hosted on is // reported as healthy by the load balancer. // -// * The maximumPercent parameter +// * The maximumPercent parameter // represents an upper limit on the number of running tasks during a deployment, // which enables you to define the deployment batch size. 
For example, if // desiredCount is four tasks, a maximum of 200% starts four new tasks before @@ -72,39 +72,39 @@ import ( // When the service scheduler launches new tasks, it determines task placement in // your cluster with the following logic: // -// * Determine which of the container +// * Determine which of the container // instances in your cluster can support your service's task definition (for // example, they have the required CPU, memory, ports, and container instance // attributes). // -// * By default, the service scheduler attempts to balance tasks +// * By default, the service scheduler attempts to balance tasks // across Availability Zones in this manner (although you can choose a different // placement strategy): // -// * Sort the valid container instances by the fewest -// number of running tasks for this service in the same Availability Zone as the -// instance. For example, if zone A has one running service task and zones B and C -// each have zero, valid container instances in either zone B or C are considered -// optimal for placement. +// * Sort the valid container instances by the fewest number +// of running tasks for this service in the same Availability Zone as the instance. +// For example, if zone A has one running service task and zones B and C each have +// zero, valid container instances in either zone B or C are considered optimal for +// placement. // -// * Place the new service task on a valid -// container instance in an optimal Availability Zone (based on the previous -// steps), favoring container instances with the fewest number of running tasks for -// this service. +// * Place the new service task on a valid container instance in an +// optimal Availability Zone (based on the previous steps), favoring container +// instances with the fewest number of running tasks for this service. // -// When the service scheduler stops running tasks, it attempts to -// maintain balance across the Availability Zones in your cluster using the -// following logic: +// When the +// service scheduler stops running tasks, it attempts to maintain balance across +// the Availability Zones in your cluster using the following logic: // -// * Sort the container instances by the largest number of -// running tasks for this service in the same Availability Zone as the instance. -// For example, if zone A has one running service task and zones B and C each have -// two, container instances in either zone B or C are considered optimal for -// termination. +// * Sort the +// container instances by the largest number of running tasks for this service in +// the same Availability Zone as the instance. For example, if zone A has one +// running service task and zones B and C each have two, container instances in +// either zone B or C are considered optimal for termination. // -// * Stop the task on a container instance in an optimal -// Availability Zone (based on the previous steps), favoring container instances -// with the largest number of running tasks for this service. +// * Stop the task on a +// container instance in an optimal Availability Zone (based on the previous +// steps), favoring container instances with the largest number of running tasks +// for this service. 
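A rough illustration of the two deployment parameters discussed above, again assuming the pointer-style helpers (aws.String, aws.Int32) and a pre-built *ecs.Client rather than anything defined by this patch. With a desiredCount of four, a minimumHealthyPercent of 50% lets the scheduler stop two tasks before starting replacements, while a maximumPercent of 200% lets it start four new tasks before draining the old ones.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecs"
	"github.com/aws/aws-sdk-go-v2/service/ecs/types"
)

// updateServiceDeployment adjusts the desired count together with the
// deployment configuration that governs how many tasks may be stopped or
// started during the rollout.
func updateServiceDeployment(ctx context.Context, client *ecs.Client) error {
	_, err := client.UpdateService(ctx, &ecs.UpdateServiceInput{
		Cluster:      aws.String("default"),
		Service:      aws.String("web"),
		DesiredCount: aws.Int32(4),
		DeploymentConfiguration: &types.DeploymentConfiguration{
			MinimumHealthyPercent: aws.Int32(50),
			MaximumPercent:        aws.Int32(200),
		},
	})
	return err
}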
func (c *Client) UpdateService(ctx context.Context, params *UpdateServiceInput, optFns ...func(*Options)) (*UpdateServiceOutput, error) { if params == nil { params = &UpdateServiceInput{} diff --git a/service/ecs/types/enums.go b/service/ecs/types/enums.go index cebb6069410..a3d5bd779d6 100644 --- a/service/ecs/types/enums.go +++ b/service/ecs/types/enums.go @@ -84,9 +84,9 @@ type CapacityProviderUpdateStatus string // Enum values for CapacityProviderUpdateStatus const ( - CapacityProviderUpdateStatusDelete_in_progress CapacityProviderUpdateStatus = "DELETE_IN_PROGRESS" - CapacityProviderUpdateStatusDelete_complete CapacityProviderUpdateStatus = "DELETE_COMPLETE" - CapacityProviderUpdateStatusDelete_failed CapacityProviderUpdateStatus = "DELETE_FAILED" + CapacityProviderUpdateStatusDeleteInProgress CapacityProviderUpdateStatus = "DELETE_IN_PROGRESS" + CapacityProviderUpdateStatusDeleteComplete CapacityProviderUpdateStatus = "DELETE_COMPLETE" + CapacityProviderUpdateStatusDeleteFailed CapacityProviderUpdateStatus = "DELETE_FAILED" ) // Values returns all known values for CapacityProviderUpdateStatus. Note that this @@ -126,7 +126,7 @@ type ClusterSettingName string // Enum values for ClusterSettingName const ( - ClusterSettingNameContainer_insights ClusterSettingName = "containerInsights" + ClusterSettingNameContainerInsights ClusterSettingName = "containerInsights" ) // Values returns all known values for ClusterSettingName. Note that this can be @@ -216,11 +216,11 @@ type ContainerInstanceStatus string // Enum values for ContainerInstanceStatus const ( - ContainerInstanceStatusActive ContainerInstanceStatus = "ACTIVE" - ContainerInstanceStatusDraining ContainerInstanceStatus = "DRAINING" - ContainerInstanceStatusRegistering ContainerInstanceStatus = "REGISTERING" - ContainerInstanceStatusDeregistering ContainerInstanceStatus = "DEREGISTERING" - ContainerInstanceStatusRegistration_failed ContainerInstanceStatus = "REGISTRATION_FAILED" + ContainerInstanceStatusActive ContainerInstanceStatus = "ACTIVE" + ContainerInstanceStatusDraining ContainerInstanceStatus = "DRAINING" + ContainerInstanceStatusRegistering ContainerInstanceStatus = "REGISTERING" + ContainerInstanceStatusDeregistering ContainerInstanceStatus = "DEREGISTERING" + ContainerInstanceStatusRegistrationFailed ContainerInstanceStatus = "REGISTRATION_FAILED" ) // Values returns all known values for ContainerInstanceStatus. Note that this can @@ -240,9 +240,9 @@ type DeploymentControllerType string // Enum values for DeploymentControllerType const ( - DeploymentControllerTypeEcs DeploymentControllerType = "ECS" - DeploymentControllerTypeCode_deploy DeploymentControllerType = "CODE_DEPLOY" - DeploymentControllerTypeExternal DeploymentControllerType = "EXTERNAL" + DeploymentControllerTypeEcs DeploymentControllerType = "ECS" + DeploymentControllerTypeCodeDeploy DeploymentControllerType = "CODE_DEPLOY" + DeploymentControllerTypeExternal DeploymentControllerType = "EXTERNAL" ) // Values returns all known values for DeploymentControllerType. 
Note that this can @@ -428,7 +428,7 @@ type LogDriver string // Enum values for LogDriver const ( - LogDriverJson_file LogDriver = "json-file" + LogDriverJsonFile LogDriver = "json-file" LogDriverSyslog LogDriver = "syslog" LogDriverJournald LogDriver = "journald" LogDriverGelf LogDriver = "gelf" @@ -534,8 +534,8 @@ type PlacementConstraintType string // Enum values for PlacementConstraintType const ( - PlacementConstraintTypeDistinct_instance PlacementConstraintType = "distinctInstance" - PlacementConstraintTypeMember_of PlacementConstraintType = "memberOf" + PlacementConstraintTypeDistinctInstance PlacementConstraintType = "distinctInstance" + PlacementConstraintTypeMemberOf PlacementConstraintType = "memberOf" ) // Values returns all known values for PlacementConstraintType. Note that this can @@ -588,8 +588,8 @@ type PropagateTags string // Enum values for PropagateTags const ( - PropagateTagsTask_definition PropagateTags = "TASK_DEFINITION" - PropagateTagsService PropagateTags = "SERVICE" + PropagateTagsTaskDefinition PropagateTags = "TASK_DEFINITION" + PropagateTagsService PropagateTags = "SERVICE" ) // Values returns all known values for PropagateTags. Note that this can be @@ -622,8 +622,8 @@ type ResourceType string // Enum values for ResourceType const ( - ResourceTypeGpu ResourceType = "GPU" - ResourceTypeInference_accelerator ResourceType = "InferenceAccelerator" + ResourceTypeGpu ResourceType = "GPU" + ResourceTypeInferenceAccelerator ResourceType = "InferenceAccelerator" ) // Values returns all known values for ResourceType. Note that this can be expanded @@ -708,11 +708,11 @@ type SettingName string // Enum values for SettingName const ( - SettingNameService_long_arn_format SettingName = "serviceLongArnFormat" - SettingNameTask_long_arn_format SettingName = "taskLongArnFormat" - SettingNameContainer_instance_long_arn_format SettingName = "containerInstanceLongArnFormat" - SettingNameAwsvpc_trunking SettingName = "awsvpcTrunking" - SettingNameContainer_insights SettingName = "containerInsights" + SettingNameServiceLongArnFormat SettingName = "serviceLongArnFormat" + SettingNameTaskLongArnFormat SettingName = "taskLongArnFormat" + SettingNameContainerInstanceLongArnFormat SettingName = "containerInstanceLongArnFormat" + SettingNameAwsvpcTrunking SettingName = "awsvpcTrunking" + SettingNameContainerInsights SettingName = "containerInsights" ) // Values returns all known values for SettingName. Note that this can be expanded @@ -750,8 +750,8 @@ type StabilityStatus string // Enum values for StabilityStatus const ( - StabilityStatusSteady_state StabilityStatus = "STEADY_STATE" - StabilityStatusStabilizing StabilityStatus = "STABILIZING" + StabilityStatusSteadyState StabilityStatus = "STEADY_STATE" + StabilityStatusStabilizing StabilityStatus = "STABILIZING" ) // Values returns all known values for StabilityStatus. Note that this can be @@ -768,7 +768,7 @@ type TargetType string // Enum values for TargetType const ( - TargetTypeContainer_instance TargetType = "container-instance" + TargetTypeContainerInstance TargetType = "container-instance" ) // Values returns all known values for TargetType. 
Note that this can be expanded @@ -820,7 +820,7 @@ type TaskDefinitionPlacementConstraintType string // Enum values for TaskDefinitionPlacementConstraintType const ( - TaskDefinitionPlacementConstraintTypeMember_of TaskDefinitionPlacementConstraintType = "memberOf" + TaskDefinitionPlacementConstraintTypeMemberOf TaskDefinitionPlacementConstraintType = "memberOf" ) // Values returns all known values for TaskDefinitionPlacementConstraintType. Note @@ -887,9 +887,9 @@ type TaskStopCode string // Enum values for TaskStopCode const ( - TaskStopCodeTask_failed_to_start TaskStopCode = "TaskFailedToStart" - TaskStopCodeEssential_container_exited TaskStopCode = "EssentialContainerExited" - TaskStopCodeUser_initiated TaskStopCode = "UserInitiated" + TaskStopCodeTaskFailedToStart TaskStopCode = "TaskFailedToStart" + TaskStopCodeEssentialContainerExited TaskStopCode = "EssentialContainerExited" + TaskStopCodeUserInitiated TaskStopCode = "UserInitiated" ) // Values returns all known values for TaskStopCode. Note that this can be expanded diff --git a/service/ecs/types/types.go b/service/ecs/types/types.go index 57ed4821ae3..c1fcd921602 100644 --- a/service/ecs/types/types.go +++ b/service/ecs/types/types.go @@ -135,31 +135,30 @@ type CapacityProvider struct { // organize it. Each tag consists of a key and an optional value, both of which you // define. The following basic restrictions apply to tags: // - // * Maximum number of + // * Maximum number of // tags per resource - 50 // - // * For each resource, each tag key must be unique, - // and each tag key can have only one value. + // * For each resource, each tag key must be unique, and + // each tag key can have only one value. // - // * Maximum key length - 128 - // Unicode characters in UTF-8 + // * Maximum key length - 128 Unicode + // characters in UTF-8 // - // * Maximum value length - 256 Unicode characters - // in UTF-8 + // * Maximum value length - 256 Unicode characters in UTF-8 // - // * If your tagging schema is used across multiple services and - // resources, remember that other services may have restrictions on allowed - // characters. Generally allowed characters are: letters, numbers, and spaces - // representable in UTF-8, and the following characters: + - = . _ : / @. + // * + // If your tagging schema is used across multiple services and resources, remember + // that other services may have restrictions on allowed characters. Generally + // allowed characters are: letters, numbers, and spaces representable in UTF-8, and + // the following characters: + - = . _ : / @. // - // * - // Tag keys and values are case-sensitive. + // * Tag keys and values are + // case-sensitive. // - // * Do not use aws:, AWS:, or any - // upper or lowercase combination of such as a prefix for either keys or values as - // it is reserved for AWS use. You cannot edit or delete tag keys or values with - // this prefix. Tags with this prefix do not count against your tags per resource - // limit. + // * Do not use aws:, AWS:, or any upper or lowercase combination + // of such as a prefix for either keys or values as it is reserved for AWS use. You + // cannot edit or delete tag keys or values with this prefix. Tags with this prefix + // do not count against your tags per resource limit. Tags []*Tag // The update status of the capacity provider. 
The following are the possible @@ -255,24 +254,24 @@ type Cluster struct { // Additional information about your clusters that are separated by launch type, // including: // - // * runningEC2TasksCount + // * runningEC2TasksCount // - // * RunningFargateTasksCount + // * RunningFargateTasksCount // - // * + // * // pendingEC2TasksCount // - // * pendingFargateTasksCount + // * pendingFargateTasksCount // - // * - // activeEC2ServiceCount + // * activeEC2ServiceCount // - // * activeFargateServiceCount + // * + // activeFargateServiceCount // - // * - // drainingEC2ServiceCount + // * drainingEC2ServiceCount // - // * drainingFargateServiceCount + // * + // drainingFargateServiceCount Statistics []*KeyValuePair // The status of the cluster. The following are the possible states that will be @@ -293,31 +292,30 @@ type Cluster struct { // them. Each tag consists of a key and an optional value, both of which you // define. The following basic restrictions apply to tags: // - // * Maximum number of + // * Maximum number of // tags per resource - 50 // - // * For each resource, each tag key must be unique, - // and each tag key can have only one value. + // * For each resource, each tag key must be unique, and + // each tag key can have only one value. // - // * Maximum key length - 128 - // Unicode characters in UTF-8 + // * Maximum key length - 128 Unicode + // characters in UTF-8 // - // * Maximum value length - 256 Unicode characters - // in UTF-8 + // * Maximum value length - 256 Unicode characters in UTF-8 // - // * If your tagging schema is used across multiple services and - // resources, remember that other services may have restrictions on allowed - // characters. Generally allowed characters are: letters, numbers, and spaces - // representable in UTF-8, and the following characters: + - = . _ : / @. + // * + // If your tagging schema is used across multiple services and resources, remember + // that other services may have restrictions on allowed characters. Generally + // allowed characters are: letters, numbers, and spaces representable in UTF-8, and + // the following characters: + - = . _ : / @. // - // * - // Tag keys and values are case-sensitive. + // * Tag keys and values are + // case-sensitive. // - // * Do not use aws:, AWS:, or any - // upper or lowercase combination of such as a prefix for either keys or values as - // it is reserved for AWS use. You cannot edit or delete tag keys or values with - // this prefix. Tags with this prefix do not count against your tags per resource - // limit. + // * Do not use aws:, AWS:, or any upper or lowercase combination + // of such as a prefix for either keys or values as it is reserved for AWS use. You + // cannot edit or delete tag keys or values with this prefix. Tags with this prefix + // do not count against your tags per resource limit. Tags []*Tag } @@ -438,19 +436,18 @@ type ContainerDefinition struct { // values below 2 in your container definitions. For CPU values below 2 (including // null), the behavior varies based on your Amazon ECS container agent version: // + // * + // Agent versions less than or equal to 1.1.0: Null and zero CPU values are passed + // to Docker as 0, which Docker then converts to 1,024 CPU shares. CPU values of 1 + // are passed to Docker as 1, which the Linux kernel converts to two CPU shares. // - // * Agent versions less than or equal to 1.1.0: Null and zero CPU values are - // passed to Docker as 0, which Docker then converts to 1,024 CPU shares. 
CPU - // values of 1 are passed to Docker as 1, which the Linux kernel converts to two - // CPU shares. + // * + // Agent versions greater than or equal to 1.2.0: Null, zero, and CPU values of 1 + // are passed to Docker as 2. // - // * Agent versions greater than or equal to 1.2.0: Null, zero, - // and CPU values of 1 are passed to Docker as 2. - // - // On Windows container instances, - // the CPU limit is enforced as an absolute limit, or a quota. Windows containers - // only have access to the specified amount of CPU that is described in the task - // definition. + // On Windows container instances, the CPU limit is + // enforced as an absolute limit, or a quota. Windows containers only have access + // to the specified amount of CPU that is described in the task definition. Cpu *int32 // The dependencies defined for container startup and shutdown. A container can @@ -625,27 +622,27 @@ type ContainerDefinition struct { // the Docker Remote API (https://docs.docker.com/engine/api/v1.35/) and the IMAGE // parameter of docker run (https://docs.docker.com/engine/reference/run/). // - // * - // When a new task starts, the Amazon ECS container agent pulls the latest version - // of the specified image and tag for the container to use. However, subsequent + // * When + // a new task starts, the Amazon ECS container agent pulls the latest version of + // the specified image and tag for the container to use. However, subsequent // updates to a repository image are not propagated to already running tasks. // - // - // * Images in Amazon ECR repositories can be specified by either using the full + // * + // Images in Amazon ECR repositories can be specified by either using the full // registry/repository:tag or registry/repository@digest. For example, // 012345678910.dkr.ecr..amazonaws.com/:latest or // 012345678910.dkr.ecr..amazonaws.com/@sha256:94afd1f2e64d908bc90dbca0035a5b567EXAMPLE. // - // - // * Images in official repositories on Docker Hub use a single name (for example, + // * + // Images in official repositories on Docker Hub use a single name (for example, // ubuntu or mongo). // - // * Images in other repositories on Docker Hub are - // qualified with an organization name (for example, amazon/amazon-ecs-agent). - // + // * Images in other repositories on Docker Hub are qualified + // with an organization name (for example, amazon/amazon-ecs-agent). // - // * Images in other online repositories are qualified further by a domain name - // (for example, quay.io/assemblyline/ubuntu). + // * Images in + // other online repositories are qualified further by a domain name (for example, + // quay.io/assemblyline/ubuntu). Image *string // When this parameter is true, this allows you to deploy containerized @@ -920,21 +917,20 @@ type ContainerDefinition struct { // use the following formats. If specifying a UID or GID, you must specify it as a // positive integer. // - // * user + // * user // - // * user:group + // * user:group // - // * uid + // * uid // - // * uid:gid + // * uid:gid // - // * - // user:gid + // * user:gid // - // * uid:group + // * + // uid:group // - // This parameter is not supported for Windows - // containers. + // This parameter is not supported for Windows containers. User *string // Data volumes to mount from another container. This parameter maps to VolumesFrom @@ -975,20 +971,20 @@ type ContainerDependency struct { // The dependency condition of the container. 
The following are the available // conditions and their behavior: // - // * START - This condition emulates the - // behavior of links and volumes today. It validates that a dependent container is - // started before permitting other containers to start. + // * START - This condition emulates the behavior + // of links and volumes today. It validates that a dependent container is started + // before permitting other containers to start. // - // * COMPLETE - This - // condition validates that a dependent container runs to completion (exits) before + // * COMPLETE - This condition + // validates that a dependent container runs to completion (exits) before // permitting other containers to start. This can be useful for nonessential // containers that run a script and then exit. // - // * SUCCESS - This condition is - // the same as COMPLETE, but it also requires that the container exits with a zero + // * SUCCESS - This condition is the + // same as COMPLETE, but it also requires that the container exits with a zero // status. // - // * HEALTHY - This condition validates that the dependent container + // * HEALTHY - This condition validates that the dependent container // passes its Docker health check before permitting other containers to start. This // requires that the dependent container has health checks configured. This // condition is confirmed only at task startup. @@ -1089,31 +1085,30 @@ type ContainerInstance struct { // organize them. Each tag consists of a key and an optional value, both of which // you define. The following basic restrictions apply to tags: // - // * Maximum - // number of tags per resource - 50 - // - // * For each resource, each tag key must be - // unique, and each tag key can have only one value. + // * Maximum number of + // tags per resource - 50 // - // * Maximum key length - - // 128 Unicode characters in UTF-8 + // * For each resource, each tag key must be unique, and + // each tag key can have only one value. // - // * Maximum value length - 256 Unicode + // * Maximum key length - 128 Unicode // characters in UTF-8 // - // * If your tagging schema is used across multiple - // services and resources, remember that other services may have restrictions on - // allowed characters. Generally allowed characters are: letters, numbers, and - // spaces representable in UTF-8, and the following characters: + - = . _ : / @. + // * Maximum value length - 256 Unicode characters in UTF-8 // + // * + // If your tagging schema is used across multiple services and resources, remember + // that other services may have restrictions on allowed characters. Generally + // allowed characters are: letters, numbers, and spaces representable in UTF-8, and + // the following characters: + - = . _ : / @. // - // * Tag keys and values are case-sensitive. + // * Tag keys and values are + // case-sensitive. // - // * Do not use aws:, AWS:, or any - // upper or lowercase combination of such as a prefix for either keys or values as - // it is reserved for AWS use. You cannot edit or delete tag keys or values with - // this prefix. Tags with this prefix do not count against your tags per resource - // limit. + // * Do not use aws:, AWS:, or any upper or lowercase combination + // of such as a prefix for either keys or values as it is reserved for AWS use. You + // cannot edit or delete tag keys or values with this prefix. Tags with this prefix + // do not count against your tags per resource limit. Tags []*Tag // The version counter for the container instance. 
Every time a container instance @@ -1518,49 +1513,48 @@ type FirelensConfiguration struct { // task details in the console. The following describes the possible healthStatus // values for a container: // -// * HEALTHY-The container health check has passed +// * HEALTHY-The container health check has passed // successfully. // -// * UNHEALTHY-The container health check has failed. +// * UNHEALTHY-The container health check has failed. // -// * -// UNKNOWN-The container health check is being evaluated or there is no container -// health check defined. +// * UNKNOWN-The +// container health check is being evaluated or there is no container health check +// defined. // -// The following describes the possible healthStatus values -// for a task. The container health check status of nonessential containers do not -// have an effect on the health status of a task. +// The following describes the possible healthStatus values for a task. +// The container health check status of nonessential containers do not have an +// effect on the health status of a task. // -// * HEALTHY-All essential -// containers within the task have passed their health checks. +// * HEALTHY-All essential containers +// within the task have passed their health checks. // -// * UNHEALTHY-One -// or more essential containers have failed their health check. +// * UNHEALTHY-One or more +// essential containers have failed their health check. // -// * UNKNOWN-The -// essential containers within the task are still having their health checks -// evaluated or there are no container health checks defined. +// * UNKNOWN-The essential +// containers within the task are still having their health checks evaluated or +// there are no container health checks defined. // -// If a task is run -// manually, and not as part of a service, the task will continue its lifecycle -// regardless of its health status. For tasks that are part of a service, if the -// task reports as unhealthy then the task will be stopped and the service -// scheduler will replace it. The following are notes about container health check -// support: +// If a task is run manually, and +// not as part of a service, the task will continue its lifecycle regardless of its +// health status. For tasks that are part of a service, if the task reports as +// unhealthy then the task will be stopped and the service scheduler will replace +// it. The following are notes about container health check support: // -// * Container health checks require version 1.17.0 or greater of the -// Amazon ECS container agent. For more information, see Updating the Amazon ECS -// Container Agent +// * Container +// health checks require version 1.17.0 or greater of the Amazon ECS container +// agent. For more information, see Updating the Amazon ECS Container Agent // (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-update.html). // -// -// * Container health checks are supported for Fargate tasks if you are using +// * +// Container health checks are supported for Fargate tasks if you are using // platform version 1.1.0 or greater. For more information, see AWS Fargate // Platform Versions // (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html). // -// -// * Container health checks are not supported for tasks that are part of a service +// * +// Container health checks are not supported for tasks that are part of a service // that is configured to use a Classic Load Balancer. 
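A hypothetical container-level health check of the kind the healthStatus notes above describe. The Docker-style CMD-SHELL command and the pointer field shapes are assumptions based on the conventions visible in this patch; a value like this would be assigned to a container definition's HealthCheck field and, per the notes above, requires agent 1.17.0 or later on EC2 or platform version 1.1.0 or later on Fargate.

package example

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecs/types"
)

// exampleHealthCheck curls the container's local endpoint every 30 seconds,
// marks the container unhealthy after 3 failed retries, and allows a
// 60-second grace period at startup before failures count.
func exampleHealthCheck() *types.HealthCheck {
	return &types.HealthCheck{
		Command:     []*string{aws.String("CMD-SHELL"), aws.String("curl -f http://localhost/ || exit 1")},
		Interval:    aws.Int32(30),
		Timeout:     aws.Int32(5),
		Retries:     aws.Int32(3),
		StartPeriod: aws.Int32(60),
	}
}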
type HealthCheck struct { @@ -1840,16 +1834,16 @@ type LoadBalancer struct { // documentation. The following should be noted when specifying a log configuration // for your containers: // -// * Amazon ECS currently supports a subset of the -// logging drivers available to the Docker daemon (shown in the valid values -// below). Additional log drivers may be available in future releases of the Amazon -// ECS container agent. +// * Amazon ECS currently supports a subset of the logging +// drivers available to the Docker daemon (shown in the valid values below). +// Additional log drivers may be available in future releases of the Amazon ECS +// container agent. // -// * This parameter requires version 1.18 of the Docker -// Remote API or greater on your container instance. +// * This parameter requires version 1.18 of the Docker Remote +// API or greater on your container instance. // -// * For tasks using the EC2 -// launch type, the Amazon ECS container agent running on a container instance must +// * For tasks using the EC2 launch +// type, the Amazon ECS container agent running on a container instance must // register the logging drivers available on that instance with the // ECS_AVAILABLE_LOGGING_DRIVERS environment variable before containers placed on // that instance can use these log configuration options. For more information, see @@ -1857,8 +1851,8 @@ type LoadBalancer struct { // (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) // in the Amazon Elastic Container Service Developer Guide. // -// * For tasks using -// the Fargate launch type, because you do not have access to the underlying +// * For tasks using the +// Fargate launch type, because you do not have access to the underlying // infrastructure your tasks are hosted on, any additional software needed will // have to be installed outside of the task. For example, the Fluentd output // aggregators or a remote host running Logstash to send Gelf logs to. @@ -2119,35 +2113,34 @@ type ProxyConfiguration struct { // The set of network configuration parameters to provide the Container Network // Interface (CNI) plugin, specified as key-value pairs. // - // * IgnoredUID - - // (Required) The user ID (UID) of the proxy container as defined by the user - // parameter in a container definition. This is used to ensure the proxy ignores - // its own traffic. If IgnoredGID is specified, this field can be empty. - // - // * - // IgnoredGID - (Required) The group ID (GID) of the proxy container as defined by - // the user parameter in a container definition. This is used to ensure the proxy - // ignores its own traffic. If IgnoredUID is specified, this field can be empty. - // + // * IgnoredUID - (Required) + // The user ID (UID) of the proxy container as defined by the user parameter in a + // container definition. This is used to ensure the proxy ignores its own traffic. + // If IgnoredGID is specified, this field can be empty. // - // * AppPorts - (Required) The list of ports that the application uses. Network - // traffic to these ports is forwarded to the ProxyIngressPort and - // ProxyEgressPort. + // * IgnoredGID - (Required) + // The group ID (GID) of the proxy container as defined by the user parameter in a + // container definition. This is used to ensure the proxy ignores its own traffic. + // If IgnoredUID is specified, this field can be empty. // - // * ProxyIngressPort - (Required) Specifies the port that - // incoming traffic to the AppPorts is directed to. 
+ // * AppPorts - (Required) + // The list of ports that the application uses. Network traffic to these ports is + // forwarded to the ProxyIngressPort and ProxyEgressPort. // - // * ProxyEgressPort - - // (Required) Specifies the port that outgoing traffic from the AppPorts is - // directed to. + // * ProxyIngressPort - + // (Required) Specifies the port that incoming traffic to the AppPorts is directed + // to. // - // * EgressIgnoredPorts - (Required) The egress traffic going to - // the specified ports is ignored and not redirected to the ProxyEgressPort. It can - // be an empty list. + // * ProxyEgressPort - (Required) Specifies the port that outgoing traffic + // from the AppPorts is directed to. // - // * EgressIgnoredIPs - (Required) The egress traffic going - // to the specified IP addresses is ignored and not redirected to the + // * EgressIgnoredPorts - (Required) The egress + // traffic going to the specified ports is ignored and not redirected to the // ProxyEgressPort. It can be an empty list. + // + // * EgressIgnoredIPs - (Required) The + // egress traffic going to the specified IP addresses is ignored and not redirected + // to the ProxyEgressPort. It can be an empty list. Properties []*KeyValuePair // The proxy type. The only supported value is APPMESH. @@ -2234,15 +2227,15 @@ type Scale struct { // An object representing the secret to expose to your container. Secrets can be // exposed to a container in the following ways: // -// * To inject sensitive data -// into your containers as environment variables, use the secrets container -// definition parameter. -// -// * To reference sensitive information in the log -// configuration of a container, use the secretOptions container definition +// * To inject sensitive data into +// your containers as environment variables, use the secrets container definition // parameter. // -// For more information, see Specifying Sensitive Data +// * To reference sensitive information in the log configuration of a +// container, use the secretOptions container definition parameter. +// +// For more +// information, see Specifying Sensitive Data // (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/specifying-sensitive-data.html) // in the Amazon Elastic Container Service Developer Guide. type Secret struct { @@ -2360,18 +2353,18 @@ type Service struct { // (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs_services.html). // There are two service scheduler strategies available: // - // * REPLICA-The replica + // * REPLICA-The replica // scheduling strategy places and maintains the desired number of tasks across your // cluster. By default, the service scheduler spreads tasks across Availability // Zones. You can use task placement strategies and constraints to customize task // placement decisions. // - // * DAEMON-The daemon scheduling strategy deploys - // exactly one task on each active container instance that meets all of the task - // placement constraints that you specify in your cluster. The service scheduler - // also evaluates the task placement constraints for running tasks and will stop - // tasks that do not meet the placement constraints. Fargate tasks do not support - // the DAEMON scheduling strategy. + // * DAEMON-The daemon scheduling strategy deploys exactly + // one task on each active container instance that meets all of the task placement + // constraints that you specify in your cluster. 
The service scheduler also + // evaluates the task placement constraints for running tasks and will stop tasks + // that do not meet the placement constraints. Fargate tasks do not support the + // DAEMON scheduling strategy. SchedulingStrategy SchedulingStrategy // The ARN that identifies the service. The ARN contains the arn:aws:ecs namespace, @@ -2398,31 +2391,30 @@ type Service struct { // them. Each tag consists of a key and an optional value, both of which you // define. The following basic restrictions apply to tags: // - // * Maximum number of + // * Maximum number of // tags per resource - 50 // - // * For each resource, each tag key must be unique, - // and each tag key can have only one value. + // * For each resource, each tag key must be unique, and + // each tag key can have only one value. // - // * Maximum key length - 128 - // Unicode characters in UTF-8 + // * Maximum key length - 128 Unicode + // characters in UTF-8 // - // * Maximum value length - 256 Unicode characters - // in UTF-8 + // * Maximum value length - 256 Unicode characters in UTF-8 // - // * If your tagging schema is used across multiple services and - // resources, remember that other services may have restrictions on allowed - // characters. Generally allowed characters are: letters, numbers, and spaces - // representable in UTF-8, and the following characters: + - = . _ : / @. + // * + // If your tagging schema is used across multiple services and resources, remember + // that other services may have restrictions on allowed characters. Generally + // allowed characters are: letters, numbers, and spaces representable in UTF-8, and + // the following characters: + - = . _ : / @. // - // * - // Tag keys and values are case-sensitive. + // * Tag keys and values are + // case-sensitive. // - // * Do not use aws:, AWS:, or any - // upper or lowercase combination of such as a prefix for either keys or values as - // it is reserved for AWS use. You cannot edit or delete tag keys or values with - // this prefix. Tags with this prefix do not count against your tags per resource - // limit. + // * Do not use aws:, AWS:, or any upper or lowercase combination + // of such as a prefix for either keys or values as it is reserved for AWS use. You + // cannot edit or delete tag keys or values with this prefix. Tags with this prefix + // do not count against your tags per resource limit. Tags []*Tag // The task definition to use for tasks in the service. This value is specified @@ -2504,13 +2496,13 @@ type Setting struct { // for multiple containers in a single task that also uses either the awsvpc or // host network mode for the following reasons: // -// * For tasks that use the -// awsvpc network mode, if you set systemControls for any container, it applies to -// all containers in the task. If you set different systemControls for multiple +// * For tasks that use the awsvpc +// network mode, if you set systemControls for any container, it applies to all +// containers in the task. If you set different systemControls for multiple // containers in a single task, the container that is started last determines which // systemControls take effect. // -// * For tasks that use the host network mode, the +// * For tasks that use the host network mode, the // systemControls parameter applies to the container instance's kernel parameter as // well as that of all containers of any tasks running on that container instance. type SystemControl struct { @@ -2526,31 +2518,30 @@ type SystemControl struct { // them. 
Each tag consists of a key and an optional value, both of which you // define. The following basic restrictions apply to tags: // -// * Maximum number of +// * Maximum number of // tags per resource - 50 // -// * For each resource, each tag key must be unique, -// and each tag key can have only one value. +// * For each resource, each tag key must be unique, and +// each tag key can have only one value. // -// * Maximum key length - 128 -// Unicode characters in UTF-8 +// * Maximum key length - 128 Unicode +// characters in UTF-8 // -// * Maximum value length - 256 Unicode characters -// in UTF-8 +// * Maximum value length - 256 Unicode characters in UTF-8 // -// * If your tagging schema is used across multiple services and -// resources, remember that other services may have restrictions on allowed -// characters. Generally allowed characters are: letters, numbers, and spaces -// representable in UTF-8, and the following characters: + - = . _ : / @. +// * +// If your tagging schema is used across multiple services and resources, remember +// that other services may have restrictions on allowed characters. Generally +// allowed characters are: letters, numbers, and spaces representable in UTF-8, and +// the following characters: + - = . _ : / @. // -// * -// Tag keys and values are case-sensitive. +// * Tag keys and values are +// case-sensitive. // -// * Do not use aws:, AWS:, or any -// upper or lowercase combination of such as a prefix for either keys or values as -// it is reserved for AWS use. You cannot edit or delete tag keys or values with -// this prefix. Tags with this prefix do not count against your tags per resource -// limit. +// * Do not use aws:, AWS:, or any upper or lowercase combination +// of such as a prefix for either keys or values as it is reserved for AWS use. You +// cannot edit or delete tag keys or values with this prefix. Tags with this prefix +// do not count against your tags per resource limit. type Tag struct { // One part of a key-value pair that make up a tag. 
A key is a general label that @@ -2603,22 +2594,22 @@ type Task struct { // must use one of the following values, which determines your range of supported // values for the memory parameter: // - // * 256 (.25 vCPU) - Available memory - // values: 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) + // * 256 (.25 vCPU) - Available memory values: + // 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) // - // * 512 (.5 vCPU) - Available - // memory values: 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) + // * 512 (.5 vCPU) - Available memory + // values: 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) // - // * 1024 (1 - // vCPU) - Available memory values: 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 - // GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) + // * 1024 (1 vCPU) - + // Available memory values: 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), + // 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) // - // * 2048 (2 vCPU) - Available - // memory values: Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 - // GB) + // * 2048 (2 vCPU) - Available memory + // values: Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) // - // * 4096 (4 vCPU) - Available memory values: Between 8192 (8 GB) and - // 30720 (30 GB) in increments of 1024 (1 GB) + // * + // 4096 (4 vCPU) - Available memory values: Between 8192 (8 GB) and 30720 (30 GB) + // in increments of 1024 (1 GB) Cpu *string // The Unix timestamp for when the task was created (the task entered the PENDING @@ -2669,21 +2660,21 @@ type Task struct { // one of the following values, which determines your range of supported values for // the cpu parameter: // - // * 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available cpu + // * 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available cpu // values: 256 (.25 vCPU) // - // * 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - // - Available cpu values: 512 (.5 vCPU) + // * 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - + // Available cpu values: 512 (.5 vCPU) // - // * 2048 (2 GB), 3072 (3 GB), 4096 (4 - // GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) - Available cpu values: - // 1024 (1 vCPU) + // * 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), + // 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) - Available cpu values: 1024 + // (1 vCPU) // - // * Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 - // (1 GB) - Available cpu values: 2048 (2 vCPU) + // * Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) - + // Available cpu values: 2048 (2 vCPU) // - // * Between 8192 (8 GB) and - // 30720 (30 GB) in increments of 1024 (1 GB) - Available cpu values: 4096 (4 vCPU) + // * Between 8192 (8 GB) and 30720 (30 GB) in + // increments of 1024 (1 GB) - Available cpu values: 4096 (4 vCPU) Memory *string // One or more container overrides. @@ -2731,31 +2722,30 @@ type Task struct { // them. Each tag consists of a key and an optional value, both of which you // define. The following basic restrictions apply to tags: // - // * Maximum number of + // * Maximum number of // tags per resource - 50 // - // * For each resource, each tag key must be unique, - // and each tag key can have only one value. + // * For each resource, each tag key must be unique, and + // each tag key can have only one value. 
// - // * Maximum key length - 128 - // Unicode characters in UTF-8 + // * Maximum key length - 128 Unicode + // characters in UTF-8 // - // * Maximum value length - 256 Unicode characters - // in UTF-8 + // * Maximum value length - 256 Unicode characters in UTF-8 // - // * If your tagging schema is used across multiple services and - // resources, remember that other services may have restrictions on allowed - // characters. Generally allowed characters are: letters, numbers, and spaces - // representable in UTF-8, and the following characters: + - = . _ : / @. + // * + // If your tagging schema is used across multiple services and resources, remember + // that other services may have restrictions on allowed characters. Generally + // allowed characters are: letters, numbers, and spaces representable in UTF-8, and + // the following characters: + - = . _ : / @. // - // * - // Tag keys and values are case-sensitive. + // * Tag keys and values are + // case-sensitive. // - // * Do not use aws:, AWS:, or any - // upper or lowercase combination of such as a prefix for either keys or values as - // it is reserved for AWS use. You cannot edit or delete tag keys or values with - // this prefix. Tags with this prefix do not count against your tags per resource - // limit. + // * Do not use aws:, AWS:, or any upper or lowercase combination + // of such as a prefix for either keys or values as it is reserved for AWS use. You + // cannot edit or delete tag keys or values with this prefix. Tags with this prefix + // do not count against your tags per resource limit. Tags []*Tag // The Amazon Resource Name (ARN) of the task. @@ -2797,23 +2787,22 @@ type TaskDefinition struct { // launch type, this field is required and you must use one of the following // values, which determines your range of valid values for the memory parameter: // - // - // * 256 (.25 vCPU) - Available memory values: 512 (0.5 GB), 1024 (1 GB), 2048 (2 + // * + // 256 (.25 vCPU) - Available memory values: 512 (0.5 GB), 1024 (1 GB), 2048 (2 // GB) // - // * 512 (.5 vCPU) - Available memory values: 1024 (1 GB), 2048 (2 GB), - // 3072 (3 GB), 4096 (4 GB) + // * 512 (.5 vCPU) - Available memory values: 1024 (1 GB), 2048 (2 GB), 3072 + // (3 GB), 4096 (4 GB) // - // * 1024 (1 vCPU) - Available memory values: 2048 (2 - // GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 - // GB) + // * 1024 (1 vCPU) - Available memory values: 2048 (2 GB), + // 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) // - // * 2048 (2 vCPU) - Available memory values: Between 4096 (4 GB) and - // 16384 (16 GB) in increments of 1024 (1 GB) + // * + // 2048 (2 vCPU) - Available memory values: Between 4096 (4 GB) and 16384 (16 GB) + // in increments of 1024 (1 GB) // - // * 4096 (4 vCPU) - Available - // memory values: Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 - // GB) + // * 4096 (4 vCPU) - Available memory values: Between + // 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) Cpu *string // The Amazon Resource Name (ARN) of the task execution role that grants the Amazon @@ -2854,12 +2843,12 @@ type TaskDefinition struct { // (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html) // in the Amazon Elastic Container Service Developer Guide. // - // * For tasks that - // use the host IPC mode, IPC namespace related systemControls are not supported. 
+ // * For tasks that use + // the host IPC mode, IPC namespace related systemControls are not supported. // - // - // * For tasks that use the task IPC mode, IPC namespace related systemControls - // will apply to all containers within a task. + // * + // For tasks that use the task IPC mode, IPC namespace related systemControls will + // apply to all containers within a task. // // This parameter is not supported for // Windows containers or tasks using the Fargate launch type. @@ -2872,21 +2861,21 @@ type TaskDefinition struct { // following values, which determines your range of valid values for the cpu // parameter: // - // * 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available cpu values: - // 256 (.25 vCPU) + // * 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available cpu values: 256 + // (.25 vCPU) // - // * 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - - // Available cpu values: 512 (.5 vCPU) + // * 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - Available cpu + // values: 512 (.5 vCPU) // - // * 2048 (2 GB), 3072 (3 GB), 4096 (4 - // GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) - Available cpu values: - // 1024 (1 vCPU) + // * 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), + // 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) - Available cpu values: 1024 (1 vCPU) // - // * Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 - // (1 GB) - Available cpu values: 2048 (2 vCPU) + // * + // Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) - Available + // cpu values: 2048 (2 vCPU) // - // * Between 8192 (8 GB) and - // 30720 (30 GB) in increments of 1024 (1 GB) - Available cpu values: 4096 (4 vCPU) + // * Between 8192 (8 GB) and 30720 (30 GB) in increments + // of 1024 (1 GB) - Available cpu values: 4096 (4 vCPU) Memory *string // The Docker networking mode to use for the containers in the task. The valid @@ -3111,20 +3100,19 @@ type TaskSet struct { // state. If the following conditions are met, the task set will be in // STEADY_STATE: // - // * The task runningCount is equal to the - // computedDesiredCount. + // * The task runningCount is equal to the computedDesiredCount. // - // * The pendingCount is 0. + // * + // The pendingCount is 0. // - // * There are no tasks - // running on container instances in the DRAINING status. + // * There are no tasks running on container instances in + // the DRAINING status. // - // * All tasks are - // reporting a healthy status from the load balancers, service discovery, and - // container health checks. + // * All tasks are reporting a healthy status from the load + // balancers, service discovery, and container health checks. // - // If any of those conditions are not met, the stability - // status returns STABILIZING. + // If any of those + // conditions are not met, the stability status returns STABILIZING. StabilityStatus StabilityStatus // The Unix timestamp for when the task set stability status was retrieved. @@ -3145,31 +3133,30 @@ type TaskSet struct { // them. Each tag consists of a key and an optional value, both of which you // define. The following basic restrictions apply to tags: // - // * Maximum number of + // * Maximum number of // tags per resource - 50 // - // * For each resource, each tag key must be unique, - // and each tag key can have only one value. + // * For each resource, each tag key must be unique, and + // each tag key can have only one value. 
// - // * Maximum key length - 128 - // Unicode characters in UTF-8 + // * Maximum key length - 128 Unicode + // characters in UTF-8 // - // * Maximum value length - 256 Unicode characters - // in UTF-8 + // * Maximum value length - 256 Unicode characters in UTF-8 // - // * If your tagging schema is used across multiple services and - // resources, remember that other services may have restrictions on allowed - // characters. Generally allowed characters are: letters, numbers, and spaces - // representable in UTF-8, and the following characters: + - = . _ : / @. + // * + // If your tagging schema is used across multiple services and resources, remember + // that other services may have restrictions on allowed characters. Generally + // allowed characters are: letters, numbers, and spaces representable in UTF-8, and + // the following characters: + - = . _ : / @. // - // * - // Tag keys and values are case-sensitive. + // * Tag keys and values are + // case-sensitive. // - // * Do not use aws:, AWS:, or any - // upper or lowercase combination of such as a prefix for either keys or values as - // it is reserved for AWS use. You cannot edit or delete tag keys or values with - // this prefix. Tags with this prefix do not count against your tags per resource - // limit. + // * Do not use aws:, AWS:, or any upper or lowercase combination + // of such as a prefix for either keys or values as it is reserved for AWS use. You + // cannot edit or delete tag keys or values with this prefix. Tags with this prefix + // do not count against your tags per resource limit. Tags []*Tag // The task definition the task set is using. diff --git a/service/efs/api_op_CreateFileSystem.go b/service/efs/api_op_CreateFileSystem.go index 6e2d8b80a57..6274c147a25 100644 --- a/service/efs/api_op_CreateFileSystem.go +++ b/service/efs/api_op_CreateFileSystem.go @@ -19,21 +19,21 @@ import ( // currently exist that is owned by the caller's AWS account with the specified // creation token, this operation does the following: // -// * Creates a new, empty -// file system. The file system will have an Amazon EFS assigned ID, and an initial +// * Creates a new, empty file +// system. The file system will have an Amazon EFS assigned ID, and an initial // lifecycle state creating. // -// * Returns with the description of the created -// file system. +// * Returns with the description of the created file +// system. // -// Otherwise, this operation returns a FileSystemAlreadyExists error -// with the ID of the existing file system. For basic use cases, you can use a -// randomly generated UUID for the creation token. The idempotent operation allows -// you to retry a CreateFileSystem call without risk of creating an extra file -// system. This can happen when an initial call fails in a way that leaves it -// uncertain whether or not a file system was actually created. An example might be -// that a transport level timeout occurred or your connection was reset. As long as -// you use the same creation token, if the initial call had succeeded in creating a +// Otherwise, this operation returns a FileSystemAlreadyExists error with +// the ID of the existing file system. For basic use cases, you can use a randomly +// generated UUID for the creation token. The idempotent operation allows you to +// retry a CreateFileSystem call without risk of creating an extra file system. +// This can happen when an initial call fails in a way that leaves it uncertain +// whether or not a file system was actually created. 
An example might be that a +// transport level timeout occurred or your connection was reset. As long as you +// use the same creation token, if the initial call had succeeded in creating a // file system, the client can learn of its existence from the // FileSystemAlreadyExists error. The CreateFileSystem call returns while the file // system's lifecycle state is still creating. You can check the file system @@ -89,18 +89,18 @@ type CreateFileSystemInput struct { // parameter is not specified, the default CMK for Amazon EFS is used. This ID can // be in one of the following formats: // - // * Key ID - A unique identifier of the - // key, for example 1234abcd-12ab-34cd-56ef-1234567890ab. + // * Key ID - A unique identifier of the key, + // for example 1234abcd-12ab-34cd-56ef-1234567890ab. // - // * ARN - An Amazon - // Resource Name (ARN) for the key, for example + // * ARN - An Amazon Resource + // Name (ARN) for the key, for example // arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab. // - // - // * Key alias - A previously created display name for a key, for example + // * + // Key alias - A previously created display name for a key, for example // alias/projectKey1. // - // * Key alias ARN - An ARN for a key alias, for example + // * Key alias ARN - An ARN for a key alias, for example // arn:aws:kms:us-west-2:444455556666:alias/projectKey1. // // If KmsKeyId is specified, diff --git a/service/efs/api_op_CreateMountTarget.go b/service/efs/api_op_CreateMountTarget.go index a967d2a1f0d..4b72487bfd4 100644 --- a/service/efs/api_op_CreateMountTarget.go +++ b/service/efs/api_op_CreateMountTarget.go @@ -24,22 +24,22 @@ import ( // information, see DescribeFileSystems. In the request, you also provide a subnet // ID, which determines the following: // -// * VPC in which Amazon EFS creates the -// mount target -// -// * Availability Zone in which Amazon EFS creates the mount +// * VPC in which Amazon EFS creates the mount // target // -// * IP address range from which Amazon EFS selects the IP address of -// the mount target (if you don't specify an IP address in the request) +// * Availability Zone in which Amazon EFS creates the mount target +// +// * IP +// address range from which Amazon EFS selects the IP address of the mount target +// (if you don't specify an IP address in the request) // -// After -// creating the mount target, Amazon EFS returns a response that includes, a -// MountTargetId and an IpAddress. You use this IP address when mounting the file -// system in an EC2 instance. You can also use the mount target's DNS name when -// mounting the file system. The EC2 instance on which you mount the file system by -// using the mount target can resolve the mount target's DNS name to its IP -// address. For more information, see How it Works: Implementation Overview +// After creating the mount +// target, Amazon EFS returns a response that includes, a MountTargetId and an +// IpAddress. You use this IP address when mounting the file system in an EC2 +// instance. You can also use the mount target's DNS name when mounting the file +// system. The EC2 instance on which you mount the file system by using the mount +// target can resolve the mount target's DNS name to its IP address. For more +// information, see How it Works: Implementation Overview // (https://docs.aws.amazon.com/efs/latest/ug/how-it-works.html#how-it-works-implementation). 
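For orientation while reviewing the reflowed CreateMountTarget documentation above, here is a minimal usage sketch, not part of the generated change. The client is assumed to be an already initialized *efs.Client (for example from efs.NewFromConfig), the IDs are placeholders, and the output field names follow the doc text above (MountTargetId, IpAddress).

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/efs"
)

// createMountTarget creates a mount target for the given file system in the
// given subnet. Because no IpAddress is supplied, Amazon EFS selects a free
// address from the subnet's range, as the documentation above describes.
func createMountTarget(ctx context.Context, client *efs.Client, fsID, subnetID string) error {
	out, err := client.CreateMountTarget(ctx, &efs.CreateMountTargetInput{
		FileSystemId: aws.String(fsID),     // placeholder, e.g. "fs-12345678"
		SubnetId:     aws.String(subnetID), // placeholder, e.g. "subnet-12345678"
	})
	if err != nil {
		return err
	}
	// Per the doc comment above, the response includes the MountTargetId and
	// the IpAddress to use when mounting the file system from an EC2 instance.
	if out.MountTargetId != nil && out.IpAddress != nil {
		fmt.Printf("created %s at %s\n", *out.MountTargetId, *out.IpAddress)
	}
	return nil
}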
// Note that you can create mount targets for a file system in only one VPC, and // there can be only one mount target per Availability Zone. That is, if the file @@ -47,71 +47,70 @@ import ( // specified in the request to add another mount target must meet the following // requirements: // -// * Must belong to the same VPC as the subnets of the existing +// * Must belong to the same VPC as the subnets of the existing // mount targets // -// * Must not be in the same Availability Zone as any of the -// subnets of the existing mount targets -// -// If the request satisfies the -// requirements, Amazon EFS does the following: -// -// * Creates a new mount target -// in the specified subnet. -// -// * Also creates a new network interface in the -// subnet as follows: -// -// * If the request provides an IpAddress, Amazon EFS -// assigns that IP address to the network interface. Otherwise, Amazon EFS assigns -// a free address in the subnet (in the same way that the Amazon EC2 -// CreateNetworkInterface call does when a request does not specify a primary -// private IP address). -// -// * If the request provides SecurityGroups, this -// network interface is associated with those security groups. Otherwise, it -// belongs to the default security group for the subnet's VPC. -// -// * Assigns -// the description Mount target fsmt-id for file system fs-id where fsmt-id is -// the mount target ID, and fs-id is the FileSystemId. -// -// * Sets the -// requesterManaged property of the network interface to true, and the requesterId -// value to EFS. -// -// Each Amazon EFS mount target has one corresponding -// requester-managed EC2 network interface. After the network interface is created, -// Amazon EFS sets the NetworkInterfaceId field in the mount target's description -// to the network interface ID, and the IpAddress field to its address. If network -// interface creation fails, the entire CreateMountTarget operation fails. -// -// The -// CreateMountTarget call returns only after creating the network interface, but -// while the mount target state is still creating, you can check the mount target -// creation status by calling the DescribeMountTargets operation, which among other -// things returns the mount target state. We recommend that you create a mount -// target in each of the Availability Zones. There are cost considerations for -// using a file system in an Availability Zone through a mount target created in -// another Availability Zone. For more information, see Amazon EFS -// (http://aws.amazon.com/efs/). In addition, by always using a mount target local -// to the instance's Availability Zone, you eliminate a partial failure scenario. -// If the Availability Zone in which your mount target is created goes down, then -// you can't access your file system through that mount target. This operation -// requires permissions for the following action on the file system: -// -// * +// * Must not be in the same Availability Zone as any of the subnets +// of the existing mount targets +// +// If the request satisfies the requirements, Amazon +// EFS does the following: +// +// * Creates a new mount target in the specified +// subnet. +// +// * Also creates a new network interface in the subnet as follows: +// +// * If +// the request provides an IpAddress, Amazon EFS assigns that IP address to the +// network interface. 
Otherwise, Amazon EFS assigns a free address in the subnet +// (in the same way that the Amazon EC2 CreateNetworkInterface call does when a +// request does not specify a primary private IP address). +// +// * If the request +// provides SecurityGroups, this network interface is associated with those +// security groups. Otherwise, it belongs to the default security group for the +// subnet's VPC. +// +// * Assigns the description Mount target fsmt-id for file system +// fs-id where fsmt-id is the mount target ID, and fs-id is the +// FileSystemId. +// +// * Sets the requesterManaged property of the network interface to +// true, and the requesterId value to EFS. +// +// Each Amazon EFS mount target has one +// corresponding requester-managed EC2 network interface. After the network +// interface is created, Amazon EFS sets the NetworkInterfaceId field in the mount +// target's description to the network interface ID, and the IpAddress field to its +// address. If network interface creation fails, the entire CreateMountTarget +// operation fails. +// +// The CreateMountTarget call returns only after creating the +// network interface, but while the mount target state is still creating, you can +// check the mount target creation status by calling the DescribeMountTargets +// operation, which among other things returns the mount target state. We recommend +// that you create a mount target in each of the Availability Zones. There are cost +// considerations for using a file system in an Availability Zone through a mount +// target created in another Availability Zone. For more information, see Amazon +// EFS (http://aws.amazon.com/efs/). In addition, by always using a mount target +// local to the instance's Availability Zone, you eliminate a partial failure +// scenario. If the Availability Zone in which your mount target is created goes +// down, then you can't access your file system through that mount target. This +// operation requires permissions for the following action on the file system: +// +// * // elasticfilesystem:CreateMountTarget // // This operation also requires permissions // for the following Amazon EC2 actions: // -// * ec2:DescribeSubnets +// * ec2:DescribeSubnets // -// * +// * // ec2:DescribeNetworkInterfaces // -// * ec2:CreateNetworkInterface +// * ec2:CreateNetworkInterface func (c *Client) CreateMountTarget(ctx context.Context, params *CreateMountTargetInput, optFns ...func(*Options)) (*CreateMountTargetOutput, error) { if params == nil { params = &CreateMountTargetInput{} diff --git a/service/efs/api_op_DeleteMountTarget.go b/service/efs/api_op_DeleteMountTarget.go index ca806543e7d..b611f1638a4 100644 --- a/service/efs/api_op_DeleteMountTarget.go +++ b/service/efs/api_op_DeleteMountTarget.go @@ -21,17 +21,16 @@ import ( // mount target. This operation requires permissions for the following action on // the file system: // -// * elasticfilesystem:DeleteMountTarget +// * elasticfilesystem:DeleteMountTarget // -// The -// DeleteMountTarget call returns while the mount target state is still deleting. -// You can check the mount target deletion by calling the DescribeMountTargets -// operation, which returns a list of mount target descriptions for the given file -// system. The operation also requires permissions for the following Amazon EC2 -// action on the mount target's network interface: +// The DeleteMountTarget +// call returns while the mount target state is still deleting. 
You can check the +// mount target deletion by calling the DescribeMountTargets operation, which +// returns a list of mount target descriptions for the given file system. The +// operation also requires permissions for the following Amazon EC2 action on the +// mount target's network interface: // -// * -// ec2:DeleteNetworkInterface +// * ec2:DeleteNetworkInterface func (c *Client) DeleteMountTarget(ctx context.Context, params *DeleteMountTargetInput, optFns ...func(*Options)) (*DeleteMountTargetOutput, error) { if params == nil { params = &DeleteMountTargetInput{} diff --git a/service/efs/api_op_DescribeMountTargetSecurityGroups.go b/service/efs/api_op_DescribeMountTargetSecurityGroups.go index e4eab3cebdc..bcda4369412 100644 --- a/service/efs/api_op_DescribeMountTargetSecurityGroups.go +++ b/service/efs/api_op_DescribeMountTargetSecurityGroups.go @@ -15,11 +15,11 @@ import ( // created and the lifecycle state of the mount target is not deleted. This // operation requires permissions for the following actions: // -// * +// * // elasticfilesystem:DescribeMountTargetSecurityGroups action on the mount target's // file system. // -// * ec2:DescribeNetworkInterfaceAttribute action on the mount +// * ec2:DescribeNetworkInterfaceAttribute action on the mount // target's network interface. func (c *Client) DescribeMountTargetSecurityGroups(ctx context.Context, params *DescribeMountTargetSecurityGroupsInput, optFns ...func(*Options)) (*DescribeMountTargetSecurityGroupsOutput, error) { if params == nil { diff --git a/service/efs/api_op_ModifyMountTargetSecurityGroups.go b/service/efs/api_op_ModifyMountTargetSecurityGroups.go index b14a68d4594..e5f38023026 100644 --- a/service/efs/api_op_ModifyMountTargetSecurityGroups.go +++ b/service/efs/api_op_ModifyMountTargetSecurityGroups.go @@ -19,12 +19,11 @@ import ( // mount target is not deleted. The operation requires permissions for the // following actions: // -// * elasticfilesystem:ModifyMountTargetSecurityGroups -// action on the mount target's file system. +// * elasticfilesystem:ModifyMountTargetSecurityGroups action +// on the mount target's file system. // -// * -// ec2:ModifyNetworkInterfaceAttribute action on the mount target's network -// interface. +// * ec2:ModifyNetworkInterfaceAttribute action +// on the mount target's network interface. func (c *Client) ModifyMountTargetSecurityGroups(ctx context.Context, params *ModifyMountTargetSecurityGroupsInput, optFns ...func(*Options)) (*ModifyMountTargetSecurityGroupsOutput, error) { if params == nil { params = &ModifyMountTargetSecurityGroupsInput{} diff --git a/service/efs/api_op_PutLifecycleConfiguration.go b/service/efs/api_op_PutLifecycleConfiguration.go index 0f60322ef3f..3fc1c073bfa 100644 --- a/service/efs/api_op_PutLifecycleConfiguration.go +++ b/service/efs/api_op_PutLifecycleConfiguration.go @@ -23,19 +23,18 @@ import ( // LifecycleConfiguration and disables lifecycle management. In the request, // specify the following: // -// * The ID for the file system for which you are -// enabling, disabling, or modifying lifecycle management. +// * The ID for the file system for which you are enabling, +// disabling, or modifying lifecycle management. // -// * A -// LifecyclePolicies array of LifecyclePolicy objects that define when files are -// moved to the IA storage class. The array can contain only one LifecyclePolicy -// item. +// * A LifecyclePolicies array of +// LifecyclePolicy objects that define when files are moved to the IA storage +// class. 
The array can contain only one LifecyclePolicy item. // -// This operation requires permissions for the -// elasticfilesystem:PutLifecycleConfiguration operation. To apply a -// LifecycleConfiguration object to an encrypted file system, you need the same AWS -// Key Management Service (AWS KMS) permissions as when you created the encrypted -// file system. +// This operation +// requires permissions for the elasticfilesystem:PutLifecycleConfiguration +// operation. To apply a LifecycleConfiguration object to an encrypted file system, +// you need the same AWS Key Management Service (AWS KMS) permissions as when you +// created the encrypted file system. func (c *Client) PutLifecycleConfiguration(ctx context.Context, params *PutLifecycleConfigurationInput, optFns ...func(*Options)) (*PutLifecycleConfigurationOutput, error) { if params == nil { params = &PutLifecycleConfigurationInput{} diff --git a/service/efs/types/enums.go b/service/efs/types/enums.go index 87cace44d16..d5a74fad40f 100644 --- a/service/efs/types/enums.go +++ b/service/efs/types/enums.go @@ -30,8 +30,8 @@ type PerformanceMode string // Enum values for PerformanceMode const ( - PerformanceModeGeneral_purpose PerformanceMode = "generalPurpose" - PerformanceModeMax_io PerformanceMode = "maxIO" + PerformanceModeGeneralPurpose PerformanceMode = "generalPurpose" + PerformanceModeMaxIo PerformanceMode = "maxIO" ) // Values returns all known values for PerformanceMode. Note that this can be @@ -88,11 +88,11 @@ type TransitionToIARules string // Enum values for TransitionToIARules const ( - TransitionToIARulesAfter_7_days TransitionToIARules = "AFTER_7_DAYS" - TransitionToIARulesAfter_14_days TransitionToIARules = "AFTER_14_DAYS" - TransitionToIARulesAfter_30_days TransitionToIARules = "AFTER_30_DAYS" - TransitionToIARulesAfter_60_days TransitionToIARules = "AFTER_60_DAYS" - TransitionToIARulesAfter_90_days TransitionToIARules = "AFTER_90_DAYS" + TransitionToIARulesAfter7Days TransitionToIARules = "AFTER_7_DAYS" + TransitionToIARulesAfter14Days TransitionToIARules = "AFTER_14_DAYS" + TransitionToIARulesAfter30Days TransitionToIARules = "AFTER_30_DAYS" + TransitionToIARulesAfter60Days TransitionToIARules = "AFTER_60_DAYS" + TransitionToIARulesAfter90Days TransitionToIARules = "AFTER_90_DAYS" ) // Values returns all known values for TransitionToIARules. Note that this can be diff --git a/service/efs/types/types.go b/service/efs/types/types.go index d16dcb0f4d7..f45060589de 100644 --- a/service/efs/types/types.go +++ b/service/efs/types/types.go @@ -49,16 +49,16 @@ type BackupPolicy struct { // Describes the status of the file system's backup policy. // - // * ENABLED - EFS is + // * ENABLED - EFS is // automatically backing up the file system. // - // * ENABLING - EFS is turning on + // * ENABLING - EFS is turning on // automatic backups for the file system. // - // * DISABLED - automatic back ups are + // * DISABLED - automatic back ups are // turned off for the file system. // - // * DISABLED - EFS is turning off automatic + // * DISABLED - EFS is turning off automatic // backups for the file system. // // This member is required. 
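The efs enum renames above change only the Go identifiers (snake_case to CamelCase); the string values on the wire are unchanged. A small illustrative snippet, not part of the generated change, shows how calling code refers to the renamed constants:

package example

import (
	"github.com/aws/aws-sdk-go-v2/service/efs/types"
)

// transitionDays maps a TransitionToIARules value to the number of days it
// represents. The switch uses the renamed CamelCase constants introduced by
// this change; the underlying string values ("AFTER_7_DAYS", ...) are the
// same as before.
func transitionDays(rule types.TransitionToIARules) (int, bool) {
	switch rule {
	case types.TransitionToIARulesAfter7Days:
		return 7, true
	case types.TransitionToIARulesAfter14Days:
		return 14, true
	case types.TransitionToIARulesAfter30Days:
		return 30, true
	case types.TransitionToIARulesAfter60Days:
		return 60, true
	case types.TransitionToIARulesAfter90Days:
		return 90, true
	default:
		return 0, false
	}
}

// isGeneralPurpose reports whether a file system uses the generalPurpose
// performance mode, again via the renamed constant.
func isGeneralPurpose(mode types.PerformanceMode) bool {
	return mode == types.PerformanceModeGeneralPurpose
}

Code written against the old identifiers (for example TransitionToIARulesAfter_30_days or PerformanceModeGeneral_purpose) no longer compiles after this change and must be updated to the new names.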
diff --git a/service/eks/types/enums.go b/service/eks/types/enums.go index f3d587cdfd0..0dff3f19cd2 100644 --- a/service/eks/types/enums.go +++ b/service/eks/types/enums.go @@ -6,9 +6,9 @@ type AMITypes string // Enum values for AMITypes const ( - AMITypesAl2_x86_64 AMITypes = "AL2_x86_64" - AMITypesAl2_x86_64_gpu AMITypes = "AL2_x86_64_GPU" - AMITypesAl2_arm_64 AMITypes = "AL2_ARM_64" + AMITypesAl2X8664 AMITypes = "AL2_x86_64" + AMITypesAl2X8664Gpu AMITypes = "AL2_x86_64_GPU" + AMITypesAl2Arm64 AMITypes = "AL2_ARM_64" ) // Values returns all known values for AMITypes. Note that this can be expanded in @@ -50,18 +50,18 @@ type ErrorCode string // Enum values for ErrorCode const ( - ErrorCodeSubnet_not_found ErrorCode = "SubnetNotFound" - ErrorCodeSecurity_group_not_found ErrorCode = "SecurityGroupNotFound" - ErrorCodeEni_limit_reached ErrorCode = "EniLimitReached" - ErrorCodeIp_not_available ErrorCode = "IpNotAvailable" - ErrorCodeAccess_denied ErrorCode = "AccessDenied" - ErrorCodeOperation_not_permitted ErrorCode = "OperationNotPermitted" - ErrorCodeVpc_id_not_found ErrorCode = "VpcIdNotFound" - ErrorCodeUnknown ErrorCode = "Unknown" - ErrorCodeNode_creation_failure ErrorCode = "NodeCreationFailure" - ErrorCodePod_eviction_failure ErrorCode = "PodEvictionFailure" - ErrorCodeInsufficient_free_addresses ErrorCode = "InsufficientFreeAddresses" - ErrorCodeCluster_unreachable ErrorCode = "ClusterUnreachable" + ErrorCodeSubnetNotFound ErrorCode = "SubnetNotFound" + ErrorCodeSecurityGroupNotFound ErrorCode = "SecurityGroupNotFound" + ErrorCodeEniLimitReached ErrorCode = "EniLimitReached" + ErrorCodeIpNotAvailable ErrorCode = "IpNotAvailable" + ErrorCodeAccessDenied ErrorCode = "AccessDenied" + ErrorCodeOperationNotPermitted ErrorCode = "OperationNotPermitted" + ErrorCodeVpcIdNotFound ErrorCode = "VpcIdNotFound" + ErrorCodeUnknown ErrorCode = "Unknown" + ErrorCodeNodeCreationFailure ErrorCode = "NodeCreationFailure" + ErrorCodePodEvictionFailure ErrorCode = "PodEvictionFailure" + ErrorCodeInsufficientFreeAddresses ErrorCode = "InsufficientFreeAddresses" + ErrorCodeClusterUnreachable ErrorCode = "ClusterUnreachable" ) // Values returns all known values for ErrorCode. Note that this can be expanded in @@ -88,11 +88,11 @@ type FargateProfileStatus string // Enum values for FargateProfileStatus const ( - FargateProfileStatusCreating FargateProfileStatus = "CREATING" - FargateProfileStatusActive FargateProfileStatus = "ACTIVE" - FargateProfileStatusDeleting FargateProfileStatus = "DELETING" - FargateProfileStatusCreate_failed FargateProfileStatus = "CREATE_FAILED" - FargateProfileStatusDelete_failed FargateProfileStatus = "DELETE_FAILED" + FargateProfileStatusCreating FargateProfileStatus = "CREATING" + FargateProfileStatusActive FargateProfileStatus = "ACTIVE" + FargateProfileStatusDeleting FargateProfileStatus = "DELETING" + FargateProfileStatusCreateFailed FargateProfileStatus = "CREATE_FAILED" + FargateProfileStatusDeleteFailed FargateProfileStatus = "DELETE_FAILED" ) // Values returns all known values for FargateProfileStatus. 
Note that this can be @@ -112,11 +112,11 @@ type LogType string // Enum values for LogType const ( - LogTypeApi LogType = "api" - LogTypeAudit LogType = "audit" - LogTypeAuthenticator LogType = "authenticator" - LogTypeController_manager LogType = "controllerManager" - LogTypeScheduler LogType = "scheduler" + LogTypeApi LogType = "api" + LogTypeAudit LogType = "audit" + LogTypeAuthenticator LogType = "authenticator" + LogTypeControllerManager LogType = "controllerManager" + LogTypeScheduler LogType = "scheduler" ) // Values returns all known values for LogType. Note that this can be expanded in @@ -136,24 +136,24 @@ type NodegroupIssueCode string // Enum values for NodegroupIssueCode const ( - NodegroupIssueCodeAuto_scaling_group_not_found NodegroupIssueCode = "AutoScalingGroupNotFound" - NodegroupIssueCodeAuto_scaling_group_invalid_configuration NodegroupIssueCode = "AutoScalingGroupInvalidConfiguration" - NodegroupIssueCodeEc2_security_group_not_found NodegroupIssueCode = "Ec2SecurityGroupNotFound" - NodegroupIssueCodeEc2_security_group_deletion_failure NodegroupIssueCode = "Ec2SecurityGroupDeletionFailure" - NodegroupIssueCodeEc2_launch_template_not_found NodegroupIssueCode = "Ec2LaunchTemplateNotFound" - NodegroupIssueCodeEc2_launch_template_version_mismatch NodegroupIssueCode = "Ec2LaunchTemplateVersionMismatch" - NodegroupIssueCodeEc2_subnet_not_found NodegroupIssueCode = "Ec2SubnetNotFound" - NodegroupIssueCodeEc2_subnet_invalid_configuration NodegroupIssueCode = "Ec2SubnetInvalidConfiguration" - NodegroupIssueCodeIam_instance_profile_not_found NodegroupIssueCode = "IamInstanceProfileNotFound" - NodegroupIssueCodeIam_limit_exceeded NodegroupIssueCode = "IamLimitExceeded" - NodegroupIssueCodeIam_node_role_not_found NodegroupIssueCode = "IamNodeRoleNotFound" - NodegroupIssueCodeNode_creation_failure NodegroupIssueCode = "NodeCreationFailure" - NodegroupIssueCodeAsg_instance_launch_failures NodegroupIssueCode = "AsgInstanceLaunchFailures" - NodegroupIssueCodeInstance_limit_exceeded NodegroupIssueCode = "InstanceLimitExceeded" - NodegroupIssueCodeInsufficient_free_addresses NodegroupIssueCode = "InsufficientFreeAddresses" - NodegroupIssueCodeAccess_denied NodegroupIssueCode = "AccessDenied" - NodegroupIssueCodeInternal_failure NodegroupIssueCode = "InternalFailure" - NodegroupIssueCodeCluster_unreachable NodegroupIssueCode = "ClusterUnreachable" + NodegroupIssueCodeAutoScalingGroupNotFound NodegroupIssueCode = "AutoScalingGroupNotFound" + NodegroupIssueCodeAutoScalingGroupInvalidConfiguration NodegroupIssueCode = "AutoScalingGroupInvalidConfiguration" + NodegroupIssueCodeEc2SecurityGroupNotFound NodegroupIssueCode = "Ec2SecurityGroupNotFound" + NodegroupIssueCodeEc2SecurityGroupDeletionFailure NodegroupIssueCode = "Ec2SecurityGroupDeletionFailure" + NodegroupIssueCodeEc2LaunchTemplateNotFound NodegroupIssueCode = "Ec2LaunchTemplateNotFound" + NodegroupIssueCodeEc2LaunchTemplateVersionMismatch NodegroupIssueCode = "Ec2LaunchTemplateVersionMismatch" + NodegroupIssueCodeEc2SubnetNotFound NodegroupIssueCode = "Ec2SubnetNotFound" + NodegroupIssueCodeEc2SubnetInvalidConfiguration NodegroupIssueCode = "Ec2SubnetInvalidConfiguration" + NodegroupIssueCodeIamInstanceProfileNotFound NodegroupIssueCode = "IamInstanceProfileNotFound" + NodegroupIssueCodeIamLimitExceeded NodegroupIssueCode = "IamLimitExceeded" + NodegroupIssueCodeIamNodeRoleNotFound NodegroupIssueCode = "IamNodeRoleNotFound" + NodegroupIssueCodeNodeCreationFailure NodegroupIssueCode = "NodeCreationFailure" + 
NodegroupIssueCodeAsgInstanceLaunchFailures NodegroupIssueCode = "AsgInstanceLaunchFailures" + NodegroupIssueCodeInstanceLimitExceeded NodegroupIssueCode = "InstanceLimitExceeded" + NodegroupIssueCodeInsufficientFreeAddresses NodegroupIssueCode = "InsufficientFreeAddresses" + NodegroupIssueCodeAccessDenied NodegroupIssueCode = "AccessDenied" + NodegroupIssueCodeInternalFailure NodegroupIssueCode = "InternalFailure" + NodegroupIssueCodeClusterUnreachable NodegroupIssueCode = "ClusterUnreachable" ) // Values returns all known values for NodegroupIssueCode. Note that this can be @@ -186,13 +186,13 @@ type NodegroupStatus string // Enum values for NodegroupStatus const ( - NodegroupStatusCreating NodegroupStatus = "CREATING" - NodegroupStatusActive NodegroupStatus = "ACTIVE" - NodegroupStatusUpdating NodegroupStatus = "UPDATING" - NodegroupStatusDeleting NodegroupStatus = "DELETING" - NodegroupStatusCreate_failed NodegroupStatus = "CREATE_FAILED" - NodegroupStatusDelete_failed NodegroupStatus = "DELETE_FAILED" - NodegroupStatusDegraded NodegroupStatus = "DEGRADED" + NodegroupStatusCreating NodegroupStatus = "CREATING" + NodegroupStatusActive NodegroupStatus = "ACTIVE" + NodegroupStatusUpdating NodegroupStatus = "UPDATING" + NodegroupStatusDeleting NodegroupStatus = "DELETING" + NodegroupStatusCreateFailed NodegroupStatus = "CREATE_FAILED" + NodegroupStatusDeleteFailed NodegroupStatus = "DELETE_FAILED" + NodegroupStatusDegraded NodegroupStatus = "DEGRADED" ) // Values returns all known values for NodegroupStatus. Note that this can be @@ -214,18 +214,18 @@ type UpdateParamType string // Enum values for UpdateParamType const ( - UpdateParamTypeVersion UpdateParamType = "Version" - UpdateParamTypePlatform_version UpdateParamType = "PlatformVersion" - UpdateParamTypeEndpoint_private_access UpdateParamType = "EndpointPrivateAccess" - UpdateParamTypeEndpoint_public_access UpdateParamType = "EndpointPublicAccess" - UpdateParamTypeCluster_logging UpdateParamType = "ClusterLogging" - UpdateParamTypeDesired_size UpdateParamType = "DesiredSize" - UpdateParamTypeLabels_to_add UpdateParamType = "LabelsToAdd" - UpdateParamTypeLabels_to_remove UpdateParamType = "LabelsToRemove" - UpdateParamTypeMax_size UpdateParamType = "MaxSize" - UpdateParamTypeMin_size UpdateParamType = "MinSize" - UpdateParamTypeRelease_version UpdateParamType = "ReleaseVersion" - UpdateParamTypePublic_access_cidrs UpdateParamType = "PublicAccessCidrs" + UpdateParamTypeVersion UpdateParamType = "Version" + UpdateParamTypePlatformVersion UpdateParamType = "PlatformVersion" + UpdateParamTypeEndpointPrivateAccess UpdateParamType = "EndpointPrivateAccess" + UpdateParamTypeEndpointPublicAccess UpdateParamType = "EndpointPublicAccess" + UpdateParamTypeClusterLogging UpdateParamType = "ClusterLogging" + UpdateParamTypeDesiredSize UpdateParamType = "DesiredSize" + UpdateParamTypeLabelsToAdd UpdateParamType = "LabelsToAdd" + UpdateParamTypeLabelsToRemove UpdateParamType = "LabelsToRemove" + UpdateParamTypeMaxSize UpdateParamType = "MaxSize" + UpdateParamTypeMinSize UpdateParamType = "MinSize" + UpdateParamTypeReleaseVersion UpdateParamType = "ReleaseVersion" + UpdateParamTypePublicAccessCidrs UpdateParamType = "PublicAccessCidrs" ) // Values returns all known values for UpdateParamType. 
Note that this can be @@ -252,10 +252,10 @@ type UpdateStatus string // Enum values for UpdateStatus const ( - UpdateStatusIn_progress UpdateStatus = "InProgress" - UpdateStatusFailed UpdateStatus = "Failed" - UpdateStatusCancelled UpdateStatus = "Cancelled" - UpdateStatusSuccessful UpdateStatus = "Successful" + UpdateStatusInProgress UpdateStatus = "InProgress" + UpdateStatusFailed UpdateStatus = "Failed" + UpdateStatusCancelled UpdateStatus = "Cancelled" + UpdateStatusSuccessful UpdateStatus = "Successful" ) // Values returns all known values for UpdateStatus. Note that this can be expanded @@ -274,10 +274,10 @@ type UpdateType string // Enum values for UpdateType const ( - UpdateTypeVersion_update UpdateType = "VersionUpdate" - UpdateTypeEndpoint_access_update UpdateType = "EndpointAccessUpdate" - UpdateTypeLogging_update UpdateType = "LoggingUpdate" - UpdateTypeConfig_update UpdateType = "ConfigUpdate" + UpdateTypeVersionUpdate UpdateType = "VersionUpdate" + UpdateTypeEndpointAccessUpdate UpdateType = "EndpointAccessUpdate" + UpdateTypeLoggingUpdate UpdateType = "LoggingUpdate" + UpdateTypeConfigUpdate UpdateType = "ConfigUpdate" ) // Values returns all known values for UpdateType. Note that this can be expanded diff --git a/service/eks/types/types.go b/service/eks/types/types.go index 0ec5eac803a..0e0786a7f07 100644 --- a/service/eks/types/types.go +++ b/service/eks/types/types.go @@ -105,28 +105,27 @@ type ErrorDetail struct { // A brief description of the error. // - // * SubnetNotFound: We couldn't find one of - // the subnets associated with the cluster. + // * SubnetNotFound: We couldn't find one of the + // subnets associated with the cluster. // - // * SecurityGroupNotFound: We - // couldn't find one of the security groups associated with the cluster. + // * SecurityGroupNotFound: We couldn't find + // one of the security groups associated with the cluster. // - // * - // EniLimitReached: You have reached the elastic network interface limit for your - // account. + // * EniLimitReached: You + // have reached the elastic network interface limit for your account. // - // * IpNotAvailable: A subnet associated with the cluster doesn't - // have any free IP addresses. + // * + // IpNotAvailable: A subnet associated with the cluster doesn't have any free IP + // addresses. // - // * AccessDenied: You don't have permissions to - // perform the specified operation. + // * AccessDenied: You don't have permissions to perform the specified + // operation. // - // * OperationNotPermitted: The service role - // associated with the cluster doesn't have the required access permissions for - // Amazon EKS. + // * OperationNotPermitted: The service role associated with the + // cluster doesn't have the required access permissions for Amazon EKS. // - // * VpcIdNotFound: We couldn't find the VPC associated with the - // cluster. + // * + // VpcIdNotFound: We couldn't find the VPC associated with the cluster. ErrorCode ErrorCode // A more complete description of the error. @@ -199,70 +198,69 @@ type Issue struct { // A brief description of the error. // - // * AutoScalingGroupNotFound: We couldn't - // find the Auto Scaling group associated with the managed node group. You may be - // able to recreate an Auto Scaling group with the same settings to recover. + // * AutoScalingGroupNotFound: We couldn't find + // the Auto Scaling group associated with the managed node group. You may be able + // to recreate an Auto Scaling group with the same settings to recover. 
// - // * + // * // Ec2SecurityGroupNotFound: We couldn't find the cluster security group for the // cluster. You must recreate your cluster. // - // * Ec2SecurityGroupDeletionFailure: - // We could not delete the remote access security group for your managed node - // group. Remove any dependencies from the security group. + // * Ec2SecurityGroupDeletionFailure: We + // could not delete the remote access security group for your managed node group. + // Remove any dependencies from the security group. // - // * - // Ec2LaunchTemplateNotFound: We couldn't find the Amazon EC2 launch template for - // your managed node group. You may be able to recreate a launch template with the - // same settings to recover. + // * Ec2LaunchTemplateNotFound: + // We couldn't find the Amazon EC2 launch template for your managed node group. You + // may be able to recreate a launch template with the same settings to recover. // - // * Ec2LaunchTemplateVersionMismatch: The Amazon - // EC2 launch template version for your managed node group does not match the - // version that Amazon EKS created. You may be able to revert to the version that - // Amazon EKS created to recover. + // * + // Ec2LaunchTemplateVersionMismatch: The Amazon EC2 launch template version for + // your managed node group does not match the version that Amazon EKS created. You + // may be able to revert to the version that Amazon EKS created to recover. // - // * Ec2SubnetInvalidConfiguration: One or more - // Amazon EC2 subnets specified for a node group do not automatically assign public - // IP addresses to instances launched into it. If you want your instances to be - // assigned a public IP address, then you need to enable the auto-assign public IP - // address setting for the subnet. See Modifying the public IPv4 addressing - // attribute for your subnet + // * + // Ec2SubnetInvalidConfiguration: One or more Amazon EC2 subnets specified for a + // node group do not automatically assign public IP addresses to instances launched + // into it. If you want your instances to be assigned a public IP address, then you + // need to enable the auto-assign public IP address setting for the subnet. See + // Modifying the public IPv4 addressing attribute for your subnet // (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-ip-addressing.html#subnet-public-ip) // in the Amazon VPC User Guide. // - // * IamInstanceProfileNotFound: We couldn't - // find the IAM instance profile for your managed node group. You may be able to + // * IamInstanceProfileNotFound: We couldn't find + // the IAM instance profile for your managed node group. You may be able to // recreate an instance profile with the same settings to recover. // - // * + // * // IamNodeRoleNotFound: We couldn't find the IAM role for your managed node group. // You may be able to recreate an IAM role with the same settings to recover. // - // - // * AsgInstanceLaunchFailures: Your Auto Scaling group is experiencing failures + // * + // AsgInstanceLaunchFailures: Your Auto Scaling group is experiencing failures // while attempting to launch instances. // - // * NodeCreationFailure: Your launched + // * NodeCreationFailure: Your launched // instances are unable to register with your Amazon EKS cluster. Common causes of // this failure are insufficient worker node IAM role // (https://docs.aws.amazon.com/eks/latest/userguide/worker_node_IAM_role.html) // permissions or lack of outbound internet access for the nodes. 
// - // * + // * // InstanceLimitExceeded: Your AWS account is unable to launch any more instances // of the specified instance type. You may be able to request an Amazon EC2 // instance limit increase to recover. // - // * InsufficientFreeAddresses: One or - // more of the subnets associated with your managed node group does not have enough + // * InsufficientFreeAddresses: One or more of + // the subnets associated with your managed node group does not have enough // available IP addresses for new nodes. // - // * AccessDenied: Amazon EKS or one or - // more of your managed nodes is unable to communicate with your cluster API - // server. + // * AccessDenied: Amazon EKS or one or more + // of your managed nodes is unable to communicate with your cluster API server. // - // * InternalFailure: These errors are usually caused by an Amazon EKS - // server-side issue. + // * + // InternalFailure: These errors are usually caused by an Amazon EKS server-side + // issue. Code NodegroupIssueCode // The error message associated with the issue. @@ -281,17 +279,17 @@ type KubernetesNetworkConfigRequest struct { // overlap with resources in other networks that are peered or connected to your // VPC. The block must meet the following requirements: // - // * Within one of the + // * Within one of the // following private IP address blocks: 10.0.0.0/8, 172.16.0.0.0/12, or // 192.168.0.0/16. // - // * Doesn't overlap with any CIDR block assigned to the VPC - // that you selected for VPC. + // * Doesn't overlap with any CIDR block assigned to the VPC that + // you selected for VPC. // - // * Between /24 and /12. + // * Between /24 and /12. // - // You can only specify a - // custom CIDR block when you create a cluster and can't change this value once the + // You can only specify a custom + // CIDR block when you create a cluster and can't change this value once the // cluster is created. ServiceIpv4Cidr *string } diff --git a/service/elasticache/api_op_AuthorizeCacheSecurityGroupIngress.go b/service/elasticache/api_op_AuthorizeCacheSecurityGroupIngress.go index 5662324bd86..57cc0fb5669 100644 --- a/service/elasticache/api_op_AuthorizeCacheSecurityGroupIngress.go +++ b/service/elasticache/api_op_AuthorizeCacheSecurityGroupIngress.go @@ -56,12 +56,12 @@ type AuthorizeCacheSecurityGroupIngressOutput struct { // Represents the output of one of the following operations: // - // * + // * // AuthorizeCacheSecurityGroupIngress // - // * CreateCacheSecurityGroup + // * CreateCacheSecurityGroup // - // * + // * // RevokeCacheSecurityGroupIngress CacheSecurityGroup *types.CacheSecurityGroup diff --git a/service/elasticache/api_op_CopySnapshot.go b/service/elasticache/api_op_CopySnapshot.go index f839acf87a1..e0029d33b06 100644 --- a/service/elasticache/api_op_CopySnapshot.go +++ b/service/elasticache/api_op_CopySnapshot.go @@ -24,51 +24,51 @@ import ( // (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/IAM.html). You // could receive the following error messages. Error Messages // -// * Error Message: -// The S3 bucket %s is outside of the region. Solution: Create an Amazon S3 bucket -// in the same region as your snapshot. For more information, see Step 1: Create an +// * Error Message: The +// S3 bucket %s is outside of the region. Solution: Create an Amazon S3 bucket in +// the same region as your snapshot. 
For more information, see Step 1: Create an // Amazon S3 Bucket // (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/backups-exporting.html#backups-exporting-create-s3-bucket) // in the ElastiCache User Guide. // -// * Error Message: The S3 bucket %s does not +// * Error Message: The S3 bucket %s does not // exist. Solution: Create an Amazon S3 bucket in the same region as your snapshot. // For more information, see Step 1: Create an Amazon S3 Bucket // (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/backups-exporting.html#backups-exporting-create-s3-bucket) // in the ElastiCache User Guide. // -// * Error Message: The S3 bucket %s is not -// owned by the authenticated user. Solution: Create an Amazon S3 bucket in the -// same region as your snapshot. For more information, see Step 1: Create an Amazon -// S3 Bucket +// * Error Message: The S3 bucket %s is not owned +// by the authenticated user. Solution: Create an Amazon S3 bucket in the same +// region as your snapshot. For more information, see Step 1: Create an Amazon S3 +// Bucket // (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/backups-exporting.html#backups-exporting-create-s3-bucket) // in the ElastiCache User Guide. // -// * Error Message: The authenticated user does -// not have sufficient permissions to perform the desired activity. Solution: -// Contact your system administrator to get the needed permissions. +// * Error Message: The authenticated user does not +// have sufficient permissions to perform the desired activity. Solution: Contact +// your system administrator to get the needed permissions. // -// * Error -// Message: The S3 bucket %s already contains an object with key %s. Solution: Give -// the TargetSnapshotName a new and unique value. If exporting a snapshot, you -// could alternatively create a new Amazon S3 bucket and use this same value for +// * Error Message: The +// S3 bucket %s already contains an object with key %s. Solution: Give the +// TargetSnapshotName a new and unique value. If exporting a snapshot, you could +// alternatively create a new Amazon S3 bucket and use this same value for // TargetSnapshotName. // -// * Error Message: ElastiCache has not been granted READ +// * Error Message: ElastiCache has not been granted READ // permissions %s on the S3 Bucket. Solution: Add List and Read permissions on the // bucket. For more information, see Step 2: Grant ElastiCache Access to Your // Amazon S3 Bucket // (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/backups-exporting.html#backups-exporting-grant-access) // in the ElastiCache User Guide. // -// * Error Message: ElastiCache has not been +// * Error Message: ElastiCache has not been // granted WRITE permissions %s on the S3 Bucket. Solution: Add Upload/Delete // permissions on the bucket. For more information, see Step 2: Grant ElastiCache // Access to Your Amazon S3 Bucket // (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/backups-exporting.html#backups-exporting-grant-access) // in the ElastiCache User Guide. // -// * Error Message: ElastiCache has not been +// * Error Message: ElastiCache has not been // granted READ_ACP permissions %s on the S3 Bucket. Solution: Add View Permissions // on the bucket. 
For more information, see Step 2: Grant ElastiCache Access to // Your Amazon S3 Bucket diff --git a/service/elasticache/api_op_CreateCacheCluster.go b/service/elasticache/api_op_CreateCacheCluster.go index 44427f36094..8f6043a73df 100644 --- a/service/elasticache/api_op_CreateCacheCluster.go +++ b/service/elasticache/api_op_CreateCacheCluster.go @@ -35,13 +35,13 @@ type CreateCacheClusterInput struct { // The node group (shard) identifier. This parameter is stored as a lowercase // string. Constraints: // - // * A name must contain from 1 to 50 alphanumeric - // characters or hyphens. + // * A name must contain from 1 to 50 alphanumeric characters + // or hyphens. // - // * The first character must be a letter. + // * The first character must be a letter. // - // * A - // name cannot end with a hyphen or contain two consecutive hyphens. + // * A name cannot end with a + // hyphen or contain two consecutive hyphens. // // This member is required. CacheClusterId *string @@ -56,14 +56,14 @@ type CreateCacheClusterInput struct { // Reserved parameter. The password used to access a password protected server. // Password constraints: // - // * Must be only printable ASCII characters. + // * Must be only printable ASCII characters. // - // * - // Must be at least 16 characters and no more than 128 characters in length. + // * Must be at + // least 16 characters and no more than 128 characters in length. // - // * - // The only permitted printable special characters are !, &, #, $, ^, <, >, and -. - // Other printable special characters cannot be used in the AUTH token. + // * The only + // permitted printable special characters are !, &, #, $, ^, <, >, and -. Other + // printable special characters cannot be used in the AUTH token. // // For more // information, see AUTH password (http://redis.io/commands/AUTH) at @@ -78,56 +78,54 @@ type CreateCacheClusterInput struct { // current generation types provide more memory and computational power at lower // cost when compared to their equivalent previous generation counterparts. 
// - // * + // * // General purpose: // - // * Current generation: M5 node types: cache.m5.large, + // * Current generation: M5 node types: cache.m5.large, // cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, // cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, // cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T3 node types: // cache.t3.micro, cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, // cache.t2.small, cache.t2.medium // - // * Previous generation: (not - // recommended) T1 node types: cache.t1.micro M1 node types: cache.m1.small, - // cache.m1.medium, cache.m1.large, cache.m1.xlarge M3 node types: cache.m3.medium, - // cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge + // * Previous generation: (not recommended) T1 + // node types: cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, + // cache.m1.large, cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, + // cache.m3.xlarge, cache.m3.2xlarge // - // * Compute optimized: + // * Compute optimized: // + // * Previous generation: + // (not recommended) C1 node types: cache.c1.xlarge // - // * Previous generation: (not recommended) C1 node types: cache.c1.xlarge + // * Memory optimized: // - // * - // Memory optimized: + // * Current + // generation: R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, + // cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: + // cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, + // cache.r4.8xlarge, cache.r4.16xlarge // - // * Current generation: R5 node types: cache.r5.large, - // cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, - // cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, - // cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge + // * Previous generation: (not recommended) M2 + // node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: + // cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, // - // - // * Previous generation: (not recommended) M2 node types: cache.m2.xlarge, - // cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, - // cache.r3.xlarge, cache.r3.2xlarge, - // - // cache.r3.4xlarge, + // cache.r3.4xlarge, // cache.r3.8xlarge // // Additional node type info // - // * All current generation - // instance types are created in Amazon VPC by default. + // * All current generation instance + // types are created in Amazon VPC by default. // - // * Redis append-only - // files (AOF) are not supported for T1 or T2 instances. + // * Redis append-only files (AOF) are + // not supported for T1 or T2 instances. // - // * Redis Multi-AZ with - // automatic failover is not supported on T1 instances. + // * Redis Multi-AZ with automatic failover + // is not supported on T1 instances. // - // * Redis configuration - // variables appendonly and appendfsync are not supported on Redis version 2.8.22 - // and later. + // * Redis configuration variables appendonly + // and appendfsync are not supported on Redis version 2.8.22 and later. CacheNodeType *string // The name of the parameter group to associate with this cluster. If this argument @@ -207,20 +205,20 @@ type CreateCacheClusterInput struct { // ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 // minute period. 
Valid values for ddd are: // - // * sun + // * sun // - // * mon + // * mon // - // * tue + // * tue // - // * - // wed + // * wed // - // * thu + // * thu // - // * fri + // * + // fri // - // * sat + // * sat // // Example: sun:23:00-mon:01:30 PreferredMaintenanceWindow *string diff --git a/service/elasticache/api_op_CreateCacheParameterGroup.go b/service/elasticache/api_op_CreateCacheParameterGroup.go index 7d537545535..a2ba4032590 100644 --- a/service/elasticache/api_op_CreateCacheParameterGroup.go +++ b/service/elasticache/api_op_CreateCacheParameterGroup.go @@ -19,11 +19,11 @@ import ( // the newly created CacheParameterGroup you can change the values of specific // parameters. For more information, see: // -// * ModifyCacheParameterGroup +// * ModifyCacheParameterGroup // (https://docs.aws.amazon.com/AmazonElastiCache/latest/APIReference/API_ModifyCacheParameterGroup.html) // in the ElastiCache API Reference. // -// * Parameters and Parameter Groups +// * Parameters and Parameter Groups // (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/ParameterGroups.html) // in the ElastiCache User Guide. func (c *Client) CreateCacheParameterGroup(ctx context.Context, params *CreateCacheParameterGroupInput, optFns ...func(*Options)) (*CreateCacheParameterGroupOutput, error) { diff --git a/service/elasticache/api_op_CreateCacheSecurityGroup.go b/service/elasticache/api_op_CreateCacheSecurityGroup.go index ba6371f5829..c3c5e1afd6f 100644 --- a/service/elasticache/api_op_CreateCacheSecurityGroup.go +++ b/service/elasticache/api_op_CreateCacheSecurityGroup.go @@ -52,12 +52,12 @@ type CreateCacheSecurityGroupOutput struct { // Represents the output of one of the following operations: // - // * + // * // AuthorizeCacheSecurityGroupIngress // - // * CreateCacheSecurityGroup + // * CreateCacheSecurityGroup // - // * + // * // RevokeCacheSecurityGroupIngress CacheSecurityGroup *types.CacheSecurityGroup diff --git a/service/elasticache/api_op_CreateCacheSubnetGroup.go b/service/elasticache/api_op_CreateCacheSubnetGroup.go index ed8779b593c..73152311f99 100644 --- a/service/elasticache/api_op_CreateCacheSubnetGroup.go +++ b/service/elasticache/api_op_CreateCacheSubnetGroup.go @@ -53,10 +53,10 @@ type CreateCacheSubnetGroupOutput struct { // Represents the output of one of the following operations: // - // * + // * // CreateCacheSubnetGroup // - // * ModifyCacheSubnetGroup + // * ModifyCacheSubnetGroup CacheSubnetGroup *types.CacheSubnetGroup // Metadata pertaining to the operation's result. diff --git a/service/elasticache/api_op_CreateGlobalReplicationGroup.go b/service/elasticache/api_op_CreateGlobalReplicationGroup.go index 2fc1364eab6..d787d085a19 100644 --- a/service/elasticache/api_op_CreateGlobalReplicationGroup.go +++ b/service/elasticache/api_op_CreateGlobalReplicationGroup.go @@ -17,10 +17,10 @@ import ( // low-latency reads and disaster recovery across regions. For more information, // see Replication Across Regions Using Global Datastore. // -// * The +// * The // GlobalReplicationGroupIdSuffix is the name of the Global Datastore. // -// * The +// * The // PrimaryReplicationGroupId represents the name of the primary cluster that // accepts writes and will replicate updates to the secondary cluster. func (c *Client) CreateGlobalReplicationGroup(ctx context.Context, params *CreateGlobalReplicationGroupInput, optFns ...func(*Options)) (*CreateGlobalReplicationGroupOutput, error) { @@ -70,8 +70,8 @@ type CreateGlobalReplicationGroupOutput struct { // only reads. 
The primary cluster automatically replicates updates to the // secondary cluster. // - // * The GlobalReplicationGroupIdSuffix represents the name - // of the Global Datastore, which is what you use to associate a secondary cluster. + // * The GlobalReplicationGroupIdSuffix represents the name of + // the Global Datastore, which is what you use to associate a secondary cluster. GlobalReplicationGroup *types.GlobalReplicationGroup // Metadata pertaining to the operation's result. diff --git a/service/elasticache/api_op_CreateReplicationGroup.go b/service/elasticache/api_op_CreateReplicationGroup.go index 0e24e044a7c..cb6d5ce28f6 100644 --- a/service/elasticache/api_op_CreateReplicationGroup.go +++ b/service/elasticache/api_op_CreateReplicationGroup.go @@ -55,13 +55,13 @@ type CreateReplicationGroupInput struct { // The replication group identifier. This parameter is stored as a lowercase // string. Constraints: // - // * A name must contain from 1 to 40 alphanumeric - // characters or hyphens. + // * A name must contain from 1 to 40 alphanumeric characters + // or hyphens. // - // * The first character must be a letter. + // * The first character must be a letter. // - // * A - // name cannot end with a hyphen or contain two consecutive hyphens. + // * A name cannot end with a + // hyphen or contain two consecutive hyphens. // // This member is required. ReplicationGroupId *string @@ -80,18 +80,17 @@ type CreateReplicationGroupInput struct { // TransitEncryptionEnabled as true, an AuthToken, and a CacheSubnetGroup. Password // constraints: // - // * Must be only printable ASCII characters. + // * Must be only printable ASCII characters. // - // * Must be at - // least 16 characters and no more than 128 characters in length. + // * Must be at least 16 + // characters and no more than 128 characters in length. // - // * The only - // permitted printable special characters are !, &, #, $, ^, <, >, and -. Other - // printable special characters cannot be used in the AUTH token. + // * The only permitted + // printable special characters are !, &, #, $, ^, <, >, and -. Other printable + // special characters cannot be used in the AUTH token. // - // For more - // information, see AUTH password (http://redis.io/commands/AUTH) at - // http://redis.io/commands/AUTH. + // For more information, see + // AUTH password (http://redis.io/commands/AUTH) at http://redis.io/commands/AUTH. AuthToken *string // This parameter is currently disabled. @@ -107,56 +106,54 @@ type CreateReplicationGroupInput struct { // current generation types provide more memory and computational power at lower // cost when compared to their equivalent previous generation counterparts. 
// - // * + // * // General purpose: // - // * Current generation: M5 node types: cache.m5.large, + // * Current generation: M5 node types: cache.m5.large, // cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, // cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, // cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T3 node types: // cache.t3.micro, cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, // cache.t2.small, cache.t2.medium // - // * Previous generation: (not - // recommended) T1 node types: cache.t1.micro M1 node types: cache.m1.small, - // cache.m1.medium, cache.m1.large, cache.m1.xlarge M3 node types: cache.m3.medium, - // cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge + // * Previous generation: (not recommended) T1 + // node types: cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, + // cache.m1.large, cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, + // cache.m3.xlarge, cache.m3.2xlarge // - // * Compute optimized: + // * Compute optimized: // + // * Previous generation: + // (not recommended) C1 node types: cache.c1.xlarge // - // * Previous generation: (not recommended) C1 node types: cache.c1.xlarge + // * Memory optimized: // - // * - // Memory optimized: + // * Current + // generation: R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, + // cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: + // cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, + // cache.r4.8xlarge, cache.r4.16xlarge // - // * Current generation: R5 node types: cache.r5.large, - // cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, - // cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, - // cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge + // * Previous generation: (not recommended) M2 + // node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: + // cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, // - // - // * Previous generation: (not recommended) M2 node types: cache.m2.xlarge, - // cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, - // cache.r3.xlarge, cache.r3.2xlarge, - // - // cache.r3.4xlarge, + // cache.r3.4xlarge, // cache.r3.8xlarge // // Additional node type info // - // * All current generation - // instance types are created in Amazon VPC by default. + // * All current generation instance + // types are created in Amazon VPC by default. // - // * Redis append-only - // files (AOF) are not supported for T1 or T2 instances. + // * Redis append-only files (AOF) are + // not supported for T1 or T2 instances. // - // * Redis Multi-AZ with - // automatic failover is not supported on T1 instances. + // * Redis Multi-AZ with automatic failover + // is not supported on T1 instances. // - // * Redis configuration - // variables appendonly and appendfsync are not supported on Redis version 2.8.22 - // and later. + // * Redis configuration variables appendonly + // and appendfsync are not supported on Redis version 2.8.22 and later. CacheNodeType *string // The name of the parameter group to associate with this replication group. If @@ -167,11 +164,11 @@ type CreateReplicationGroupInput struct { // or later, only one node group (shard), and want to use a default parameter // group, we recommend that you specify the parameter group by name. 
// - // * To - // create a Redis (cluster mode disabled) replication group, use + // * To create a + // Redis (cluster mode disabled) replication group, use // CacheParameterGroupName=default.redis3.2. // - // * To create a Redis (cluster mode + // * To create a Redis (cluster mode // enabled) replication group, use // CacheParameterGroupName=default.redis3.2.cluster.on. CacheParameterGroupName *string @@ -265,20 +262,20 @@ type CreateReplicationGroupInput struct { // ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 // minute period. Valid values for ddd are: // - // * sun + // * sun // - // * mon + // * mon // - // * tue + // * tue // - // * - // wed + // * wed // - // * thu + // * thu // - // * fri + // * + // fri // - // * sat + // * sat // // Example: sun:23:00-mon:01:30 PreferredMaintenanceWindow *string diff --git a/service/elasticache/api_op_DecreaseNodeGroupsInGlobalReplicationGroup.go b/service/elasticache/api_op_DecreaseNodeGroupsInGlobalReplicationGroup.go index ef9122e4b54..cd449912401 100644 --- a/service/elasticache/api_op_DecreaseNodeGroupsInGlobalReplicationGroup.go +++ b/service/elasticache/api_op_DecreaseNodeGroupsInGlobalReplicationGroup.go @@ -68,8 +68,8 @@ type DecreaseNodeGroupsInGlobalReplicationGroupOutput struct { // only reads. The primary cluster automatically replicates updates to the // secondary cluster. // - // * The GlobalReplicationGroupIdSuffix represents the name - // of the Global Datastore, which is what you use to associate a secondary cluster. + // * The GlobalReplicationGroupIdSuffix represents the name of + // the Global Datastore, which is what you use to associate a secondary cluster. GlobalReplicationGroup *types.GlobalReplicationGroup // Metadata pertaining to the operation's result. diff --git a/service/elasticache/api_op_DecreaseReplicaCount.go b/service/elasticache/api_op_DecreaseReplicaCount.go index 1bce178764c..4b4662a97ff 100644 --- a/service/elasticache/api_op_DecreaseReplicaCount.go +++ b/service/elasticache/api_op_DecreaseReplicaCount.go @@ -50,16 +50,16 @@ type DecreaseReplicaCountInput struct { // replication group's node groups. The minimum number of replicas in a shard or // replication group is: // - // * Redis (cluster mode disabled) + // * Redis (cluster mode disabled) // - // * If - // Multi-AZ is enabled: 1 + // * If Multi-AZ is + // enabled: 1 // - // * If Multi-AZ is not enabled: 0 + // * If Multi-AZ is not enabled: 0 // - // * Redis - // (cluster mode enabled): 0 (though you will not be able to failover to a replica - // if your primary node fails) + // * Redis (cluster mode enabled): 0 + // (though you will not be able to failover to a replica if your primary node + // fails) NewReplicaCount *int32 // A list of ConfigureShard objects that can be used to configure each shard in a diff --git a/service/elasticache/api_op_DeleteCacheCluster.go b/service/elasticache/api_op_DeleteCacheCluster.go index 698a97daa89..4dc0bc0da00 100644 --- a/service/elasticache/api_op_DeleteCacheCluster.go +++ b/service/elasticache/api_op_DeleteCacheCluster.go @@ -17,18 +17,18 @@ import ( // deleting the cluster; you cannot cancel or revert this operation. 
This operation // is not valid for: // -// * Redis (cluster mode enabled) clusters +// * Redis (cluster mode enabled) clusters // -// * A cluster -// that is the last read replica of a replication group +// * A cluster that is +// the last read replica of a replication group // -// * A node group (shard) -// that has Multi-AZ mode enabled +// * A node group (shard) that has +// Multi-AZ mode enabled // -// * A cluster from a Redis (cluster mode -// enabled) replication group +// * A cluster from a Redis (cluster mode enabled) +// replication group // -// * A cluster that is not in the available state +// * A cluster that is not in the available state func (c *Client) DeleteCacheCluster(ctx context.Context, params *DeleteCacheClusterInput, optFns ...func(*Options)) (*DeleteCacheClusterOutput, error) { if params == nil { params = &DeleteCacheClusterInput{} diff --git a/service/elasticache/api_op_DeleteGlobalReplicationGroup.go b/service/elasticache/api_op_DeleteGlobalReplicationGroup.go index 98993ed703b..d8d17ce8662 100644 --- a/service/elasticache/api_op_DeleteGlobalReplicationGroup.go +++ b/service/elasticache/api_op_DeleteGlobalReplicationGroup.go @@ -13,11 +13,11 @@ import ( // Deleting a Global Datastore is a two-step process: // -// * First, you must +// * First, you must // DisassociateGlobalReplicationGroup to remove the secondary clusters in the // Global Datastore. // -// * Once the Global Datastore contains only the primary +// * Once the Global Datastore contains only the primary // cluster, you can use DeleteGlobalReplicationGroup API to delete the Global // Datastore while retainining the primary cluster using Retain…= true. // @@ -62,8 +62,8 @@ type DeleteGlobalReplicationGroupOutput struct { // only reads. The primary cluster automatically replicates updates to the // secondary cluster. // - // * The GlobalReplicationGroupIdSuffix represents the name - // of the Global Datastore, which is what you use to associate a secondary cluster. + // * The GlobalReplicationGroupIdSuffix represents the name of + // the Global Datastore, which is what you use to associate a secondary cluster. GlobalReplicationGroup *types.GlobalReplicationGroup // Metadata pertaining to the operation's result. 
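The DeleteGlobalReplicationGroup documentation above describes a two-step teardown: first disassociate every secondary cluster, then delete the Global Datastore while retaining the primary. A minimal sketch of that flow follows, assuming the generally available aws-sdk-go-v2 client API and the input field names implied by the documented parameters (GlobalReplicationGroupId, ReplicationGroupId, ReplicationGroupRegion, RetainPrimaryReplicationGroup); all identifiers are hypothetical.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/elasticache"
)

func main() {
	ctx := context.TODO()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatalf("load config: %v", err)
	}
	client := elasticache.NewFromConfig(cfg)

	// Step 1: remove each secondary cluster from the Global Datastore.
	_, err = client.DisassociateGlobalReplicationGroup(ctx, &elasticache.DisassociateGlobalReplicationGroupInput{
		GlobalReplicationGroupId: aws.String("example-global-datastore"), // hypothetical
		ReplicationGroupId:       aws.String("example-secondary"),        // hypothetical
		ReplicationGroupRegion:   aws.String("us-east-1"),
	})
	if err != nil {
		log.Fatalf("disassociate secondary: %v", err)
	}

	// Step 2: with only the primary left, delete the Global Datastore while
	// retaining the primary replication group.
	_, err = client.DeleteGlobalReplicationGroup(ctx, &elasticache.DeleteGlobalReplicationGroupInput{
		GlobalReplicationGroupId:      aws.String("example-global-datastore"),
		RetainPrimaryReplicationGroup: aws.Bool(true),
	})
	if err != nil {
		log.Fatalf("delete global datastore: %v", err)
	}
}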
diff --git a/service/elasticache/api_op_DescribeCacheEngineVersions.go b/service/elasticache/api_op_DescribeCacheEngineVersions.go index 750ca326fc2..c3817291fbf 100644 --- a/service/elasticache/api_op_DescribeCacheEngineVersions.go +++ b/service/elasticache/api_op_DescribeCacheEngineVersions.go @@ -34,13 +34,13 @@ type DescribeCacheEngineVersionsInput struct { // values are: memcached1.4 | memcached1.5 | redis2.6 | redis2.8 | redis3.2 | // redis4.0 | redis5.0 | Constraints: // - // * Must be 1 to 255 alphanumeric + // * Must be 1 to 255 alphanumeric // characters // - // * First character must be a letter + // * First character must be a letter // - // * Cannot end with a - // hyphen or contain two consecutive hyphens + // * Cannot end with a hyphen or + // contain two consecutive hyphens CacheParameterGroupFamily *string // If true, specifies that only the default version of the specified engine or diff --git a/service/elasticache/api_op_DescribeReservedCacheNodes.go b/service/elasticache/api_op_DescribeReservedCacheNodes.go index de5373928eb..105bcd4fa7a 100644 --- a/service/elasticache/api_op_DescribeReservedCacheNodes.go +++ b/service/elasticache/api_op_DescribeReservedCacheNodes.go @@ -37,55 +37,54 @@ type DescribeReservedCacheNodesInput struct { // provide more memory and computational power at lower cost when compared to their // equivalent previous generation counterparts. // - // * General purpose: + // * General purpose: // - // * - // Current generation: M5 node types: cache.m5.large, cache.m5.xlarge, - // cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node - // types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, + // * Current + // generation: M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, + // cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: + // cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, // cache.m4.10xlarge T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium // T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium // - // * - // Previous generation: (not recommended) T1 node types: cache.t1.micro M1 node - // types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge M3 node - // types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge + // * Previous + // generation: (not recommended) T1 node types: cache.t1.micro M1 node types: + // cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge M3 node types: + // cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge // - // * - // Compute optimized: + // * Compute + // optimized: // - // * Previous generation: (not recommended) C1 node - // types: cache.c1.xlarge + // * Previous generation: (not recommended) C1 node types: + // cache.c1.xlarge // - // * Memory optimized: + // * Memory optimized: // - // * Current generation: - // R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, - // cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: - // cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, - // cache.r4.8xlarge, cache.r4.16xlarge + // * Current generation: R5 node types: + // cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, + // cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, + // cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, + // cache.r4.16xlarge // - // * Previous generation: (not - // recommended) M2 node types: 
cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge - // R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, + // * Previous generation: (not recommended) M2 node types: + // cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: + // cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, // - // - // cache.r3.4xlarge, cache.r3.8xlarge + // cache.r3.4xlarge, + // cache.r3.8xlarge // // Additional node type info // - // * All current - // generation instance types are created in Amazon VPC by default. + // * All current generation instance + // types are created in Amazon VPC by default. // - // * Redis - // append-only files (AOF) are not supported for T1 or T2 instances. + // * Redis append-only files (AOF) are + // not supported for T1 or T2 instances. // - // * Redis - // Multi-AZ with automatic failover is not supported on T1 instances. + // * Redis Multi-AZ with automatic failover + // is not supported on T1 instances. // - // * Redis - // configuration variables appendonly and appendfsync are not supported on Redis - // version 2.8.22 and later. + // * Redis configuration variables appendonly + // and appendfsync are not supported on Redis version 2.8.22 and later. CacheNodeType *string // The duration filter value, specified in years or seconds. Use this parameter to diff --git a/service/elasticache/api_op_DescribeReservedCacheNodesOfferings.go b/service/elasticache/api_op_DescribeReservedCacheNodesOfferings.go index de79490fa7d..a39d3ada01b 100644 --- a/service/elasticache/api_op_DescribeReservedCacheNodesOfferings.go +++ b/service/elasticache/api_op_DescribeReservedCacheNodesOfferings.go @@ -36,55 +36,54 @@ type DescribeReservedCacheNodesOfferingsInput struct { // provide more memory and computational power at lower cost when compared to their // equivalent previous generation counterparts. 
// - // * General purpose: + // * General purpose: // - // * - // Current generation: M5 node types: cache.m5.large, cache.m5.xlarge, - // cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node - // types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, + // * Current + // generation: M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, + // cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: + // cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, // cache.m4.10xlarge T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium // T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium // - // * - // Previous generation: (not recommended) T1 node types: cache.t1.micro M1 node - // types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge M3 node - // types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge + // * Previous + // generation: (not recommended) T1 node types: cache.t1.micro M1 node types: + // cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge M3 node types: + // cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge // - // * - // Compute optimized: + // * Compute + // optimized: // - // * Previous generation: (not recommended) C1 node - // types: cache.c1.xlarge + // * Previous generation: (not recommended) C1 node types: + // cache.c1.xlarge // - // * Memory optimized: + // * Memory optimized: // - // * Current generation: - // R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, - // cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: - // cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, - // cache.r4.8xlarge, cache.r4.16xlarge + // * Current generation: R5 node types: + // cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, + // cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, + // cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, + // cache.r4.16xlarge // - // * Previous generation: (not - // recommended) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge - // R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, + // * Previous generation: (not recommended) M2 node types: + // cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: + // cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, // - // - // cache.r3.4xlarge, cache.r3.8xlarge + // cache.r3.4xlarge, + // cache.r3.8xlarge // // Additional node type info // - // * All current - // generation instance types are created in Amazon VPC by default. + // * All current generation instance + // types are created in Amazon VPC by default. // - // * Redis - // append-only files (AOF) are not supported for T1 or T2 instances. + // * Redis append-only files (AOF) are + // not supported for T1 or T2 instances. // - // * Redis - // Multi-AZ with automatic failover is not supported on T1 instances. + // * Redis Multi-AZ with automatic failover + // is not supported on T1 instances. // - // * Redis - // configuration variables appendonly and appendfsync are not supported on Redis - // version 2.8.22 and later. + // * Redis configuration variables appendonly + // and appendfsync are not supported on Redis version 2.8.22 and later. CacheNodeType *string // Duration filter value, specified in years or seconds. 
Use this parameter to show diff --git a/service/elasticache/api_op_DisassociateGlobalReplicationGroup.go b/service/elasticache/api_op_DisassociateGlobalReplicationGroup.go index d2bb9d22afa..ca50e1a3e06 100644 --- a/service/elasticache/api_op_DisassociateGlobalReplicationGroup.go +++ b/service/elasticache/api_op_DisassociateGlobalReplicationGroup.go @@ -54,8 +54,8 @@ type DisassociateGlobalReplicationGroupOutput struct { // only reads. The primary cluster automatically replicates updates to the // secondary cluster. // - // * The GlobalReplicationGroupIdSuffix represents the name - // of the Global Datastore, which is what you use to associate a secondary cluster. + // * The GlobalReplicationGroupIdSuffix represents the name of + // the Global Datastore, which is what you use to associate a secondary cluster. GlobalReplicationGroup *types.GlobalReplicationGroup // Metadata pertaining to the operation's result. diff --git a/service/elasticache/api_op_FailoverGlobalReplicationGroup.go b/service/elasticache/api_op_FailoverGlobalReplicationGroup.go index 23e7e2897e9..e49ed32901c 100644 --- a/service/elasticache/api_op_FailoverGlobalReplicationGroup.go +++ b/service/elasticache/api_op_FailoverGlobalReplicationGroup.go @@ -54,8 +54,8 @@ type FailoverGlobalReplicationGroupOutput struct { // only reads. The primary cluster automatically replicates updates to the // secondary cluster. // - // * The GlobalReplicationGroupIdSuffix represents the name - // of the Global Datastore, which is what you use to associate a secondary cluster. + // * The GlobalReplicationGroupIdSuffix represents the name of + // the Global Datastore, which is what you use to associate a secondary cluster. GlobalReplicationGroup *types.GlobalReplicationGroup // Metadata pertaining to the operation's result. diff --git a/service/elasticache/api_op_IncreaseNodeGroupsInGlobalReplicationGroup.go b/service/elasticache/api_op_IncreaseNodeGroupsInGlobalReplicationGroup.go index 4a78784d8e5..93947644847 100644 --- a/service/elasticache/api_op_IncreaseNodeGroupsInGlobalReplicationGroup.go +++ b/service/elasticache/api_op_IncreaseNodeGroupsInGlobalReplicationGroup.go @@ -57,8 +57,8 @@ type IncreaseNodeGroupsInGlobalReplicationGroupOutput struct { // only reads. The primary cluster automatically replicates updates to the // secondary cluster. // - // * The GlobalReplicationGroupIdSuffix represents the name - // of the Global Datastore, which is what you use to associate a secondary cluster. + // * The GlobalReplicationGroupIdSuffix represents the name of + // the Global Datastore, which is what you use to associate a secondary cluster. GlobalReplicationGroup *types.GlobalReplicationGroup // Metadata pertaining to the operation's result. diff --git a/service/elasticache/api_op_ModifyCacheCluster.go b/service/elasticache/api_op_ModifyCacheCluster.go index 17678a779ba..bd24a2ac883 100644 --- a/service/elasticache/api_op_ModifyCacheCluster.go +++ b/service/elasticache/api_op_ModifyCacheCluster.go @@ -59,27 +59,27 @@ type ModifyCacheClusterInput struct { // This parameter must be specified with the auth-token-update parameter. 
Password // constraints: // - // * Must be only printable ASCII characters + // * Must be only printable ASCII characters // - // * Must be at - // least 16 characters and no more than 128 characters in length + // * Must be at least 16 + // characters and no more than 128 characters in length // - // * Cannot - // contain any of the following characters: '/', '"', or '@', '%' + // * Cannot contain any of + // the following characters: '/', '"', or '@', '%' // - // For more - // information, see AUTH password at AUTH (http://redis.io/commands/AUTH). + // For more information, see AUTH + // password at AUTH (http://redis.io/commands/AUTH). AuthToken *string // Specifies the strategy to use to update the AUTH token. This parameter must be // specified with the auth-token parameter. Possible values: // - // * Rotate + // * Rotate // - // * - // Set + // * Set // - // For more information, see Authenticating Users with Redis AUTH + // For + // more information, see Authenticating Users with Redis AUTH // (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/auth.html) AuthTokenUpdateStrategy types.AuthTokenUpdateStrategyType @@ -127,16 +127,16 @@ type ModifyCacheClusterInput struct { // in this list must match the cache nodes being added in this request. This option // is only supported on Memcached clusters. Scenarios: // - // * Scenario 1: You have - // 3 active nodes and wish to add 2 nodes. Specify NumCacheNodes=5 (3 + 2) and + // * Scenario 1: You have 3 + // active nodes and wish to add 2 nodes. Specify NumCacheNodes=5 (3 + 2) and // optionally specify two Availability Zones for the two new nodes. // - // * Scenario - // 2: You have 3 active nodes and 2 nodes pending creation (from the scenario 1 - // call) and want to add 1 more node. Specify NumCacheNodes=6 ((3 + 2) + 1) and + // * Scenario 2: + // You have 3 active nodes and 2 nodes pending creation (from the scenario 1 call) + // and want to add 1 more node. Specify NumCacheNodes=6 ((3 + 2) + 1) and // optionally specify an Availability Zone for the new node. // - // * Scenario 3: You + // * Scenario 3: You // want to cancel all pending operations. Specify NumCacheNodes=3 to cancel all // pending operations. // @@ -151,46 +151,45 @@ type ModifyCacheClusterInput struct { // (https://docs.aws.amazon.com/AmazonElastiCache/latest/mem-ug/CacheNodes.SupportedTypes.html). // Impact of new add/remove requests upon pending requests // - // * Scenario-1 + // * Scenario-1 // + // * Pending + // Action: Delete // - // * Pending Action: Delete + // * New Request: Delete // - // * New Request: Delete + // * Result: The new delete, pending or + // immediate, replaces the pending delete. // - // * Result: The - // new delete, pending or immediate, replaces the pending delete. + // * Scenario-2 // - // * - // Scenario-2 + // * Pending Action: + // Delete // - // * Pending Action: Delete - // - // * New Request: Create - // - // - // * Result: The new create, pending or immediate, replaces the pending delete. + // * New Request: Create // + // * Result: The new create, pending or immediate, + // replaces the pending delete. // // * Scenario-3 // - // * Pending Action: Create - // - // * New Request: Delete + // * Pending Action: Create // + // * New + // Request: Delete // - // * Result: The new delete, pending or immediate, replaces the pending create. - // + // * Result: The new delete, pending or immediate, replaces the + // pending create. 
// // * Scenario-4 // - // * Pending Action: Create - // - // * New Request: Create + // * Pending Action: Create // + // * New Request: + // Create // - // * Result: The new create is added to the pending create. Important: If the new - // create request is Apply Immediately - Yes, all creates are performed + // * Result: The new create is added to the pending create. Important: If + // the new create request is Apply Immediately - Yes, all creates are performed // immediately. If the new create request is Apply Immediately - No, all creates // are pending. NewAvailabilityZones []*string @@ -235,22 +234,22 @@ type ModifyCacheClusterInput struct { // Clock UTC). The minimum maintenance window is a 60 minute period. Valid values // for ddd are: // - // * sun - // - // * mon + // * sun // - // * tue + // * mon // - // * wed + // * tue // - // * thu + // * wed // - // * fri + // * thu // + // * fri // // * sat // - // Example: sun:23:00-mon:01:30 + // Example: + // sun:23:00-mon:01:30 PreferredMaintenanceWindow *string // Specifies the VPC Security Groups associated with the cluster. This parameter diff --git a/service/elasticache/api_op_ModifyCacheParameterGroup.go b/service/elasticache/api_op_ModifyCacheParameterGroup.go index 75ad0e2362f..b20ced164b3 100644 --- a/service/elasticache/api_op_ModifyCacheParameterGroup.go +++ b/service/elasticache/api_op_ModifyCacheParameterGroup.go @@ -47,10 +47,10 @@ type ModifyCacheParameterGroupInput struct { // Represents the output of one of the following operations: // -// * +// * // ModifyCacheParameterGroup // -// * ResetCacheParameterGroup +// * ResetCacheParameterGroup type ModifyCacheParameterGroupOutput struct { // The name of the cache parameter group. diff --git a/service/elasticache/api_op_ModifyCacheSubnetGroup.go b/service/elasticache/api_op_ModifyCacheSubnetGroup.go index 1495d14ea50..ec4eed072d4 100644 --- a/service/elasticache/api_op_ModifyCacheSubnetGroup.go +++ b/service/elasticache/api_op_ModifyCacheSubnetGroup.go @@ -48,10 +48,10 @@ type ModifyCacheSubnetGroupOutput struct { // Represents the output of one of the following operations: // - // * + // * // CreateCacheSubnetGroup // - // * ModifyCacheSubnetGroup + // * ModifyCacheSubnetGroup CacheSubnetGroup *types.CacheSubnetGroup // Metadata pertaining to the operation's result. diff --git a/service/elasticache/api_op_ModifyGlobalReplicationGroup.go b/service/elasticache/api_op_ModifyGlobalReplicationGroup.go index 59e2fee8e99..eaa7b279e7a 100644 --- a/service/elasticache/api_op_ModifyGlobalReplicationGroup.go +++ b/service/elasticache/api_op_ModifyGlobalReplicationGroup.go @@ -64,8 +64,8 @@ type ModifyGlobalReplicationGroupOutput struct { // only reads. The primary cluster automatically replicates updates to the // secondary cluster. // - // * The GlobalReplicationGroupIdSuffix represents the name - // of the Global Datastore, which is what you use to associate a secondary cluster. + // * The GlobalReplicationGroupIdSuffix represents the name of + // the Global Datastore, which is what you use to associate a secondary cluster. GlobalReplicationGroup *types.GlobalReplicationGroup // Metadata pertaining to the operation's result. diff --git a/service/elasticache/api_op_ModifyReplicationGroup.go b/service/elasticache/api_op_ModifyReplicationGroup.go index 17e8866a64d..1ca98cb667e 100644 --- a/service/elasticache/api_op_ModifyReplicationGroup.go +++ b/service/elasticache/api_op_ModifyReplicationGroup.go @@ -13,12 +13,12 @@ import ( // Modifies the settings for a replication group. 
// -// * Scaling for Amazon -// ElastiCache for Redis (cluster mode enabled) +// * Scaling for Amazon ElastiCache +// for Redis (cluster mode enabled) // (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/scaling-redis-cluster-mode-enabled.html) // in the ElastiCache User Guide // -// * ModifyReplicationGroupShardConfiguration +// * ModifyReplicationGroupShardConfiguration // (https://docs.aws.amazon.com/AmazonElastiCache/latest/APIReference/API_ModifyReplicationGroupShardConfiguration.html) // in the ElastiCache API Reference // @@ -58,27 +58,27 @@ type ModifyReplicationGroupInput struct { // This parameter must be specified with the auth-token-update-strategy parameter. // Password constraints: // - // * Must be only printable ASCII characters + // * Must be only printable ASCII characters // - // * Must - // be at least 16 characters and no more than 128 characters in length + // * Must be at + // least 16 characters and no more than 128 characters in length // - // * - // Cannot contain any of the following characters: '/', '"', or '@', '%' + // * Cannot contain + // any of the following characters: '/', '"', or '@', '%' // - // For more - // information, see AUTH password at AUTH (http://redis.io/commands/AUTH). + // For more information, + // see AUTH password at AUTH (http://redis.io/commands/AUTH). AuthToken *string // Specifies the strategy to use to update the AUTH token. This parameter must be // specified with the auth-token parameter. Possible values: // - // * Rotate + // * Rotate // - // * - // Set + // * Set // - // For more information, see Authenticating Users with Redis AUTH + // For + // more information, see Authenticating Users with Redis AUTH // (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/auth.html) AuthTokenUpdateStrategy types.AuthTokenUpdateStrategyType @@ -137,22 +137,22 @@ type ModifyReplicationGroupInput struct { // Clock UTC). The minimum maintenance window is a 60 minute period. Valid values // for ddd are: // - // * sun + // * sun // - // * mon + // * mon // - // * tue + // * tue // - // * wed + // * wed // - // * thu - // - // * fri + // * thu // + // * fri // // * sat // - // Example: sun:23:00-mon:01:30 + // Example: + // sun:23:00-mon:01:30 PreferredMaintenanceWindow *string // For replication groups with a single primary, if this parameter is specified, diff --git a/service/elasticache/api_op_RebalanceSlotsInGlobalReplicationGroup.go b/service/elasticache/api_op_RebalanceSlotsInGlobalReplicationGroup.go index 57e3750e83a..ad43e72cc49 100644 --- a/service/elasticache/api_op_RebalanceSlotsInGlobalReplicationGroup.go +++ b/service/elasticache/api_op_RebalanceSlotsInGlobalReplicationGroup.go @@ -48,8 +48,8 @@ type RebalanceSlotsInGlobalReplicationGroupOutput struct { // only reads. The primary cluster automatically replicates updates to the // secondary cluster. // - // * The GlobalReplicationGroupIdSuffix represents the name - // of the Global Datastore, which is what you use to associate a secondary cluster. + // * The GlobalReplicationGroupIdSuffix represents the name of + // the Global Datastore, which is what you use to associate a secondary cluster. GlobalReplicationGroup *types.GlobalReplicationGroup // Metadata pertaining to the operation's result. 
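The AuthToken / AuthTokenUpdateStrategy pair documented above (typically Rotate followed by Set) is how a replication group's Redis AUTH password is changed in place. Below is a minimal sketch of a rotation call, assuming the generally available aws-sdk-go-v2 API; the AuthTokenUpdateStrategyTypeRotate constant name follows the enum renaming convention applied elsewhere in this patch and, like the identifiers, is an assumption.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/elasticache"
	"github.com/aws/aws-sdk-go-v2/service/elasticache/types"
)

func main() {
	ctx := context.TODO()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatalf("load config: %v", err)
	}
	client := elasticache.NewFromConfig(cfg)

	// The new token must be 16-128 printable ASCII characters and must not
	// contain '/', '"', '@', or '%' (see the password constraints above).
	_, err = client.ModifyReplicationGroup(ctx, &elasticache.ModifyReplicationGroupInput{
		ReplicationGroupId:      aws.String("example-repl-group"), // hypothetical
		AuthToken:               aws.String("example-token-at-least-16-chars"),
		AuthTokenUpdateStrategy: types.AuthTokenUpdateStrategyTypeRotate, // assumed constant name
		ApplyImmediately:        aws.Bool(true),
	})
	if err != nil {
		log.Fatalf("rotate auth token: %v", err)
	}
}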
diff --git a/service/elasticache/api_op_ResetCacheParameterGroup.go b/service/elasticache/api_op_ResetCacheParameterGroup.go index b0a9288e658..6c84ef2229e 100644 --- a/service/elasticache/api_op_ResetCacheParameterGroup.go +++ b/service/elasticache/api_op_ResetCacheParameterGroup.go @@ -52,10 +52,10 @@ type ResetCacheParameterGroupInput struct { // Represents the output of one of the following operations: // -// * +// * // ModifyCacheParameterGroup // -// * ResetCacheParameterGroup +// * ResetCacheParameterGroup type ResetCacheParameterGroupOutput struct { // The name of the cache parameter group. diff --git a/service/elasticache/api_op_RevokeCacheSecurityGroupIngress.go b/service/elasticache/api_op_RevokeCacheSecurityGroupIngress.go index 7bfe5594604..1d82e7dc301 100644 --- a/service/elasticache/api_op_RevokeCacheSecurityGroupIngress.go +++ b/service/elasticache/api_op_RevokeCacheSecurityGroupIngress.go @@ -53,12 +53,12 @@ type RevokeCacheSecurityGroupIngressOutput struct { // Represents the output of one of the following operations: // - // * + // * // AuthorizeCacheSecurityGroupIngress // - // * CreateCacheSecurityGroup + // * CreateCacheSecurityGroup // - // * + // * // RevokeCacheSecurityGroupIngress CacheSecurityGroup *types.CacheSecurityGroup diff --git a/service/elasticache/api_op_TestFailover.go b/service/elasticache/api_op_TestFailover.go index 681507b67d2..f6bdfea4fd5 100644 --- a/service/elasticache/api_op_TestFailover.go +++ b/service/elasticache/api_op_TestFailover.go @@ -15,47 +15,46 @@ import ( // on a specified node group (called shard in the console) in a replication group // (called cluster in the console). Note the following // -// * A customer can use -// this operation to test automatic failover on up to 5 shards (called node groups -// in the ElastiCache API and AWS CLI) in any rolling 24-hour period. +// * A customer can use this +// operation to test automatic failover on up to 5 shards (called node groups in +// the ElastiCache API and AWS CLI) in any rolling 24-hour period. // -// * If -// calling this operation on shards in different clusters (called replication -// groups in the API and CLI), the calls can be made concurrently. +// * If calling +// this operation on shards in different clusters (called replication groups in the +// API and CLI), the calls can be made concurrently. // -// * If -// calling this operation multiple times on different shards in the same Redis -// (cluster mode enabled) replication group, the first node replacement must -// complete before a subsequent call can be made. +// * If calling this operation +// multiple times on different shards in the same Redis (cluster mode enabled) +// replication group, the first node replacement must complete before a subsequent +// call can be made. // -// * To determine whether the -// node replacement is complete you can check Events using the Amazon ElastiCache -// console, the AWS CLI, or the ElastiCache API. Look for the following automatic -// failover related events, listed here in order of occurrance: +// * To determine whether the node replacement is complete you +// can check Events using the Amazon ElastiCache console, the AWS CLI, or the +// ElastiCache API. 
Look for the following automatic failover related events, +// listed here in order of occurrance: // -// * -// Replication group message: Test Failover API called for node group +// * Replication group message: Test Failover +// API called for node group // -// * -// Cache cluster message: Failover from master node to replica node completed +// * Cache cluster message: Failover from master node to +// replica node completed // +// * Replication group message: Failover from master node +// to replica node completed // -// * Replication group message: Failover from master node to replica node -// completed +// * Cache cluster message: Recovering cache nodes // -// * Cache cluster message: Recovering cache nodes -// -// * +// * // Cache cluster message: Finished recovery for cache nodes // -// For more -// information see: +// For more information +// see: // -// * Viewing ElastiCache Events +// * Viewing ElastiCache Events // (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/ECEvents.Viewing.html) // in the ElastiCache User Guide // -// * DescribeEvents +// * DescribeEvents // (https://docs.aws.amazon.com/AmazonElastiCache/latest/APIReference/API_DescribeEvents.html) // in the ElastiCache API Reference // diff --git a/service/elasticache/types/enums.go b/service/elasticache/types/enums.go index 328f1f0e08f..871d20c36a0 100644 --- a/service/elasticache/types/enums.go +++ b/service/elasticache/types/enums.go @@ -6,8 +6,8 @@ type AuthenticationType string // Enum values for AuthenticationType const ( - AuthenticationTypePassword AuthenticationType = "password" - AuthenticationTypeNo_password AuthenticationType = "no-password" + AuthenticationTypePassword AuthenticationType = "password" + AuthenticationTypeNoPassword AuthenticationType = "no-password" ) // Values returns all known values for AuthenticationType. Note that this can be @@ -84,8 +84,8 @@ type AZMode string // Enum values for AZMode const ( - AZModeSingle_az AZMode = "single-az" - AZModeCross_az AZMode = "cross-az" + AZModeSingleAz AZMode = "single-az" + AZModeCrossAz AZMode = "cross-az" ) // Values returns all known values for AZMode. Note that this can be expanded in @@ -156,12 +156,12 @@ type NodeUpdateStatus string // Enum values for NodeUpdateStatus const ( - NodeUpdateStatusNot_applied NodeUpdateStatus = "not-applied" - NodeUpdateStatusWaiting_to_start NodeUpdateStatus = "waiting-to-start" - NodeUpdateStatusIn_progress NodeUpdateStatus = "in-progress" - NodeUpdateStatusStopping NodeUpdateStatus = "stopping" - NodeUpdateStatusStopped NodeUpdateStatus = "stopped" - NodeUpdateStatusComplete NodeUpdateStatus = "complete" + NodeUpdateStatusNotApplied NodeUpdateStatus = "not-applied" + NodeUpdateStatusWaitingToStart NodeUpdateStatus = "waiting-to-start" + NodeUpdateStatusInProgress NodeUpdateStatus = "in-progress" + NodeUpdateStatusStopping NodeUpdateStatus = "stopping" + NodeUpdateStatusStopped NodeUpdateStatus = "stopped" + NodeUpdateStatusComplete NodeUpdateStatus = "complete" ) // Values returns all known values for NodeUpdateStatus. Note that this can be @@ -182,8 +182,8 @@ type OutpostMode string // Enum values for OutpostMode const ( - OutpostModeSingle_outpost OutpostMode = "single-outpost" - OutpostModeCross_outpost OutpostMode = "cross-outpost" + OutpostModeSingleOutpost OutpostMode = "single-outpost" + OutpostModeCrossOutpost OutpostMode = "cross-outpost" ) // Values returns all known values for OutpostMode. 
Note that this can be expanded @@ -261,7 +261,7 @@ type ServiceUpdateType string // Enum values for ServiceUpdateType const ( - ServiceUpdateTypeSecurity_update ServiceUpdateType = "security-update" + ServiceUpdateTypeSecurityUpdate ServiceUpdateType = "security-update" ) // Values returns all known values for ServiceUpdateType. Note that this can be @@ -325,15 +325,15 @@ type UpdateActionStatus string // Enum values for UpdateActionStatus const ( - UpdateActionStatusNot_applied UpdateActionStatus = "not-applied" - UpdateActionStatusWaiting_to_start UpdateActionStatus = "waiting-to-start" - UpdateActionStatusIn_progress UpdateActionStatus = "in-progress" - UpdateActionStatusStopping UpdateActionStatus = "stopping" - UpdateActionStatusStopped UpdateActionStatus = "stopped" - UpdateActionStatusComplete UpdateActionStatus = "complete" - UpdateActionStatusScheduling UpdateActionStatus = "scheduling" - UpdateActionStatusScheduled UpdateActionStatus = "scheduled" - UpdateActionStatusNot_applicable UpdateActionStatus = "not-applicable" + UpdateActionStatusNotApplied UpdateActionStatus = "not-applied" + UpdateActionStatusWaitingToStart UpdateActionStatus = "waiting-to-start" + UpdateActionStatusInProgress UpdateActionStatus = "in-progress" + UpdateActionStatusStopping UpdateActionStatus = "stopping" + UpdateActionStatusStopped UpdateActionStatus = "stopped" + UpdateActionStatusComplete UpdateActionStatus = "complete" + UpdateActionStatusScheduling UpdateActionStatus = "scheduling" + UpdateActionStatusScheduled UpdateActionStatus = "scheduled" + UpdateActionStatusNotApplicable UpdateActionStatus = "not-applicable" ) // Values returns all known values for UpdateActionStatus. Note that this can be diff --git a/service/elasticache/types/errors.go b/service/elasticache/types/errors.go index ba84c0df4fa..cff7e26a69a 100644 --- a/service/elasticache/types/errors.go +++ b/service/elasticache/types/errors.go @@ -1037,11 +1037,11 @@ func (e *SnapshotAlreadyExistsFault) ErrorFault() smithy.ErrorFault { return smi // You attempted one of the following operations: // -// * Creating a snapshot of a -// Redis cluster running on a cache.t1.micro cache node. +// * Creating a snapshot of a Redis +// cluster running on a cache.t1.micro cache node. // -// * Creating a snapshot -// of a cluster that is running Memcached rather than Redis. +// * Creating a snapshot of a +// cluster that is running Memcached rather than Redis. // // Neither of these are // supported by ElastiCache. diff --git a/service/elasticache/types/types.go b/service/elasticache/types/types.go index 5aa3f74f757..0da0505d1be 100644 --- a/service/elasticache/types/types.go +++ b/service/elasticache/types/types.go @@ -63,56 +63,54 @@ type CacheCluster struct { // current generation types provide more memory and computational power at lower // cost when compared to their equivalent previous generation counterparts. 
// - // * + // * // General purpose: // - // * Current generation: M5 node types: cache.m5.large, + // * Current generation: M5 node types: cache.m5.large, // cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, // cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, // cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T3 node types: // cache.t3.micro, cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, // cache.t2.small, cache.t2.medium // - // * Previous generation: (not - // recommended) T1 node types: cache.t1.micro M1 node types: cache.m1.small, - // cache.m1.medium, cache.m1.large, cache.m1.xlarge M3 node types: cache.m3.medium, - // cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge + // * Previous generation: (not recommended) T1 + // node types: cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, + // cache.m1.large, cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, + // cache.m3.xlarge, cache.m3.2xlarge // - // * Compute optimized: + // * Compute optimized: // + // * Previous generation: + // (not recommended) C1 node types: cache.c1.xlarge // - // * Previous generation: (not recommended) C1 node types: cache.c1.xlarge - // - // * - // Memory optimized: - // - // * Current generation: R5 node types: cache.r5.large, - // cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, - // cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, - // cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge + // * Memory optimized: // + // * Current + // generation: R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, + // cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: + // cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, + // cache.r4.8xlarge, cache.r4.16xlarge // - // * Previous generation: (not recommended) M2 node types: cache.m2.xlarge, - // cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, - // cache.r3.xlarge, cache.r3.2xlarge, + // * Previous generation: (not recommended) M2 + // node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: + // cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, // - // cache.r3.4xlarge, + // cache.r3.4xlarge, // cache.r3.8xlarge // // Additional node type info // - // * All current generation - // instance types are created in Amazon VPC by default. + // * All current generation instance + // types are created in Amazon VPC by default. // - // * Redis append-only - // files (AOF) are not supported for T1 or T2 instances. + // * Redis append-only files (AOF) are + // not supported for T1 or T2 instances. // - // * Redis Multi-AZ with - // automatic failover is not supported on T1 instances. + // * Redis Multi-AZ with automatic failover + // is not supported on T1 instances. // - // * Redis configuration - // variables appendonly and appendfsync are not supported on Redis version 2.8.22 - // and later. + // * Redis configuration variables appendonly + // and appendfsync are not supported on Redis version 2.8.22 and later. CacheNodeType *string // A list of cache nodes that are members of the cluster. @@ -166,22 +164,22 @@ type CacheCluster struct { // Clock UTC). The minimum maintenance window is a 60 minute period. 
Valid values // for ddd are: // - // * sun - // - // * mon + // * sun // - // * tue + // * mon // - // * wed + // * tue // - // * thu + // * wed // - // * fri + // * thu // + // * fri // // * sat // - // Example: sun:23:00-mon:01:30 + // Example: + // sun:23:00-mon:01:30 PreferredMaintenanceWindow *string // The outpost ARN in which the cache cluster is created. @@ -240,55 +238,54 @@ type CacheEngineVersion struct { // computational power at lower cost when compared to their equivalent previous // generation counterparts. // -// * General purpose: +// * General purpose: // -// * Current generation: -// M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, -// cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: -// cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, -// cache.m4.10xlarge T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium -// T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium +// * Current generation: M5 node +// types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, +// cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, +// cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T3 node +// types: cache.t3.micro, cache.t3.small, cache.t3.medium T2 node types: +// cache.t2.micro, cache.t2.small, cache.t2.medium // -// * -// Previous generation: (not recommended) T1 node types: cache.t1.micro M1 node -// types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge M3 node -// types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge +// * Previous generation: (not +// recommended) T1 node types: cache.t1.micro M1 node types: cache.m1.small, +// cache.m1.medium, cache.m1.large, cache.m1.xlarge M3 node types: cache.m3.medium, +// cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge // -// * -// Compute optimized: +// * Compute optimized: // -// * Previous generation: (not recommended) C1 node -// types: cache.c1.xlarge +// * +// Previous generation: (not recommended) C1 node types: cache.c1.xlarge // -// * Memory optimized: +// * Memory +// optimized: // -// * Current generation: -// R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, -// cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: -// cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, -// cache.r4.8xlarge, cache.r4.16xlarge +// * Current generation: R5 node types: cache.r5.large, +// cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, +// cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, +// cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge // -// * Previous generation: (not -// recommended) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge -// R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, +// * +// Previous generation: (not recommended) M2 node types: cache.m2.xlarge, +// cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, +// cache.r3.xlarge, cache.r3.2xlarge, // -// -// cache.r3.4xlarge, cache.r3.8xlarge +// cache.r3.4xlarge, +// cache.r3.8xlarge // // Additional node type info // -// * All current -// generation instance types are created in Amazon VPC by default. +// * All current generation instance +// types are created in Amazon VPC by default. // -// * Redis -// append-only files (AOF) are not supported for T1 or T2 instances. +// * Redis append-only files (AOF) are +// not supported for T1 or T2 instances. 
// -// * Redis -// Multi-AZ with automatic failover is not supported on T1 instances. +// * Redis Multi-AZ with automatic failover +// is not supported on T1 instances. // -// * Redis -// configuration variables appendonly and appendfsync are not supported on Redis -// version 2.8.22 and later. +// * Redis configuration variables appendonly +// and appendfsync are not supported on Redis version 2.8.22 and later. type CacheNode struct { // The date and time when the cache node was created. @@ -434,12 +431,12 @@ type CacheParameterGroupStatus struct { // Represents the output of one of the following operations: // -// * +// * // AuthorizeCacheSecurityGroupIngress // -// * CreateCacheSecurityGroup +// * CreateCacheSecurityGroup // -// * +// * // RevokeCacheSecurityGroupIngress type CacheSecurityGroup struct { @@ -474,10 +471,10 @@ type CacheSecurityGroupMembership struct { // Represents the output of one of the following operations: // -// * +// * // CreateCacheSubnetGroup // -// * ModifyCacheSubnetGroup +// * ModifyCacheSubnetGroup type CacheSubnetGroup struct { // The ARN (Amazon Resource Name) of the cache subnet group. @@ -506,16 +503,15 @@ type ConfigureShard struct { // type of Redis replication group you are working with. The minimum number of // replicas in a shard or replication group is: // - // * Redis (cluster mode - // disabled) + // * Redis (cluster mode disabled) // - // * If Multi-AZ: 1 + // * + // If Multi-AZ: 1 // - // * If Multi-AZ: 0 + // * If Multi-AZ: 0 // - // * Redis - // (cluster mode enabled): 0 (though you will not be able to failover to a replica - // if your primary node fails) + // * Redis (cluster mode enabled): 0 (though you + // will not be able to failover to a replica if your primary node fails) // // This member is required. NewReplicaCount *int32 @@ -642,8 +638,8 @@ type GlobalNodeGroup struct { // only reads. The primary cluster automatically replicates updates to the // secondary cluster. // -// * The GlobalReplicationGroupIdSuffix represents the name -// of the Global Datastore, which is what you use to associate a secondary cluster. +// * The GlobalReplicationGroupIdSuffix represents the name of +// the Global Datastore, which is what you use to associate a secondary cluster. type GlobalReplicationGroup struct { // The ARN (Amazon Resource Name) of the global replication group. @@ -1147,55 +1143,54 @@ type ReservedCacheNode struct { // provide more memory and computational power at lower cost when compared to their // equivalent previous generation counterparts. 
// - // * General purpose: + // * General purpose: // - // * - // Current generation: M5 node types: cache.m5.large, cache.m5.xlarge, - // cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node - // types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, + // * Current + // generation: M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, + // cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: + // cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, // cache.m4.10xlarge T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium // T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium // - // * - // Previous generation: (not recommended) T1 node types: cache.t1.micro M1 node - // types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge M3 node - // types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge - // - // * - // Compute optimized: + // * Previous + // generation: (not recommended) T1 node types: cache.t1.micro M1 node types: + // cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge M3 node types: + // cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge // - // * Previous generation: (not recommended) C1 node - // types: cache.c1.xlarge + // * Compute + // optimized: // - // * Memory optimized: + // * Previous generation: (not recommended) C1 node types: + // cache.c1.xlarge // - // * Current generation: - // R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, - // cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: - // cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, - // cache.r4.8xlarge, cache.r4.16xlarge + // * Memory optimized: // - // * Previous generation: (not - // recommended) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge - // R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, + // * Current generation: R5 node types: + // cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, + // cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, + // cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, + // cache.r4.16xlarge // + // * Previous generation: (not recommended) M2 node types: + // cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: + // cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, // - // cache.r3.4xlarge, cache.r3.8xlarge + // cache.r3.4xlarge, + // cache.r3.8xlarge // // Additional node type info // - // * All current - // generation instance types are created in Amazon VPC by default. + // * All current generation instance + // types are created in Amazon VPC by default. // - // * Redis - // append-only files (AOF) are not supported for T1 or T2 instances. + // * Redis append-only files (AOF) are + // not supported for T1 or T2 instances. // - // * Redis - // Multi-AZ with automatic failover is not supported on T1 instances. + // * Redis Multi-AZ with automatic failover + // is not supported on T1 instances. // - // * Redis - // configuration variables appendonly and appendfsync are not supported on Redis - // version 2.8.22 and later. + // * Redis configuration variables appendonly + // and appendfsync are not supported on Redis version 2.8.22 and later. CacheNodeType *string // The duration of the reservation in seconds. 
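Since the node-type families documented above feed directly into reservation purchases, a common use of this metadata is querying offerings for a specific current-generation type. A minimal sketch, assuming the generally available aws-sdk-go-v2 API and the documented filter fields (CacheNodeType, Duration, ProductDescription); the concrete values are examples only.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/elasticache"
)

func main() {
	ctx := context.TODO()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatalf("load config: %v", err)
	}
	client := elasticache.NewFromConfig(cfg)

	// List one-year reserved-node offerings for a current-generation R5 type.
	out, err := client.DescribeReservedCacheNodesOfferings(ctx, &elasticache.DescribeReservedCacheNodesOfferingsInput{
		CacheNodeType:      aws.String("cache.r5.large"),
		ProductDescription: aws.String("redis"),
		Duration:           aws.String("31536000"), // one year, expressed in seconds
	})
	if err != nil {
		log.Fatalf("describe offerings: %v", err)
	}
	for _, offering := range out.ReservedCacheNodesOfferings {
		fmt.Println(aws.ToString(offering.ReservedCacheNodesOfferingId), aws.ToString(offering.CacheNodeType))
	}
}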
@@ -1241,55 +1236,54 @@ type ReservedCacheNodesOffering struct { // provide more memory and computational power at lower cost when compared to their // equivalent previous generation counterparts. // - // * General purpose: + // * General purpose: // - // * - // Current generation: M5 node types: cache.m5.large, cache.m5.xlarge, - // cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node - // types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, + // * Current + // generation: M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, + // cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: + // cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, // cache.m4.10xlarge T3 node types: cache.t3.micro, cache.t3.small, cache.t3.medium // T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium // - // * - // Previous generation: (not recommended) T1 node types: cache.t1.micro M1 node - // types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge M3 node - // types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge + // * Previous + // generation: (not recommended) T1 node types: cache.t1.micro M1 node types: + // cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge M3 node types: + // cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge // - // * - // Compute optimized: + // * Compute + // optimized: // - // * Previous generation: (not recommended) C1 node - // types: cache.c1.xlarge + // * Previous generation: (not recommended) C1 node types: + // cache.c1.xlarge // - // * Memory optimized: + // * Memory optimized: // - // * Current generation: - // R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, - // cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: - // cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, - // cache.r4.8xlarge, cache.r4.16xlarge + // * Current generation: R5 node types: + // cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, + // cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, + // cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, + // cache.r4.16xlarge // - // * Previous generation: (not - // recommended) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge - // R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, + // * Previous generation: (not recommended) M2 node types: + // cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: + // cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, // - // - // cache.r3.4xlarge, cache.r3.8xlarge + // cache.r3.4xlarge, + // cache.r3.8xlarge // // Additional node type info // - // * All current - // generation instance types are created in Amazon VPC by default. + // * All current generation instance + // types are created in Amazon VPC by default. // - // * Redis - // append-only files (AOF) are not supported for T1 or T2 instances. + // * Redis append-only files (AOF) are + // not supported for T1 or T2 instances. // - // * Redis - // Multi-AZ with automatic failover is not supported on T1 instances. + // * Redis Multi-AZ with automatic failover + // is not supported on T1 instances. // - // * Redis - // configuration variables appendonly and appendfsync are not supported on Redis - // version 2.8.22 and later. + // * Redis configuration variables appendonly + // and appendfsync are not supported on Redis version 2.8.22 and later. 
CacheNodeType *string // The duration of the offering. in seconds. @@ -1421,56 +1415,54 @@ type Snapshot struct { // current generation types provide more memory and computational power at lower // cost when compared to their equivalent previous generation counterparts. // - // * + // * // General purpose: // - // * Current generation: M5 node types: cache.m5.large, + // * Current generation: M5 node types: cache.m5.large, // cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, // cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, // cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T3 node types: // cache.t3.micro, cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, // cache.t2.small, cache.t2.medium // - // * Previous generation: (not - // recommended) T1 node types: cache.t1.micro M1 node types: cache.m1.small, - // cache.m1.medium, cache.m1.large, cache.m1.xlarge M3 node types: cache.m3.medium, - // cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge - // - // * Compute optimized: + // * Previous generation: (not recommended) T1 + // node types: cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, + // cache.m1.large, cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, + // cache.m3.xlarge, cache.m3.2xlarge // + // * Compute optimized: // - // * Previous generation: (not recommended) C1 node types: cache.c1.xlarge + // * Previous generation: + // (not recommended) C1 node types: cache.c1.xlarge // - // * - // Memory optimized: - // - // * Current generation: R5 node types: cache.r5.large, - // cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, - // cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, - // cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge + // * Memory optimized: // + // * Current + // generation: R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, + // cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: + // cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, + // cache.r4.8xlarge, cache.r4.16xlarge // - // * Previous generation: (not recommended) M2 node types: cache.m2.xlarge, - // cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, - // cache.r3.xlarge, cache.r3.2xlarge, + // * Previous generation: (not recommended) M2 + // node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: + // cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, // - // cache.r3.4xlarge, + // cache.r3.4xlarge, // cache.r3.8xlarge // // Additional node type info // - // * All current generation - // instance types are created in Amazon VPC by default. + // * All current generation instance + // types are created in Amazon VPC by default. // - // * Redis append-only - // files (AOF) are not supported for T1 or T2 instances. + // * Redis append-only files (AOF) are + // not supported for T1 or T2 instances. // - // * Redis Multi-AZ with - // automatic failover is not supported on T1 instances. + // * Redis Multi-AZ with automatic failover + // is not supported on T1 instances. // - // * Redis configuration - // variables appendonly and appendfsync are not supported on Redis version 2.8.22 - // and later. + // * Redis configuration variables appendonly + // and appendfsync are not supported on Redis version 2.8.22 and later. CacheNodeType *string // The cache parameter group that is associated with the source cluster. @@ -1512,22 +1504,22 @@ type Snapshot struct { // Clock UTC). 
The minimum maintenance window is a 60 minute period. Valid values // for ddd are: // - // * sun - // - // * mon + // * sun // - // * tue + // * mon // - // * wed + // * tue // - // * thu + // * wed // - // * fri + // * thu // + // * fri // // * sat // - // Example: sun:23:00-mon:01:30 + // Example: + // sun:23:00-mon:01:30 PreferredMaintenanceWindow *string // The ARN (Amazon Resource Name) of the preferred outpost. diff --git a/service/elasticbeanstalk/api_op_CheckDNSAvailability.go b/service/elasticbeanstalk/api_op_CheckDNSAvailability.go index 7947ecc699a..511719cb21d 100644 --- a/service/elasticbeanstalk/api_op_CheckDNSAvailability.go +++ b/service/elasticbeanstalk/api_op_CheckDNSAvailability.go @@ -40,10 +40,10 @@ type CheckDNSAvailabilityOutput struct { // Indicates if the specified CNAME is available: // - // * true : The CNAME is + // * true : The CNAME is // available. // - // * false : The CNAME is not available. + // * false : The CNAME is not available. Available *bool // The fully qualified CNAME to reserve when CreateEnvironment is called with the diff --git a/service/elasticbeanstalk/api_op_CreateConfigurationTemplate.go b/service/elasticbeanstalk/api_op_CreateConfigurationTemplate.go index dc53dfde98f..05309322451 100644 --- a/service/elasticbeanstalk/api_op_CreateConfigurationTemplate.go +++ b/service/elasticbeanstalk/api_op_CreateConfigurationTemplate.go @@ -19,12 +19,12 @@ import ( // configuration settings. Templates aren't associated with any environment. The // EnvironmentName response element is always null. Related Topics // -// * +// * // DescribeConfigurationOptions // -// * DescribeConfigurationSettings +// * DescribeConfigurationSettings // -// * +// * // ListAvailableSolutionStacks func (c *Client) CreateConfigurationTemplate(ctx context.Context, params *CreateConfigurationTemplateInput, optFns ...func(*Options)) (*CreateConfigurationTemplateOutput, error) { if params == nil { @@ -123,17 +123,17 @@ type CreateConfigurationTemplateOutput struct { // DeploymentStatus parameter indicates the deployment status of this configuration // set: // - // * null: This configuration is not associated with a running + // * null: This configuration is not associated with a running // environment. // - // * pending: This is a draft configuration that is not deployed - // to the associated environment but is in the process of deploying. + // * pending: This is a draft configuration that is not deployed to + // the associated environment but is in the process of deploying. // - // * - // deployed: This is the configuration that is currently deployed to the associated - // running environment. + // * deployed: This + // is the configuration that is currently deployed to the associated running + // environment. // - // * failed: This is a draft configuration that failed to + // * failed: This is a draft configuration that failed to // successfully deploy. DeploymentStatus types.ConfigurationDeploymentStatus diff --git a/service/elasticbeanstalk/api_op_CreateEnvironment.go b/service/elasticbeanstalk/api_op_CreateEnvironment.go index 8bbd3c51c77..60cb26a5960 100644 --- a/service/elasticbeanstalk/api_op_CreateEnvironment.go +++ b/service/elasticbeanstalk/api_op_CreateEnvironment.go @@ -159,20 +159,20 @@ type CreateEnvironmentOutput struct { // Describes the health status of the environment. AWS Elastic Beanstalk indicates // the failure levels for a running environment: // - // * Red: Indicates the - // environment is not responsive. 
Occurs when three or more consecutive failures - // occur for an environment. + // * Red: Indicates the environment + // is not responsive. Occurs when three or more consecutive failures occur for an + // environment. // - // * Yellow: Indicates that something is wrong. - // Occurs when two consecutive failures occur for an environment. + // * Yellow: Indicates that something is wrong. Occurs when two + // consecutive failures occur for an environment. // - // * Green: - // Indicates the environment is healthy and fully functional. + // * Green: Indicates the + // environment is healthy and fully functional. // - // * Grey: Default - // health for a new environment. The environment is not fully launched and health - // checks have not started or health checks are suspended during an - // UpdateEnvironment or RestartEnvironment request. + // * Grey: Default health for a new + // environment. The environment is not fully launched and health checks have not + // started or health checks are suspended during an UpdateEnvironment or + // RestartEnvironment request. // // Default: Grey Health types.EnvironmentHealth @@ -199,20 +199,20 @@ type CreateEnvironmentOutput struct { // The current operational status of the environment: // - // * Launching: Environment - // is in the process of initial deployment. + // * Launching: Environment is + // in the process of initial deployment. // - // * Updating: Environment is in the - // process of updating its configuration settings or application version. + // * Updating: Environment is in the process + // of updating its configuration settings or application version. // - // * - // Ready: Environment is available to have an action performed on it, such as - // update or terminate. + // * Ready: + // Environment is available to have an action performed on it, such as update or + // terminate. // - // * Terminating: Environment is in the shut-down - // process. + // * Terminating: Environment is in the shut-down process. // - // * Terminated: Environment is not running. + // * + // Terminated: Environment is not running. Status types.EnvironmentStatus // The name of the configuration template used to originally launch this diff --git a/service/elasticbeanstalk/api_op_DescribeConfigurationSettings.go b/service/elasticbeanstalk/api_op_DescribeConfigurationSettings.go index 5b50f032105..6fd36c56503 100644 --- a/service/elasticbeanstalk/api_op_DescribeConfigurationSettings.go +++ b/service/elasticbeanstalk/api_op_DescribeConfigurationSettings.go @@ -19,7 +19,7 @@ import ( // draft configuration of an environment that is either in the process of // deployment or that failed to deploy. Related Topics // -// * +// * // DeleteEnvironmentConfiguration func (c *Client) DescribeConfigurationSettings(ctx context.Context, params *DescribeConfigurationSettingsInput, optFns ...func(*Options)) (*DescribeConfigurationSettingsOutput, error) { if params == nil { diff --git a/service/elasticbeanstalk/api_op_ListPlatformBranches.go b/service/elasticbeanstalk/api_op_ListPlatformBranches.go index edd9644f6bf..baaec144aaf 100644 --- a/service/elasticbeanstalk/api_op_ListPlatformBranches.go +++ b/service/elasticbeanstalk/api_op_ListPlatformBranches.go @@ -39,30 +39,29 @@ type ListPlatformBranchesInput struct { // Most operators take a single value. The in and not_in operators can take // multiple values. 
// - // * Attribute = BranchName: + // * Attribute = BranchName: // - // * Operator: = | != | - // begins_with | ends_with | contains | in | not_in + // * Operator: = | != | begins_with | + // ends_with | contains | in | not_in // - // * Attribute = - // LifecycleState: + // * Attribute = LifecycleState: // - // * Operator: = | != | in | not_in + // * Operator: = + // | != | in | not_in // - // * Values: - // beta | supported | deprecated | retired + // * Values: beta | supported | deprecated | retired // - // * Attribute = PlatformName: + // * + // Attribute = PlatformName: // + // * Operator: = | != | begins_with | ends_with | + // contains | in | not_in // - // * Operator: = | != | begins_with | ends_with | contains | in | not_in + // * Attribute = TierType: // - // * - // Attribute = TierType: + // * Operator: = | != // - // * Operator: = | != - // - // * Values: + // * Values: // WebServer/Standard | Worker/SQS/HTTP // // Array size: limited to 10 SearchFilter diff --git a/service/elasticbeanstalk/api_op_RequestEnvironmentInfo.go b/service/elasticbeanstalk/api_op_RequestEnvironmentInfo.go index 754aeb90cad..c251b568fa1 100644 --- a/service/elasticbeanstalk/api_op_RequestEnvironmentInfo.go +++ b/service/elasticbeanstalk/api_op_RequestEnvironmentInfo.go @@ -19,7 +19,7 @@ import ( // support bundle logs. Use RetrieveEnvironmentInfo to obtain the set of logs. // Related Topics // -// * RetrieveEnvironmentInfo +// * RetrieveEnvironmentInfo func (c *Client) RequestEnvironmentInfo(ctx context.Context, params *RequestEnvironmentInfoInput, optFns ...func(*Options)) (*RequestEnvironmentInfoOutput, error) { if params == nil { params = &RequestEnvironmentInfoInput{} diff --git a/service/elasticbeanstalk/api_op_RetrieveEnvironmentInfo.go b/service/elasticbeanstalk/api_op_RetrieveEnvironmentInfo.go index 79a87c48004..9fd7e68049b 100644 --- a/service/elasticbeanstalk/api_op_RetrieveEnvironmentInfo.go +++ b/service/elasticbeanstalk/api_op_RetrieveEnvironmentInfo.go @@ -14,7 +14,7 @@ import ( // Retrieves the compiled information from a RequestEnvironmentInfo request. // Related Topics // -// * RequestEnvironmentInfo +// * RequestEnvironmentInfo func (c *Client) RetrieveEnvironmentInfo(ctx context.Context, params *RetrieveEnvironmentInfoInput, optFns ...func(*Options)) (*RetrieveEnvironmentInfoOutput, error) { if params == nil { params = &RetrieveEnvironmentInfoInput{} diff --git a/service/elasticbeanstalk/api_op_TerminateEnvironment.go b/service/elasticbeanstalk/api_op_TerminateEnvironment.go index af2b0403159..cdfb8eaefe7 100644 --- a/service/elasticbeanstalk/api_op_TerminateEnvironment.go +++ b/service/elasticbeanstalk/api_op_TerminateEnvironment.go @@ -48,12 +48,12 @@ type TerminateEnvironmentInput struct { // Indicates whether the associated AWS resources should shut down when the // environment is terminated: // - // * true: The specified environment as well as the + // * true: The specified environment as well as the // associated AWS resources, such as Auto Scaling group and LoadBalancer, are // terminated. // - // * false: AWS Elastic Beanstalk resource management is removed - // from the environment, but the AWS resources continue to operate. + // * false: AWS Elastic Beanstalk resource management is removed from + // the environment, but the AWS resources continue to operate. // // For more // information, see the AWS Elastic Beanstalk User Guide. @@ -105,20 +105,20 @@ type TerminateEnvironmentOutput struct { // Describes the health status of the environment. 
AWS Elastic Beanstalk indicates // the failure levels for a running environment: // - // * Red: Indicates the - // environment is not responsive. Occurs when three or more consecutive failures - // occur for an environment. + // * Red: Indicates the environment + // is not responsive. Occurs when three or more consecutive failures occur for an + // environment. // - // * Yellow: Indicates that something is wrong. - // Occurs when two consecutive failures occur for an environment. + // * Yellow: Indicates that something is wrong. Occurs when two + // consecutive failures occur for an environment. // - // * Green: - // Indicates the environment is healthy and fully functional. + // * Green: Indicates the + // environment is healthy and fully functional. // - // * Grey: Default - // health for a new environment. The environment is not fully launched and health - // checks have not started or health checks are suspended during an - // UpdateEnvironment or RestartEnvironment request. + // * Grey: Default health for a new + // environment. The environment is not fully launched and health checks have not + // started or health checks are suspended during an UpdateEnvironment or + // RestartEnvironment request. // // Default: Grey Health types.EnvironmentHealth @@ -145,20 +145,20 @@ type TerminateEnvironmentOutput struct { // The current operational status of the environment: // - // * Launching: Environment - // is in the process of initial deployment. + // * Launching: Environment is + // in the process of initial deployment. // - // * Updating: Environment is in the - // process of updating its configuration settings or application version. + // * Updating: Environment is in the process + // of updating its configuration settings or application version. // - // * - // Ready: Environment is available to have an action performed on it, such as - // update or terminate. + // * Ready: + // Environment is available to have an action performed on it, such as update or + // terminate. // - // * Terminating: Environment is in the shut-down - // process. + // * Terminating: Environment is in the shut-down process. // - // * Terminated: Environment is not running. + // * + // Terminated: Environment is not running. Status types.EnvironmentStatus // The name of the configuration template used to originally launch this diff --git a/service/elasticbeanstalk/api_op_UpdateConfigurationTemplate.go b/service/elasticbeanstalk/api_op_UpdateConfigurationTemplate.go index 2914822c41a..0112815e264 100644 --- a/service/elasticbeanstalk/api_op_UpdateConfigurationTemplate.go +++ b/service/elasticbeanstalk/api_op_UpdateConfigurationTemplate.go @@ -17,7 +17,7 @@ import ( // provided, its value remains unchanged. To clear such properties, specify an // empty string. Related Topics // -// * DescribeConfigurationOptions +// * DescribeConfigurationOptions func (c *Client) UpdateConfigurationTemplate(ctx context.Context, params *UpdateConfigurationTemplateInput, optFns ...func(*Options)) (*UpdateConfigurationTemplateOutput, error) { if params == nil { params = &UpdateConfigurationTemplateInput{} @@ -78,17 +78,17 @@ type UpdateConfigurationTemplateOutput struct { // DeploymentStatus parameter indicates the deployment status of this configuration // set: // - // * null: This configuration is not associated with a running + // * null: This configuration is not associated with a running // environment. 
// - // * pending: This is a draft configuration that is not deployed - // to the associated environment but is in the process of deploying. + // * pending: This is a draft configuration that is not deployed to + // the associated environment but is in the process of deploying. // - // * - // deployed: This is the configuration that is currently deployed to the associated - // running environment. + // * deployed: This + // is the configuration that is currently deployed to the associated running + // environment. // - // * failed: This is a draft configuration that failed to + // * failed: This is a draft configuration that failed to // successfully deploy. DeploymentStatus types.ConfigurationDeploymentStatus diff --git a/service/elasticbeanstalk/api_op_UpdateEnvironment.go b/service/elasticbeanstalk/api_op_UpdateEnvironment.go index ed39d8a01e3..ef6ec717d08 100644 --- a/service/elasticbeanstalk/api_op_UpdateEnvironment.go +++ b/service/elasticbeanstalk/api_op_UpdateEnvironment.go @@ -140,20 +140,20 @@ type UpdateEnvironmentOutput struct { // Describes the health status of the environment. AWS Elastic Beanstalk indicates // the failure levels for a running environment: // - // * Red: Indicates the - // environment is not responsive. Occurs when three or more consecutive failures - // occur for an environment. + // * Red: Indicates the environment + // is not responsive. Occurs when three or more consecutive failures occur for an + // environment. // - // * Yellow: Indicates that something is wrong. - // Occurs when two consecutive failures occur for an environment. + // * Yellow: Indicates that something is wrong. Occurs when two + // consecutive failures occur for an environment. // - // * Green: - // Indicates the environment is healthy and fully functional. + // * Green: Indicates the + // environment is healthy and fully functional. // - // * Grey: Default - // health for a new environment. The environment is not fully launched and health - // checks have not started or health checks are suspended during an - // UpdateEnvironment or RestartEnvironment request. + // * Grey: Default health for a new + // environment. The environment is not fully launched and health checks have not + // started or health checks are suspended during an UpdateEnvironment or + // RestartEnvironment request. // // Default: Grey Health types.EnvironmentHealth @@ -180,20 +180,20 @@ type UpdateEnvironmentOutput struct { // The current operational status of the environment: // - // * Launching: Environment - // is in the process of initial deployment. + // * Launching: Environment is + // in the process of initial deployment. // - // * Updating: Environment is in the - // process of updating its configuration settings or application version. + // * Updating: Environment is in the process + // of updating its configuration settings or application version. // - // * - // Ready: Environment is available to have an action performed on it, such as - // update or terminate. + // * Ready: + // Environment is available to have an action performed on it, such as update or + // terminate. // - // * Terminating: Environment is in the shut-down - // process. + // * Terminating: Environment is in the shut-down process. // - // * Terminated: Environment is not running. + // * + // Terminated: Environment is not running. 
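As a minimal sketch of how a caller might interpret the documented status values on the Status field below (assuming EnvironmentStatus is a string-backed type like the other enums in this package; the helper itself is illustrative, not part of the SDK):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk/types"
)

// describeStatus maps the documented EnvironmentStatus values to a short
// human-readable description. Unknown values fall through to the default case.
func describeStatus(status types.EnvironmentStatus) string {
	switch string(status) {
	case "Launching":
		return "initial deployment in progress"
	case "Updating":
		return "configuration or application version update in progress"
	case "Ready":
		return "available for actions such as update or terminate"
	case "Terminating":
		return "shut-down in progress"
	case "Terminated":
		return "not running"
	default:
		return "unrecognized status"
	}
}

func main() {
	fmt.Println(describeStatus(types.EnvironmentStatus("Ready")))
}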
Status types.EnvironmentStatus // The name of the configuration template used to originally launch this diff --git a/service/elasticbeanstalk/types/enums.go b/service/elasticbeanstalk/types/enums.go index e8d95ac614f..22c47dd0f74 100644 --- a/service/elasticbeanstalk/types/enums.go +++ b/service/elasticbeanstalk/types/enums.go @@ -92,9 +92,9 @@ type ComputeType string // Enum values for ComputeType const ( - ComputeTypeBuild_general1_small ComputeType = "BUILD_GENERAL1_SMALL" - ComputeTypeBuild_general1_medium ComputeType = "BUILD_GENERAL1_MEDIUM" - ComputeTypeBuild_general1_large ComputeType = "BUILD_GENERAL1_LARGE" + ComputeTypeBuildGeneral1Small ComputeType = "BUILD_GENERAL1_SMALL" + ComputeTypeBuildGeneral1Medium ComputeType = "BUILD_GENERAL1_MEDIUM" + ComputeTypeBuildGeneral1Large ComputeType = "BUILD_GENERAL1_LARGE" ) // Values returns all known values for ComputeType. Note that this can be expanded diff --git a/service/elasticbeanstalk/types/errors.go b/service/elasticbeanstalk/types/errors.go index ff58bd9272b..4d18cdb5427 100644 --- a/service/elasticbeanstalk/types/errors.go +++ b/service/elasticbeanstalk/types/errors.go @@ -186,12 +186,12 @@ func (e *ResourceTypeNotSupportedException) ErrorFault() smithy.ErrorFault { ret // The specified S3 bucket does not belong to the S3 region in which the service is // running. The following regions are supported: // -// * IAD/us-east-1 +// * IAD/us-east-1 // -// * +// * // PDX/us-west-2 // -// * DUB/eu-west-1 +// * DUB/eu-west-1 type S3LocationNotInServiceRegionException struct { Message *string } diff --git a/service/elasticbeanstalk/types/types.go b/service/elasticbeanstalk/types/types.go index 206af3b3ae0..ab6bb22e6d3 100644 --- a/service/elasticbeanstalk/types/types.go +++ b/service/elasticbeanstalk/types/types.go @@ -108,23 +108,23 @@ type ApplicationVersionDescription struct { // if you specified True for the Process parameter of the CreateApplicationVersion // action. The following list describes the possible values. // - // * Unprocessed – + // * Unprocessed – // Application version wasn't pre-processed or validated. Elastic Beanstalk will // validate configuration files during deployment of the application version to an // environment. // - // * Processing – Elastic Beanstalk is currently processing the + // * Processing – Elastic Beanstalk is currently processing the // application version. // - // * Building – Application version is currently - // undergoing an AWS CodeBuild build. + // * Building – Application version is currently undergoing + // an AWS CodeBuild build. // - // * Processed – Elastic Beanstalk was - // successfully pre-processed and validated. + // * Processed – Elastic Beanstalk was successfully + // pre-processed and validated. // - // * Failed – Either the AWS - // CodeBuild build failed or configuration files didn't pass validation. This - // application version isn't usable. + // * Failed – Either the AWS CodeBuild build failed + // or configuration files didn't pass validation. This application version isn't + // usable. Status ApplicationVersionStatus // A unique identifier for the application version. @@ -179,13 +179,13 @@ type BuildConfiguration struct { // Information about the compute resources the build project will use. 
// - // * + // * // BUILD_GENERAL1_SMALL: Use up to 3 GB memory and 2 vCPUs for builds // - // * + // * // BUILD_GENERAL1_MEDIUM: Use up to 7 GB memory and 4 vCPUs for builds // - // * + // * // BUILD_GENERAL1_LARGE: Use up to 15 GB memory and 8 vCPUs for builds ComputeType ComputeType @@ -208,17 +208,16 @@ type ConfigurationOptionDescription struct { // An indication of which action is required if the value for this configuration // option changes: // - // * NoInterruption : There is no interruption to the - // environment or application availability. + // * NoInterruption : There is no interruption to the environment + // or application availability. // - // * RestartEnvironment : The - // environment is entirely restarted, all AWS resources are deleted and recreated, - // and the environment is unavailable during the process. + // * RestartEnvironment : The environment is entirely + // restarted, all AWS resources are deleted and recreated, and the environment is + // unavailable during the process. // - // * - // RestartApplicationServer : The environment is available the entire time. - // However, a short application outage occurs when the application servers on the - // running Amazon EC2 instances are restarted. + // * RestartApplicationServer : The environment is + // available the entire time. However, a short application outage occurs when the + // application servers on the running Amazon EC2 instances are restarted. ChangeSeverity *string // The default value for this configuration option. @@ -248,12 +247,12 @@ type ConfigurationOptionDescription struct { // An indication of whether the user defined this configuration option: // - // * true - // : This configuration option was defined by the user. It is a valid choice for + // * true : + // This configuration option was defined by the user. It is a valid choice for // specifying if this as an Option to Remove when updating configuration // settings. // - // * false : This configuration was not defined by the + // * false : This configuration was not defined by the // user. // // Constraint: You can remove only UserDefined options from a configuration. @@ -266,18 +265,18 @@ type ConfigurationOptionDescription struct { // An indication of which type of values this option has and whether it is // allowable to select one or more than one of the possible values: // - // * Scalar : + // * Scalar : // Values for this option are a single selection from the possible values, or an // unformatted string, or numeric value governed by the MIN/MAX/Regex // constraints. // - // * List : Values for this option are multiple selections from - // the possible values. + // * List : Values for this option are multiple selections from the + // possible values. // - // * Boolean : Values for this option are either true or - // false . + // * Boolean : Values for this option are either true or false + // . // - // * Json : Values for this option are a JSON representation of a + // * Json : Values for this option are a JSON representation of a // ConfigDocument. ValueType ConfigurationOptionValueType } @@ -319,17 +318,17 @@ type ConfigurationSettingsDescription struct { // DeploymentStatus parameter indicates the deployment status of this configuration // set: // - // * null: This configuration is not associated with a running + // * null: This configuration is not associated with a running // environment. // - // * pending: This is a draft configuration that is not deployed - // to the associated environment but is in the process of deploying. 
+ // * pending: This is a draft configuration that is not deployed to + // the associated environment but is in the process of deploying. // - // * - // deployed: This is the configuration that is currently deployed to the associated - // running environment. + // * deployed: This + // is the configuration that is currently deployed to the associated running + // environment. // - // * failed: This is a draft configuration that failed to + // * failed: This is a draft configuration that failed to // successfully deploy. DeploymentStatus ConfigurationDeploymentStatus @@ -411,13 +410,12 @@ type Deployment struct { // The status of the deployment: // - // * In Progress : The deployment is in - // progress. + // * In Progress : The deployment is in progress. // - // * Deployed : The deployment succeeded. + // * + // Deployed : The deployment succeeded. // - // * Failed : The - // deployment failed. + // * Failed : The deployment failed. Status *string // The version label of the application version in the deployment. @@ -467,20 +465,20 @@ type EnvironmentDescription struct { // Describes the health status of the environment. AWS Elastic Beanstalk indicates // the failure levels for a running environment: // - // * Red: Indicates the - // environment is not responsive. Occurs when three or more consecutive failures - // occur for an environment. + // * Red: Indicates the environment + // is not responsive. Occurs when three or more consecutive failures occur for an + // environment. // - // * Yellow: Indicates that something is wrong. - // Occurs when two consecutive failures occur for an environment. + // * Yellow: Indicates that something is wrong. Occurs when two + // consecutive failures occur for an environment. // - // * Green: - // Indicates the environment is healthy and fully functional. + // * Green: Indicates the + // environment is healthy and fully functional. // - // * Grey: Default - // health for a new environment. The environment is not fully launched and health - // checks have not started or health checks are suspended during an - // UpdateEnvironment or RestartEnvironment request. + // * Grey: Default health for a new + // environment. The environment is not fully launched and health checks have not + // started or health checks are suspended during an UpdateEnvironment or + // RestartEnvironment request. // // Default: Grey Health EnvironmentHealth @@ -507,20 +505,20 @@ type EnvironmentDescription struct { // The current operational status of the environment: // - // * Launching: Environment - // is in the process of initial deployment. + // * Launching: Environment is + // in the process of initial deployment. // - // * Updating: Environment is in the - // process of updating its configuration settings or application version. + // * Updating: Environment is in the process + // of updating its configuration settings or application version. // - // * - // Ready: Environment is available to have an action performed on it, such as - // update or terminate. + // * Ready: + // Environment is available to have an action performed on it, such as update or + // terminate. // - // * Terminating: Environment is in the shut-down - // process. + // * Terminating: Environment is in the shut-down process. // - // * Terminated: Environment is not running. + // * + // Terminated: Environment is not running. 
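Similarly, a minimal sketch of how the health colors documented just above (Red, Yellow, Green, Grey) might be consumed (illustrative helper, not part of the SDK):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk/types"
)

// needsAttention reports whether an environment's health color suggests operator
// attention: Red (not responsive) or Yellow (something is wrong) return true,
// while Green (healthy) and Grey (health checks not started or suspended) do not.
func needsAttention(health types.EnvironmentHealth) bool {
	switch string(health) {
	case "Red", "Yellow":
		return true
	default:
		return false
	}
}

func main() {
	fmt.Println(needsAttention(types.EnvironmentHealth("Red")))  // true
	fmt.Println(needsAttention(types.EnvironmentHealth("Grey"))) // false
}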
Status EnvironmentStatus // The name of the configuration template used to originally launch this @@ -608,18 +606,18 @@ type EnvironmentTier struct { // The name of this environment tier. Valid values: // - // * For Web server tier – + // * For Web server tier – // WebServer // - // * For Worker tier – Worker + // * For Worker tier – Worker Name *string // The type of this environment tier. Valid values: // - // * For Web server tier – + // * For Web server tier – // Standard // - // * For Worker tier – SQS/HTTP + // * For Worker tier – SQS/HTTP Type *string // The version of this environment tier. When you don't set a value to it, Elastic @@ -1005,16 +1003,16 @@ type PlatformFilter struct { // value is supported for all current operators. The following list shows valid // filter values for some filter attributes. // - // * PlatformStatus: Creating | - // Failed | Ready | Deleting | Deleted - // - // * PlatformLifecycleState: recommended + // * PlatformStatus: Creating | Failed | + // Ready | Deleting | Deleted // + // * PlatformLifecycleState: recommended // - // * SupportedTier: WebServer/Standard | Worker/SQS/HTTP + // * + // SupportedTier: WebServer/Standard | Worker/SQS/HTTP // - // * SupportedAddon: - // Log/S3 | Monitoring/Healthd | WorkerDaemon/SQSD + // * SupportedAddon: Log/S3 | + // Monitoring/Healthd | WorkerDaemon/SQSD Values []*string } @@ -1211,12 +1209,12 @@ type SourceBuildInformation struct { // The location of the source code, as a formatted string, depending on the value // of SourceRepository // - // * For CodeCommit, the format is the repository name and + // * For CodeCommit, the format is the repository name and // commit ID, separated by a forward slash. For example, // my-git-repo/265cfa0cf6af46153527f55d6503ec030551f57a. // - // * For S3, the format - // is the S3 bucket name and object key, separated by a forward slash. For example, + // * For S3, the format is + // the S3 bucket name and object key, separated by a forward slash. For example, // my-s3-bucket/Folders/my-source-file. // // This member is required. @@ -1224,18 +1222,18 @@ type SourceBuildInformation struct { // Location where the repository is stored. // - // * CodeCommit + // * CodeCommit // - // * S3 + // * S3 // // This member is required. SourceRepository SourceRepository // The type of repository. // - // * Git + // * Git // - // * Zip + // * Zip // // This member is required. SourceType SourceType @@ -1316,10 +1314,10 @@ type ValidationMessage struct { // An indication of the severity of this message: // - // * error: This message - // indicates that this is not a valid setting for an option. + // * error: This message indicates + // that this is not a valid setting for an option. // - // * warning: This - // message is providing information you should take into account. + // * warning: This message is + // providing information you should take into account. Severity ValidationSeverity } diff --git a/service/elasticloadbalancing/api_op_ModifyLoadBalancerAttributes.go b/service/elasticloadbalancing/api_op_ModifyLoadBalancerAttributes.go index de9911860ab..a3c51843690 100644 --- a/service/elasticloadbalancing/api_op_ModifyLoadBalancerAttributes.go +++ b/service/elasticloadbalancing/api_op_ModifyLoadBalancerAttributes.go @@ -18,19 +18,19 @@ import ( // timeout value for your load balancer. 
For more information, see the following in // the Classic Load Balancers Guide: // -// * Cross-Zone Load Balancing +// * Cross-Zone Load Balancing // (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-disable-crosszone-lb.html) // -// -// * Connection Draining +// * +// Connection Draining // (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/config-conn-drain.html) // -// -// * Access Logs +// * +// Access Logs // (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/access-log-collection.html) // -// -// * Idle Connection Timeout +// * +// Idle Connection Timeout // (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/config-idle-timeout.html) func (c *Client) ModifyLoadBalancerAttributes(ctx context.Context, params *ModifyLoadBalancerAttributesInput, optFns ...func(*Options)) (*ModifyLoadBalancerAttributesOutput, error) { if params == nil { diff --git a/service/elasticloadbalancing/types/types.go b/service/elasticloadbalancing/types/types.go index 389b49a3076..417d3488100 100644 --- a/service/elasticloadbalancing/types/types.go +++ b/service/elasticloadbalancing/types/types.go @@ -32,7 +32,7 @@ type AdditionalAttribute struct { // The name of the attribute. The following attribute is supported. // - // * + // * // elb.http.desyncmitigationmode - Determines how the load balancer handles // requests that might pose a security risk to your application. The possible // values are monitor, defensive, and strictest. The default is defensive. @@ -152,38 +152,37 @@ type InstanceState struct { // A description of the instance state. This string can contain one or more of the // following messages. // - // * N/A + // * N/A // - // * A transient error occurred. Please try - // again later. + // * A transient error occurred. Please try again + // later. // - // * Instance has failed at least the UnhealthyThreshold number - // of health checks consecutively. + // * Instance has failed at least the UnhealthyThreshold number of health + // checks consecutively. // - // * Instance has not passed the configured - // HealthyThreshold number of health checks consecutively. + // * Instance has not passed the configured HealthyThreshold + // number of health checks consecutively. // - // * Instance - // registration is still in progress. + // * Instance registration is still in + // progress. // - // * Instance is in the EC2 Availability - // Zone for which LoadBalancer is not configured to route traffic to. + // * Instance is in the EC2 Availability Zone for which LoadBalancer is + // not configured to route traffic to. // - // * - // Instance is not currently registered with the LoadBalancer. + // * Instance is not currently registered with + // the LoadBalancer. // - // * Instance - // deregistration currently in progress. + // * Instance deregistration currently in progress. // - // * Disable Availability Zone is - // currently in progress. + // * Disable + // Availability Zone is currently in progress. // - // * Instance is in pending state. + // * Instance is in pending state. // - // * Instance is - // in stopped state. + // * + // Instance is in stopped state. // - // * Instance is in terminated state. + // * Instance is in terminated state. Description *string // The ID of the instance. @@ -220,12 +219,12 @@ type Limit struct { // The name of the limit. 
The possible values are: // - // * classic-listeners + // * classic-listeners // - // * + // * // classic-load-balancers // - // * classic-registered-instances + // * classic-registered-instances Name *string } @@ -421,16 +420,16 @@ type PolicyAttributeTypeDescription struct { // The cardinality of the attribute. Valid values: // - // * ONE(1) : Single value + // * ONE(1) : Single value // required // - // * ZERO_OR_ONE(0..1) : Up to one value is allowed + // * ZERO_OR_ONE(0..1) : Up to one value is allowed // - // * - // ZERO_OR_MORE(0..*) : Optional. Multiple values are allowed + // * ZERO_OR_MORE(0..*) + // : Optional. Multiple values are allowed // - // * - // ONE_OR_MORE(1..*0) : Required. Multiple values are allowed + // * ONE_OR_MORE(1..*0) : Required. + // Multiple values are allowed Cardinality *string // The default value of the attribute, if applicable. diff --git a/service/elasticloadbalancingv2/api_op_CreateListener.go b/service/elasticloadbalancingv2/api_op_CreateListener.go index 34ca8559853..f312a0aa37e 100644 --- a/service/elasticloadbalancingv2/api_op_CreateListener.go +++ b/service/elasticloadbalancingv2/api_op_CreateListener.go @@ -78,16 +78,16 @@ type CreateListenerInput struct { // policy. You can specify one policy name. The following are the possible // values: // - // * HTTP1Only + // * HTTP1Only // - // * HTTP2Only + // * HTTP2Only // - // * HTTP2Optional + // * HTTP2Optional // - // * - // HTTP2Preferred + // * HTTP2Preferred // - // * None + // * + // None // // For more information, see ALPN Policies // (https://docs.aws.amazon.com/elasticloadbalancing/latest/network/create-tls-listener.html#alpn-policies) @@ -103,28 +103,28 @@ type CreateListenerInput struct { // [HTTPS and TLS listeners] The security policy that defines which protocols and // ciphers are supported. The following are the possible values: // - // * + // * // ELBSecurityPolicy-2016-08 // - // * ELBSecurityPolicy-TLS-1-0-2015-04 + // * ELBSecurityPolicy-TLS-1-0-2015-04 // - // * + // * // ELBSecurityPolicy-TLS-1-1-2017-01 // - // * ELBSecurityPolicy-TLS-1-2-2017-01 + // * ELBSecurityPolicy-TLS-1-2-2017-01 // + // * + // ELBSecurityPolicy-TLS-1-2-Ext-2018-06 // - // * ELBSecurityPolicy-TLS-1-2-Ext-2018-06 + // * ELBSecurityPolicy-FS-2018-06 // - // * ELBSecurityPolicy-FS-2018-06 + // * + // ELBSecurityPolicy-FS-1-1-2019-08 // + // * ELBSecurityPolicy-FS-1-2-2019-08 // - // * ELBSecurityPolicy-FS-1-1-2019-08 - // - // * ELBSecurityPolicy-FS-1-2-2019-08 - // - // - // * ELBSecurityPolicy-FS-1-2-Res-2019-08 + // * + // ELBSecurityPolicy-FS-1-2-Res-2019-08 // // For more information, see Security // Policies diff --git a/service/elasticloadbalancingv2/api_op_CreateTargetGroup.go b/service/elasticloadbalancingv2/api_op_CreateTargetGroup.go index 49aabb22df9..8e2606d791b 100644 --- a/service/elasticloadbalancingv2/api_op_CreateTargetGroup.go +++ b/service/elasticloadbalancingv2/api_op_CreateTargetGroup.go @@ -113,16 +113,16 @@ type CreateTargetGroupInput struct { // target group. You can't specify targets for a target group using more than one // target type. // - // * instance - Targets are specified by instance ID. This is the + // * instance - Targets are specified by instance ID. This is the // default value. // - // * ip - Targets are specified by IP address. You can specify - // IP addresses from the subnets of the virtual private cloud (VPC) for the target + // * ip - Targets are specified by IP address. 
You can specify IP + // addresses from the subnets of the virtual private cloud (VPC) for the target // group, the RFC 1918 range (10.0.0.0/8, 172.16.0.0/12, and 192.168.0.0/16), and // the RFC 6598 range (100.64.0.0/10). You can't specify publicly routable IP // addresses. // - // * lambda - The target groups contains a single Lambda function. + // * lambda - The target groups contains a single Lambda function. TargetType types.TargetTypeEnum // The number of consecutive health check failures required before considering a diff --git a/service/elasticloadbalancingv2/api_op_ModifyListener.go b/service/elasticloadbalancingv2/api_op_ModifyListener.go index 9a16acdddc6..5079a465c65 100644 --- a/service/elasticloadbalancingv2/api_op_ModifyListener.go +++ b/service/elasticloadbalancingv2/api_op_ModifyListener.go @@ -45,16 +45,16 @@ type ModifyListenerInput struct { // policy. You can specify one policy name. The following are the possible // values: // - // * HTTP1Only + // * HTTP1Only // - // * HTTP2Only + // * HTTP2Only // - // * HTTP2Optional + // * HTTP2Optional // - // * - // HTTP2Preferred + // * HTTP2Preferred // - // * None + // * + // None // // For more information, see ALPN Policies // (https://docs.aws.amazon.com/elasticloadbalancing/latest/network/create-tls-listener.html#alpn-policies) @@ -91,28 +91,28 @@ type ModifyListenerInput struct { // [HTTPS and TLS listeners] The security policy that defines which protocols and // ciphers are supported. The following are the possible values: // - // * + // * // ELBSecurityPolicy-2016-08 // - // * ELBSecurityPolicy-TLS-1-0-2015-04 + // * ELBSecurityPolicy-TLS-1-0-2015-04 // - // * + // * // ELBSecurityPolicy-TLS-1-1-2017-01 // - // * ELBSecurityPolicy-TLS-1-2-2017-01 + // * ELBSecurityPolicy-TLS-1-2-2017-01 // + // * + // ELBSecurityPolicy-TLS-1-2-Ext-2018-06 // - // * ELBSecurityPolicy-TLS-1-2-Ext-2018-06 + // * ELBSecurityPolicy-FS-2018-06 // - // * ELBSecurityPolicy-FS-2018-06 + // * + // ELBSecurityPolicy-FS-1-1-2019-08 // + // * ELBSecurityPolicy-FS-1-2-2019-08 // - // * ELBSecurityPolicy-FS-1-1-2019-08 - // - // * ELBSecurityPolicy-FS-1-2-2019-08 - // - // - // * ELBSecurityPolicy-FS-1-2-Res-2019-08 + // * + // ELBSecurityPolicy-FS-1-2-Res-2019-08 // // For more information, see Security // Policies diff --git a/service/elasticloadbalancingv2/types/enums.go b/service/elasticloadbalancingv2/types/enums.go index dc9d2004528..ddc7ed13078 100644 --- a/service/elasticloadbalancingv2/types/enums.go +++ b/service/elasticloadbalancingv2/types/enums.go @@ -6,11 +6,11 @@ type ActionTypeEnum string // Enum values for ActionTypeEnum const ( - ActionTypeEnumForward ActionTypeEnum = "forward" - ActionTypeEnumAuthenticate_oidc ActionTypeEnum = "authenticate-oidc" - ActionTypeEnumAuthenticate_cognito ActionTypeEnum = "authenticate-cognito" - ActionTypeEnumRedirect ActionTypeEnum = "redirect" - ActionTypeEnumFixed_response ActionTypeEnum = "fixed-response" + ActionTypeEnumForward ActionTypeEnum = "forward" + ActionTypeEnumAuthenticateOidc ActionTypeEnum = "authenticate-oidc" + ActionTypeEnumAuthenticateCognito ActionTypeEnum = "authenticate-cognito" + ActionTypeEnumRedirect ActionTypeEnum = "redirect" + ActionTypeEnumFixedResponse ActionTypeEnum = "fixed-response" ) // Values returns all known values for ActionTypeEnum. 
Note that this can be @@ -90,8 +90,8 @@ type LoadBalancerSchemeEnum string // Enum values for LoadBalancerSchemeEnum const ( - LoadBalancerSchemeEnumInternet_facing LoadBalancerSchemeEnum = "internet-facing" - LoadBalancerSchemeEnumInternal LoadBalancerSchemeEnum = "internal" + LoadBalancerSchemeEnumInternetFacing LoadBalancerSchemeEnum = "internet-facing" + LoadBalancerSchemeEnumInternal LoadBalancerSchemeEnum = "internal" ) // Values returns all known values for LoadBalancerSchemeEnum. Note that this can @@ -108,10 +108,10 @@ type LoadBalancerStateEnum string // Enum values for LoadBalancerStateEnum const ( - LoadBalancerStateEnumActive LoadBalancerStateEnum = "active" - LoadBalancerStateEnumProvisioning LoadBalancerStateEnum = "provisioning" - LoadBalancerStateEnumActive_impaired LoadBalancerStateEnum = "active_impaired" - LoadBalancerStateEnumFailed LoadBalancerStateEnum = "failed" + LoadBalancerStateEnumActive LoadBalancerStateEnum = "active" + LoadBalancerStateEnumProvisioning LoadBalancerStateEnum = "provisioning" + LoadBalancerStateEnumActiveImpaired LoadBalancerStateEnum = "active_impaired" + LoadBalancerStateEnumFailed LoadBalancerStateEnum = "failed" ) // Values returns all known values for LoadBalancerStateEnum. Note that this can be @@ -148,12 +148,12 @@ type ProtocolEnum string // Enum values for ProtocolEnum const ( - ProtocolEnumHttp ProtocolEnum = "HTTP" - ProtocolEnumHttps ProtocolEnum = "HTTPS" - ProtocolEnumTcp ProtocolEnum = "TCP" - ProtocolEnumTls ProtocolEnum = "TLS" - ProtocolEnumUdp ProtocolEnum = "UDP" - ProtocolEnumTcp_udp ProtocolEnum = "TCP_UDP" + ProtocolEnumHttp ProtocolEnum = "HTTP" + ProtocolEnumHttps ProtocolEnum = "HTTPS" + ProtocolEnumTcp ProtocolEnum = "TCP" + ProtocolEnumTls ProtocolEnum = "TLS" + ProtocolEnumUdp ProtocolEnum = "UDP" + ProtocolEnumTcpUdp ProtocolEnum = "TCP_UDP" ) // Values returns all known values for ProtocolEnum. Note that this can be expanded @@ -174,8 +174,8 @@ type RedirectActionStatusCodeEnum string // Enum values for RedirectActionStatusCodeEnum const ( - RedirectActionStatusCodeEnumHttp_301 RedirectActionStatusCodeEnum = "HTTP_301" - RedirectActionStatusCodeEnumHttp_302 RedirectActionStatusCodeEnum = "HTTP_302" + RedirectActionStatusCodeEnumHttp301 RedirectActionStatusCodeEnum = "HTTP_301" + RedirectActionStatusCodeEnumHttp302 RedirectActionStatusCodeEnum = "HTTP_302" ) // Values returns all known values for RedirectActionStatusCodeEnum. 
Note that this @@ -192,18 +192,18 @@ type TargetHealthReasonEnum string // Enum values for TargetHealthReasonEnum const ( - TargetHealthReasonEnumRegistration_in_progress TargetHealthReasonEnum = "Elb.RegistrationInProgress" - TargetHealthReasonEnumInitial_health_checking TargetHealthReasonEnum = "Elb.InitialHealthChecking" - TargetHealthReasonEnumResponse_code_mismatch TargetHealthReasonEnum = "Target.ResponseCodeMismatch" - TargetHealthReasonEnumTimeout TargetHealthReasonEnum = "Target.Timeout" - TargetHealthReasonEnumFailed_health_checks TargetHealthReasonEnum = "Target.FailedHealthChecks" - TargetHealthReasonEnumNot_registered TargetHealthReasonEnum = "Target.NotRegistered" - TargetHealthReasonEnumNot_in_use TargetHealthReasonEnum = "Target.NotInUse" - TargetHealthReasonEnumDeregistration_in_progress TargetHealthReasonEnum = "Target.DeregistrationInProgress" - TargetHealthReasonEnumInvalid_state TargetHealthReasonEnum = "Target.InvalidState" - TargetHealthReasonEnumIp_unusable TargetHealthReasonEnum = "Target.IpUnusable" - TargetHealthReasonEnumHealth_check_disabled TargetHealthReasonEnum = "Target.HealthCheckDisabled" - TargetHealthReasonEnumInternal_error TargetHealthReasonEnum = "Elb.InternalError" + TargetHealthReasonEnumRegistrationInProgress TargetHealthReasonEnum = "Elb.RegistrationInProgress" + TargetHealthReasonEnumInitialHealthChecking TargetHealthReasonEnum = "Elb.InitialHealthChecking" + TargetHealthReasonEnumResponseCodeMismatch TargetHealthReasonEnum = "Target.ResponseCodeMismatch" + TargetHealthReasonEnumTimeout TargetHealthReasonEnum = "Target.Timeout" + TargetHealthReasonEnumFailedHealthChecks TargetHealthReasonEnum = "Target.FailedHealthChecks" + TargetHealthReasonEnumNotRegistered TargetHealthReasonEnum = "Target.NotRegistered" + TargetHealthReasonEnumNotInUse TargetHealthReasonEnum = "Target.NotInUse" + TargetHealthReasonEnumDeregistrationInProgress TargetHealthReasonEnum = "Target.DeregistrationInProgress" + TargetHealthReasonEnumInvalidState TargetHealthReasonEnum = "Target.InvalidState" + TargetHealthReasonEnumIpUnusable TargetHealthReasonEnum = "Target.IpUnusable" + TargetHealthReasonEnumHealthCheckDisabled TargetHealthReasonEnum = "Target.HealthCheckDisabled" + TargetHealthReasonEnumInternalError TargetHealthReasonEnum = "Elb.InternalError" ) // Values returns all known values for TargetHealthReasonEnum. Note that this can diff --git a/service/elasticloadbalancingv2/types/types.go b/service/elasticloadbalancingv2/types/types.go index e6756cd7ee1..8c6237d70b6 100644 --- a/service/elasticloadbalancingv2/types/types.go +++ b/service/elasticloadbalancingv2/types/types.go @@ -76,13 +76,13 @@ type AuthenticateCognitoActionConfig struct { // The behavior if the user is not authenticated. The following are possible // values: // - // * deny - Return an HTTP 401 Unauthorized error. + // * deny - Return an HTTP 401 Unauthorized error. // - // * allow - - // Allow the request to be forwarded to the target. + // * allow - Allow the + // request to be forwarded to the target. // - // * authenticate - Redirect - // the request to the IdP authorization endpoint. This is the default value. + // * authenticate - Redirect the request to + // the IdP authorization endpoint. This is the default value. OnUnauthenticatedRequest AuthenticateCognitoActionConditionalBehaviorEnum // The set of user claims to be requested from the IdP. The default is openid. To @@ -144,13 +144,13 @@ type AuthenticateOidcActionConfig struct { // The behavior if the user is not authenticated. 
The following are possible // values: // - // * deny - Return an HTTP 401 Unauthorized error. + // * deny - Return an HTTP 401 Unauthorized error. // - // * allow - - // Allow the request to be forwarded to the target. + // * allow - Allow the + // request to be forwarded to the target. // - // * authenticate - Redirect - // the request to the IdP authorization endpoint. This is the default value. + // * authenticate - Redirect the request to + // the IdP authorization endpoint. This is the default value. OnUnauthenticatedRequest AuthenticateOidcActionConditionalBehaviorEnum // The set of user claims to be requested from the IdP. The default is openid. To @@ -298,37 +298,37 @@ type Limit struct { // The name of the limit. The possible values are: // - // * - // application-load-balancers + // * application-load-balancers // - // * listeners-per-application-load-balancer + // * + // listeners-per-application-load-balancer // - // * + // * // listeners-per-network-load-balancer // - // * network-load-balancers + // * network-load-balancers // - // * + // * // rules-per-application-load-balancer // - // * target-groups + // * target-groups // - // * + // * // target-groups-per-action-on-application-load-balancer // - // * + // * // target-groups-per-action-on-network-load-balancer // - // * + // * // target-groups-per-application-load-balancer // - // * + // * // targets-per-application-load-balancer // - // * + // * // targets-per-availability-zone-per-network-load-balancer // - // * + // * // targets-per-network-load-balancer Name *string } @@ -436,40 +436,40 @@ type LoadBalancerAttribute struct { // The name of the attribute. The following attributes are supported by both // Application Load Balancers and Network Load Balancers: // - // * - // access_logs.s3.enabled - Indicates whether access logs are enabled. The value is - // true or false. The default is false. + // * access_logs.s3.enabled + // - Indicates whether access logs are enabled. The value is true or false. The + // default is false. // - // * access_logs.s3.bucket - The name of - // the S3 bucket for the access logs. This attribute is required if access logs are - // enabled. The bucket must exist in the same region as the load balancer and have - // a bucket policy that grants Elastic Load Balancing permissions to write to the - // bucket. + // * access_logs.s3.bucket - The name of the S3 bucket for the + // access logs. This attribute is required if access logs are enabled. The bucket + // must exist in the same region as the load balancer and have a bucket policy that + // grants Elastic Load Balancing permissions to write to the bucket. // - // * access_logs.s3.prefix - The prefix for the location in the S3 - // bucket for the access logs. + // * + // access_logs.s3.prefix - The prefix for the location in the S3 bucket for the + // access logs. // - // * deletion_protection.enabled - Indicates - // whether deletion protection is enabled. The value is true or false. The default - // is false. + // * deletion_protection.enabled - Indicates whether deletion + // protection is enabled. The value is true or false. The default is false. // - // The following attributes are supported by only Application Load - // Balancers: + // The + // following attributes are supported by only Application Load Balancers: // - // * idle_timeout.timeout_seconds - The idle timeout value, in - // seconds. The valid range is 1-4000 seconds. The default is 60 seconds. + // * + // idle_timeout.timeout_seconds - The idle timeout value, in seconds. 
The valid + // range is 1-4000 seconds. The default is 60 seconds. // - // * + // * // routing.http.desync_mitigation_mode - Determines how the load balancer handles // requests that might pose a security risk to your application. The possible // values are monitor, defensive, and strictest. The default is defensive. // - // * + // * // routing.http.drop_invalid_header_fields.enabled - Indicates whether HTTP headers // with invalid header fields are removed by the load balancer (true) or routed to // targets (false). The default is false. // - // * routing.http2.enabled - Indicates + // * routing.http2.enabled - Indicates // whether HTTP/2 is enabled. The value is true or false. The default is true. // Elastic Load Balancing requires that message header names contain only // alphanumeric characters and hyphens. @@ -477,9 +477,9 @@ type LoadBalancerAttribute struct { // The following attributes are supported by // only Network Load Balancers: // - // * load_balancing.cross_zone.enabled - - // Indicates whether cross-zone load balancing is enabled. The value is true or - // false. The default is false. + // * load_balancing.cross_zone.enabled - Indicates + // whether cross-zone load balancing is enabled. The value is true or false. The + // default is false. Key *string // The value of the attribute. @@ -556,21 +556,20 @@ type QueryStringKeyValuePair struct { // path. Any components that you do not modify retain their original values. You // can reuse URI components using the following reserved keywords: // -// * +// * // #{protocol} // -// * #{host} +// * #{host} // -// * #{port} +// * #{port} // -// * #{path} (the leading "/" is -// removed) +// * #{path} (the leading "/" is removed) // -// * #{query} +// * +// #{query} // -// For example, you can change the path to -// "/new/#{path}", the hostname to "example.#{host}", or the query to -// "#{query}&value=xyz". +// For example, you can change the path to "/new/#{path}", the hostname +// to "example.#{host}", or the query to "#{query}&value=xyz". type RedirectActionConfig struct { // The HTTP redirect code. The redirect is either permanent (HTTP 301) or temporary @@ -631,19 +630,19 @@ type RuleCondition struct { // The field in the HTTP request. The following are the possible values: // - // * + // * // http-header // - // * http-request-method - // - // * host-header + // * http-request-method // - // * path-pattern + // * host-header // + // * path-pattern // - // * query-string + // * + // query-string // - // * source-ip + // * source-ip Field *string // Information for a host header condition. Specify only when Field is host-header. @@ -675,32 +674,32 @@ type RuleCondition struct { // my.example.com) in Values. A host name is case insensitive, can be up to 128 // characters in length, and can contain any of the following characters. // - // * - // A-Z, a-z, 0-9 - // - // * - . + // * A-Z, + // a-z, 0-9 // - // * * (matches 0 or more characters) + // * - . // - // * ? - // (matches exactly 1 character) + // * * (matches 0 or more characters) // - // If Field is path-pattern and you are not using - // PathPatternConfig, you can specify a single path pattern (for example, /img/*) - // in Values. A path pattern is case-sensitive, can be up to 128 characters in - // length, and can contain any of the following characters. + // * ? (matches exactly 1 + // character) // - // * A-Z, a-z, 0-9 + // If Field is path-pattern and you are not using PathPatternConfig, + // you can specify a single path pattern (for example, /img/*) in Values. 
A path + // pattern is case-sensitive, can be up to 128 characters in length, and can + // contain any of the following characters. // + // * A-Z, a-z, 0-9 // - // * _ - . $ / ~ " ' @ : + + // * _ - . $ / ~ " ' @ : + // + // - // * & (using &) + // * & (using &) // - // * * (matches 0 or more - // characters) + // * * (matches 0 or more characters) // - // * ? (matches exactly 1 character) + // * ? (matches exactly 1 + // character) Values []*string } @@ -872,39 +871,39 @@ type TargetGroupAttribute struct { // The name of the attribute. The following attributes are supported by both // Application Load Balancers and Network Load Balancers: // - // * + // * // deregistration_delay.timeout_seconds - The amount of time, in seconds, for // Elastic Load Balancing to wait before changing the state of a deregistering // target from draining to unused. The range is 0-3600 seconds. The default value // is 300 seconds. If the target is a Lambda function, this attribute is not // supported. // - // * stickiness.enabled - Indicates whether sticky sessions are + // * stickiness.enabled - Indicates whether sticky sessions are // enabled. The value is true or false. The default is false. // - // * - // stickiness.type - The type of sticky sessions. The possible values are - // - // - // lb_cookie for Application Load Balancers or source_ip for Network Load - // Balancers. + // * stickiness.type - + // The type of sticky sessions. The possible values are // - // The following attributes are supported only if the load balancer is - // an Application Load Balancer and the target is an instance or an IP address: + // lb_cookie for Application + // Load Balancers or source_ip for Network Load Balancers. // + // The following + // attributes are supported only if the load balancer is an Application Load + // Balancer and the target is an instance or an IP address: // - // * load_balancing.algorithm.type - The load balancing algorithm determines how - // the load balancer selects targets when routing requests. The value is - // round_robin or least_outstanding_requests. The default is round_robin. + // * + // load_balancing.algorithm.type - The load balancing algorithm determines how the + // load balancer selects targets when routing requests. The value is round_robin or + // least_outstanding_requests. The default is round_robin. // - // * + // * // slow_start.duration_seconds - The time period, in seconds, during which a newly // registered target receives an increasing share of the traffic to the target // group. After this time period ends, the target receives its full share of // traffic. The range is 30-900 seconds (15 minutes). The default is 0 seconds // (disabled). // - // * stickiness.lb_cookie.duration_seconds - The time period, in + // * stickiness.lb_cookie.duration_seconds - The time period, in // seconds, during which requests from a client should be routed to the same // target. After this time period expires, the load balancer-generated cookie is // considered stale. The range is 1 second to 1 week (604800 seconds). The default @@ -914,19 +913,18 @@ type TargetGroupAttribute struct { // the load balancer is an Application Load Balancer and the target is a Lambda // function: // - // * lambda.multi_value_headers.enabled - Indicates whether the - // request and response headers that are exchanged between the load balancer and - // the Lambda function include arrays of values or strings. The value is true or - // false. The default is false. 
If the value is false and the request contains a - // duplicate header field name or query parameter key, the load balancer uses the - // last value sent by the client. + // * lambda.multi_value_headers.enabled - Indicates whether the request + // and response headers that are exchanged between the load balancer and the Lambda + // function include arrays of values or strings. The value is true or false. The + // default is false. If the value is false and the request contains a duplicate + // header field name or query parameter key, the load balancer uses the last value + // sent by the client. // - // The following attribute is supported only by - // Network Load Balancers: + // The following attribute is supported only by Network Load + // Balancers: // - // * proxy_protocol_v2.enabled - Indicates whether - // Proxy Protocol version 2 is enabled. The value is true or false. The default is - // false. + // * proxy_protocol_v2.enabled - Indicates whether Proxy Protocol + // version 2 is enabled. The value is true or false. The default is false. Key *string // The value of the attribute. @@ -966,64 +964,63 @@ type TargetHealth struct { // If the target state is initial, the reason code can be one of the following // values: // - // * Elb.RegistrationInProgress - The target is in the process of - // being registered with the load balancer. + // * Elb.RegistrationInProgress - The target is in the process of being + // registered with the load balancer. // - // * Elb.InitialHealthChecking - The - // load balancer is still sending the target the minimum number of health checks + // * Elb.InitialHealthChecking - The load + // balancer is still sending the target the minimum number of health checks // required to determine its health status. // // If the target state is unhealthy, the // reason code can be one of the following values: // - // * - // Target.ResponseCodeMismatch - The health checks did not return an expected HTTP - // code. Applies only to Application Load Balancers. - // - // * Target.Timeout - The - // health check requests timed out. Applies only to Application Load Balancers. - // + // * Target.ResponseCodeMismatch - + // The health checks did not return an expected HTTP code. Applies only to + // Application Load Balancers. // - // * Target.FailedHealthChecks - The load balancer received an error while - // establishing a connection to the target or the target response was malformed. + // * Target.Timeout - The health check requests timed + // out. Applies only to Application Load Balancers. // + // * Target.FailedHealthChecks - + // The load balancer received an error while establishing a connection to the + // target or the target response was malformed. // - // * Elb.InternalError - The health checks failed due to an internal error. Applies - // only to Application Load Balancers. + // * Elb.InternalError - The health + // checks failed due to an internal error. Applies only to Application Load + // Balancers. // - // If the target state is unused, the reason - // code can be one of the following values: + // If the target state is unused, the reason code can be one of the + // following values: // - // * Target.NotRegistered - The - // target is not registered with the target group. + // * Target.NotRegistered - The target is not registered with + // the target group. // - // * Target.NotInUse - The - // target group is not used by any load balancer or the target is in an - // Availability Zone that is not enabled for its load balancer. 
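As a companion to the TargetGroupAttribute key/value pairs documented above, a short sketch of building a slice of attributes. The attribute keys and their valid values come from the comments above; the surrounding ModifyTargetGroupAttributes call and the target group ARN it would need are assumptions and are not part of this diff.

    // Assumed imports:
    //   "github.com/aws/aws-sdk-go-v2/aws"
    //   "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types"

    // stickyAttributes enables lb_cookie sticky sessions and shortens the
    // deregistration delay, using keys listed in the documentation above.
    func stickyAttributes() []*types.TargetGroupAttribute {
        return []*types.TargetGroupAttribute{
            {Key: aws.String("stickiness.enabled"), Value: aws.String("true")},
            {Key: aws.String("stickiness.type"), Value: aws.String("lb_cookie")},
            {Key: aws.String("deregistration_delay.timeout_seconds"), Value: aws.String("120")},
        }
    }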
+ // * Target.NotInUse - The target group is not used by any load + // balancer or the target is in an Availability Zone that is not enabled for its + // load balancer. // - // * - // Target.InvalidState - The target is in the stopped or terminated state. + // * Target.InvalidState - The target is in the stopped or + // terminated state. // - // * - // Target.IpUnusable - The target IP address is reserved for use by a load - // balancer. + // * Target.IpUnusable - The target IP address is reserved for + // use by a load balancer. // - // If the target state is draining, the reason code can be the following - // value: + // If the target state is draining, the reason code can be + // the following value: // - // * Target.DeregistrationInProgress - The target is in the process of - // being deregistered and the deregistration delay period has not expired. + // * Target.DeregistrationInProgress - The target is in the + // process of being deregistered and the deregistration delay period has not + // expired. // - // If the - // target state is unavailable, the reason code can be the following value: + // If the target state is unavailable, the reason code can be the + // following value: // - // * - // Target.HealthCheckDisabled - Health checks are disabled for the target group. - // Applies only to Application Load Balancers. + // * Target.HealthCheckDisabled - Health checks are disabled for + // the target group. Applies only to Application Load Balancers. // - // * Elb.InternalError - Target - // health is unavailable due to an internal error. Applies only to Network Load - // Balancers. + // * + // Elb.InternalError - Target health is unavailable due to an internal error. + // Applies only to Network Load Balancers. Reason TargetHealthReasonEnum // The state of the target. diff --git a/service/elasticsearchservice/api_op_DescribeElasticsearchInstanceTypeLimits.go b/service/elasticsearchservice/api_op_DescribeElasticsearchInstanceTypeLimits.go index e24653abcf8..9f91dbba127 100644 --- a/service/elasticsearchservice/api_op_DescribeElasticsearchInstanceTypeLimits.go +++ b/service/elasticsearchservice/api_op_DescribeElasticsearchInstanceTypeLimits.go @@ -57,14 +57,14 @@ type DescribeElasticsearchInstanceTypeLimitsOutput struct { // Map of Role of the Instance and Limits that are applicable. Role performed by // given Instance in Elasticsearch can be one of the following: // - // * data: If the + // * data: If the // given InstanceType is used as data node // - // * master: If the given InstanceType - // is used as master node + // * master: If the given InstanceType is + // used as master node // - // * ultra_warm: If the given InstanceType is used as - // warm node + // * ultra_warm: If the given InstanceType is used as warm + // node LimitsByRole map[string]*types.Limits // Metadata pertaining to the operation's result. diff --git a/service/elasticsearchservice/api_op_DescribeInboundCrossClusterSearchConnections.go b/service/elasticsearchservice/api_op_DescribeInboundCrossClusterSearchConnections.go index 7b1a9a07ce0..9f62d5acf6a 100644 --- a/service/elasticsearchservice/api_op_DescribeInboundCrossClusterSearchConnections.go +++ b/service/elasticsearchservice/api_op_DescribeInboundCrossClusterSearchConnections.go @@ -34,17 +34,17 @@ type DescribeInboundCrossClusterSearchConnectionsInput struct { // A list of filters used to match properties for inbound cross-cluster search // connection. 
Available Filter names for this operation are: // - // * + // * // cross-cluster-search-connection-id // - // * source-domain-info.domain-name + // * source-domain-info.domain-name // - // * + // * // source-domain-info.owner-id // - // * source-domain-info.region + // * source-domain-info.region // - // * + // * // destination-domain-info.domain-name Filters []*types.Filter diff --git a/service/elasticsearchservice/api_op_DescribeOutboundCrossClusterSearchConnections.go b/service/elasticsearchservice/api_op_DescribeOutboundCrossClusterSearchConnections.go index 6f223757110..4b75ccb3b6a 100644 --- a/service/elasticsearchservice/api_op_DescribeOutboundCrossClusterSearchConnections.go +++ b/service/elasticsearchservice/api_op_DescribeOutboundCrossClusterSearchConnections.go @@ -34,17 +34,17 @@ type DescribeOutboundCrossClusterSearchConnectionsInput struct { // A list of filters used to match properties for outbound cross-cluster search // connection. Available Filter names for this operation are: // - // * + // * // cross-cluster-search-connection-id // - // * destination-domain-info.domain-name + // * destination-domain-info.domain-name // + // * + // destination-domain-info.owner-id // - // * destination-domain-info.owner-id + // * destination-domain-info.region // - // * destination-domain-info.region - // - // * + // * // source-domain-info.domain-name Filters []*types.Filter diff --git a/service/elasticsearchservice/api_op_GetUpgradeStatus.go b/service/elasticsearchservice/api_op_GetUpgradeStatus.go index 09d6f2d1387..7b0139cd27b 100644 --- a/service/elasticsearchservice/api_op_GetUpgradeStatus.go +++ b/service/elasticsearchservice/api_op_GetUpgradeStatus.go @@ -47,14 +47,13 @@ type GetUpgradeStatusOutput struct { // GetUpgradeStatusResponse object. The status can take one of the following // values: // - // * In Progress + // * In Progress // - // * Succeeded + // * Succeeded // - // * Succeeded with Issues + // * Succeeded with Issues // - // * - // Failed + // * Failed StepStatus types.UpgradeStatus // A string that describes the update briefly @@ -63,11 +62,11 @@ type GetUpgradeStatusOutput struct { // Represents one of 3 steps that an Upgrade or Upgrade Eligibility Check does // through: // - // * PreUpgradeCheck + // * PreUpgradeCheck // - // * Snapshot + // * Snapshot // - // * Upgrade + // * Upgrade UpgradeStep types.UpgradeStep // Metadata pertaining to the operation's result. diff --git a/service/elasticsearchservice/types/enums.go b/service/elasticsearchservice/types/enums.go index f22b9780aea..d91eb3b6f96 100644 --- a/service/elasticsearchservice/types/enums.go +++ b/service/elasticsearchservice/types/enums.go @@ -6,11 +6,11 @@ type DeploymentStatus string // Enum values for DeploymentStatus const ( - DeploymentStatusPending_update DeploymentStatus = "PENDING_UPDATE" - DeploymentStatusIn_progress DeploymentStatus = "IN_PROGRESS" - DeploymentStatusCompleted DeploymentStatus = "COMPLETED" - DeploymentStatusNot_eligible DeploymentStatus = "NOT_ELIGIBLE" - DeploymentStatusEligible DeploymentStatus = "ELIGIBLE" + DeploymentStatusPendingUpdate DeploymentStatus = "PENDING_UPDATE" + DeploymentStatusInProgress DeploymentStatus = "IN_PROGRESS" + DeploymentStatusCompleted DeploymentStatus = "COMPLETED" + DeploymentStatusNotEligible DeploymentStatus = "NOT_ELIGIBLE" + DeploymentStatusEligible DeploymentStatus = "ELIGIBLE" ) // Values returns all known values for DeploymentStatus. 
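The filter names listed above are plain strings, so a DescribeInboundCrossClusterSearchConnections request only needs to pair a name with one or more values. A sketch follows; it assumes the elasticsearchservice types.Filter struct exposes Name and Values fields (those field names are not shown in this diff).

    // Assumed imports:
    //   "github.com/aws/aws-sdk-go-v2/aws"
    //   "github.com/aws/aws-sdk-go-v2/service/elasticsearchservice"
    //   "github.com/aws/aws-sdk-go-v2/service/elasticsearchservice/types"

    // inboundConnectionsInput filters inbound cross-cluster search connections
    // by source domain name, one of the filter names listed above.
    func inboundConnectionsInput(domain string) *elasticsearchservice.DescribeInboundCrossClusterSearchConnectionsInput {
        return &elasticsearchservice.DescribeInboundCrossClusterSearchConnectionsInput{
            Filters: []*types.Filter{
                {
                    Name:   aws.String("source-domain-info.domain-name"), // Name/Values are assumed field names
                    Values: []*string{aws.String(domain)},
                },
            },
        }
    }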
Note that this can be @@ -50,11 +50,11 @@ type DomainPackageStatus string // Enum values for DomainPackageStatus const ( - DomainPackageStatusAssociating DomainPackageStatus = "ASSOCIATING" - DomainPackageStatusAssociation_failed DomainPackageStatus = "ASSOCIATION_FAILED" - DomainPackageStatusActive DomainPackageStatus = "ACTIVE" - DomainPackageStatusDissociating DomainPackageStatus = "DISSOCIATING" - DomainPackageStatusDissociation_failed DomainPackageStatus = "DISSOCIATION_FAILED" + DomainPackageStatusAssociating DomainPackageStatus = "ASSOCIATING" + DomainPackageStatusAssociationFailed DomainPackageStatus = "ASSOCIATION_FAILED" + DomainPackageStatusActive DomainPackageStatus = "ACTIVE" + DomainPackageStatusDissociating DomainPackageStatus = "DISSOCIATING" + DomainPackageStatusDissociationFailed DomainPackageStatus = "DISSOCIATION_FAILED" ) // Values returns all known values for DomainPackageStatus. Note that this can be @@ -222,12 +222,12 @@ type InboundCrossClusterSearchConnectionStatusCode string // Enum values for InboundCrossClusterSearchConnectionStatusCode const ( - InboundCrossClusterSearchConnectionStatusCodePending_acceptance InboundCrossClusterSearchConnectionStatusCode = "PENDING_ACCEPTANCE" - InboundCrossClusterSearchConnectionStatusCodeApproved InboundCrossClusterSearchConnectionStatusCode = "APPROVED" - InboundCrossClusterSearchConnectionStatusCodeRejecting InboundCrossClusterSearchConnectionStatusCode = "REJECTING" - InboundCrossClusterSearchConnectionStatusCodeRejected InboundCrossClusterSearchConnectionStatusCode = "REJECTED" - InboundCrossClusterSearchConnectionStatusCodeDeleting InboundCrossClusterSearchConnectionStatusCode = "DELETING" - InboundCrossClusterSearchConnectionStatusCodeDeleted InboundCrossClusterSearchConnectionStatusCode = "DELETED" + InboundCrossClusterSearchConnectionStatusCodePendingAcceptance InboundCrossClusterSearchConnectionStatusCode = "PENDING_ACCEPTANCE" + InboundCrossClusterSearchConnectionStatusCodeApproved InboundCrossClusterSearchConnectionStatusCode = "APPROVED" + InboundCrossClusterSearchConnectionStatusCodeRejecting InboundCrossClusterSearchConnectionStatusCode = "REJECTING" + InboundCrossClusterSearchConnectionStatusCodeRejected InboundCrossClusterSearchConnectionStatusCode = "REJECTED" + InboundCrossClusterSearchConnectionStatusCodeDeleting InboundCrossClusterSearchConnectionStatusCode = "DELETING" + InboundCrossClusterSearchConnectionStatusCodeDeleted InboundCrossClusterSearchConnectionStatusCode = "DELETED" ) // Values returns all known values for @@ -249,10 +249,10 @@ type LogType string // Enum values for LogType const ( - LogTypeIndex_slow_logs LogType = "INDEX_SLOW_LOGS" - LogTypeSearch_slow_logs LogType = "SEARCH_SLOW_LOGS" - LogTypeEs_application_logs LogType = "ES_APPLICATION_LOGS" - LogTypeAudit_logs LogType = "AUDIT_LOGS" + LogTypeIndexSlowLogs LogType = "INDEX_SLOW_LOGS" + LogTypeSearchSlowLogs LogType = "SEARCH_SLOW_LOGS" + LogTypeEsApplicationLogs LogType = "ES_APPLICATION_LOGS" + LogTypeAuditLogs LogType = "AUDIT_LOGS" ) // Values returns all known values for LogType. 
Note that this can be expanded in @@ -291,14 +291,14 @@ type OutboundCrossClusterSearchConnectionStatusCode string // Enum values for OutboundCrossClusterSearchConnectionStatusCode const ( - OutboundCrossClusterSearchConnectionStatusCodePending_acceptance OutboundCrossClusterSearchConnectionStatusCode = "PENDING_ACCEPTANCE" - OutboundCrossClusterSearchConnectionStatusCodeValidating OutboundCrossClusterSearchConnectionStatusCode = "VALIDATING" - OutboundCrossClusterSearchConnectionStatusCodeValidation_failed OutboundCrossClusterSearchConnectionStatusCode = "VALIDATION_FAILED" - OutboundCrossClusterSearchConnectionStatusCodeProvisioning OutboundCrossClusterSearchConnectionStatusCode = "PROVISIONING" - OutboundCrossClusterSearchConnectionStatusCodeActive OutboundCrossClusterSearchConnectionStatusCode = "ACTIVE" - OutboundCrossClusterSearchConnectionStatusCodeRejected OutboundCrossClusterSearchConnectionStatusCode = "REJECTED" - OutboundCrossClusterSearchConnectionStatusCodeDeleting OutboundCrossClusterSearchConnectionStatusCode = "DELETING" - OutboundCrossClusterSearchConnectionStatusCodeDeleted OutboundCrossClusterSearchConnectionStatusCode = "DELETED" + OutboundCrossClusterSearchConnectionStatusCodePendingAcceptance OutboundCrossClusterSearchConnectionStatusCode = "PENDING_ACCEPTANCE" + OutboundCrossClusterSearchConnectionStatusCodeValidating OutboundCrossClusterSearchConnectionStatusCode = "VALIDATING" + OutboundCrossClusterSearchConnectionStatusCodeValidationFailed OutboundCrossClusterSearchConnectionStatusCode = "VALIDATION_FAILED" + OutboundCrossClusterSearchConnectionStatusCodeProvisioning OutboundCrossClusterSearchConnectionStatusCode = "PROVISIONING" + OutboundCrossClusterSearchConnectionStatusCodeActive OutboundCrossClusterSearchConnectionStatusCode = "ACTIVE" + OutboundCrossClusterSearchConnectionStatusCodeRejected OutboundCrossClusterSearchConnectionStatusCode = "REJECTED" + OutboundCrossClusterSearchConnectionStatusCodeDeleting OutboundCrossClusterSearchConnectionStatusCode = "DELETING" + OutboundCrossClusterSearchConnectionStatusCodeDeleted OutboundCrossClusterSearchConnectionStatusCode = "DELETED" ) // Values returns all known values for @@ -322,14 +322,14 @@ type PackageStatus string // Enum values for PackageStatus const ( - PackageStatusCopying PackageStatus = "COPYING" - PackageStatusCopy_failed PackageStatus = "COPY_FAILED" - PackageStatusValidating PackageStatus = "VALIDATING" - PackageStatusValidation_failed PackageStatus = "VALIDATION_FAILED" - PackageStatusAvailable PackageStatus = "AVAILABLE" - PackageStatusDeleting PackageStatus = "DELETING" - PackageStatusDeleted PackageStatus = "DELETED" - PackageStatusDelete_failed PackageStatus = "DELETE_FAILED" + PackageStatusCopying PackageStatus = "COPYING" + PackageStatusCopyFailed PackageStatus = "COPY_FAILED" + PackageStatusValidating PackageStatus = "VALIDATING" + PackageStatusValidationFailed PackageStatus = "VALIDATION_FAILED" + PackageStatusAvailable PackageStatus = "AVAILABLE" + PackageStatusDeleting PackageStatus = "DELETING" + PackageStatusDeleted PackageStatus = "DELETED" + PackageStatusDeleteFailed PackageStatus = "DELETE_FAILED" ) // Values returns all known values for PackageStatus. 
Note that this can be @@ -368,9 +368,9 @@ type ReservedElasticsearchInstancePaymentOption string // Enum values for ReservedElasticsearchInstancePaymentOption const ( - ReservedElasticsearchInstancePaymentOptionAll_upfront ReservedElasticsearchInstancePaymentOption = "ALL_UPFRONT" - ReservedElasticsearchInstancePaymentOptionPartial_upfront ReservedElasticsearchInstancePaymentOption = "PARTIAL_UPFRONT" - ReservedElasticsearchInstancePaymentOptionNo_upfront ReservedElasticsearchInstancePaymentOption = "NO_UPFRONT" + ReservedElasticsearchInstancePaymentOptionAllUpfront ReservedElasticsearchInstancePaymentOption = "ALL_UPFRONT" + ReservedElasticsearchInstancePaymentOptionPartialUpfront ReservedElasticsearchInstancePaymentOption = "PARTIAL_UPFRONT" + ReservedElasticsearchInstancePaymentOptionNoUpfront ReservedElasticsearchInstancePaymentOption = "NO_UPFRONT" ) // Values returns all known values for ReservedElasticsearchInstancePaymentOption. @@ -389,8 +389,8 @@ type TLSSecurityPolicy string // Enum values for TLSSecurityPolicy const ( - TLSSecurityPolicyPolicy_min_tls_1_0_2019_07 TLSSecurityPolicy = "Policy-Min-TLS-1-0-2019-07" - TLSSecurityPolicyPolicy_min_tls_1_2_2019_07 TLSSecurityPolicy = "Policy-Min-TLS-1-2-2019-07" + TLSSecurityPolicyPolicyMinTls10201907 TLSSecurityPolicy = "Policy-Min-TLS-1-0-2019-07" + TLSSecurityPolicyPolicyMinTls12201907 TLSSecurityPolicy = "Policy-Min-TLS-1-2-2019-07" ) // Values returns all known values for TLSSecurityPolicy. Note that this can be @@ -407,10 +407,10 @@ type UpgradeStatus string // Enum values for UpgradeStatus const ( - UpgradeStatusIn_progress UpgradeStatus = "IN_PROGRESS" - UpgradeStatusSucceeded UpgradeStatus = "SUCCEEDED" - UpgradeStatusSucceeded_with_issues UpgradeStatus = "SUCCEEDED_WITH_ISSUES" - UpgradeStatusFailed UpgradeStatus = "FAILED" + UpgradeStatusInProgress UpgradeStatus = "IN_PROGRESS" + UpgradeStatusSucceeded UpgradeStatus = "SUCCEEDED" + UpgradeStatusSucceededWithIssues UpgradeStatus = "SUCCEEDED_WITH_ISSUES" + UpgradeStatusFailed UpgradeStatus = "FAILED" ) // Values returns all known values for UpgradeStatus. Note that this can be @@ -429,9 +429,9 @@ type UpgradeStep string // Enum values for UpgradeStep const ( - UpgradeStepPre_upgrade_check UpgradeStep = "PRE_UPGRADE_CHECK" - UpgradeStepSnapshot UpgradeStep = "SNAPSHOT" - UpgradeStepUpgrade UpgradeStep = "UPGRADE" + UpgradeStepPreUpgradeCheck UpgradeStep = "PRE_UPGRADE_CHECK" + UpgradeStepSnapshot UpgradeStep = "SNAPSHOT" + UpgradeStepUpgrade UpgradeStep = "UPGRADE" ) // Values returns all known values for UpgradeStep. Note that this can be expanded diff --git a/service/elasticsearchservice/types/types.go b/service/elasticsearchservice/types/types.go index 553e2e786e4..eeb8f5b6a0f 100644 --- a/service/elasticsearchservice/types/types.go +++ b/service/elasticsearchservice/types/types.go @@ -34,16 +34,16 @@ type AdditionalLimit struct { // // Attributes and their details: // - // * + // * // MaximumNumberOfDataNodesSupported - // This attribute will be present in Master - // node only to specify how much data nodes upto which given - // ESPartitionInstanceType can support as master node. - // * - // MaximumNumberOfDataNodesWithoutMasterNode - // This attribute will be present in - // Data node only to specify how much data nodes of given ESPartitionInstanceType - // upto which you don't need any master nodes to govern them. 
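Because this change renames the generated enum constants (for example UpgradeStatusIn_progress becomes UpgradeStatusInProgress), callers that switch on these values need to pick up the new spellings. A small sketch against the elasticsearchservice types package, using only constants that appear in this diff:

    // Assumed import:
    //   "github.com/aws/aws-sdk-go-v2/service/elasticsearchservice/types"

    // describeUpgrade maps an UpgradeStatus value to a human-readable summary,
    // using the renamed constants introduced by this change.
    func describeUpgrade(s types.UpgradeStatus) string {
        switch s {
        case types.UpgradeStatusInProgress:
            return "upgrade is still running"
        case types.UpgradeStatusSucceeded:
            return "upgrade completed"
        case types.UpgradeStatusSucceededWithIssues:
            return "upgrade completed, but reported issues"
        case types.UpgradeStatusFailed:
            return "upgrade failed"
        default:
            // UpgradeStatus is a string type, so unknown values can be surfaced as-is.
            return "unknown upgrade status: " + string(s)
        }
    }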
+ // This attribute will be present in Master node + // only to specify how much data nodes upto which given ESPartitionInstanceType can + // support as master node. + // * MaximumNumberOfDataNodesWithoutMasterNode + // This + // attribute will be present in Data node only to specify how much data nodes of + // given ESPartitionInstanceType upto which you don't need any master nodes to + // govern them. LimitName *string // Value for given AdditionalLimit$LimitName . @@ -53,15 +53,15 @@ type AdditionalLimit struct { // Status of the advanced options for the specified Elasticsearch domain. // Currently, the following advanced options are available: // -// * Option to allow +// * Option to allow // references to indices in an HTTP request body. Must be false when configuring // access to individual sub-resources. By default, the value is true. See // Configuration Advanced Options // (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomain-configure-advanced-options) // for more information. // -// * Option to specify the percentage of heap space that -// is allocated to field data. By default, this setting is unbounded. +// * Option to specify the percentage of heap space that is +// allocated to field data. By default, this setting is unbounded. // // For more // information, see Configuring Advanced Options @@ -189,12 +189,12 @@ type DomainEndpointOptions struct { // // It can be one of the following values: // - // * + // * // Policy-Min-TLS-1-0-2019-07: TLS security policy which supports TLSv1.0 and // higher. // - // * Policy-Min-TLS-1-2-2019-07: TLS security policy which supports - // only TLSv1.2 + // * Policy-Min-TLS-1-2-2019-07: TLS security policy which supports only + // TLSv1.2 TLSSecurityPolicy TLSSecurityPolicy } @@ -591,23 +591,23 @@ type InboundCrossClusterSearchConnectionStatus struct { // The state code for inbound connection. This can be one of the following: // - // * + // * // PENDING_ACCEPTANCE: Inbound connection is not yet accepted by destination domain // owner. // - // * APPROVED: Inbound connection is pending acceptance by destination + // * APPROVED: Inbound connection is pending acceptance by destination // domain owner. // - // * REJECTING: Inbound connection rejection is in process. + // * REJECTING: Inbound connection rejection is in process. // + // * + // REJECTED: Inbound connection is rejected. // - // * REJECTED: Inbound connection is rejected. - // - // * DELETING: Inbound connection + // * DELETING: Inbound connection // deletion is in progress. // - // * DELETED: Inbound connection is deleted and - // cannot be used further. + // * DELETED: Inbound connection is deleted and cannot be + // used further. StatusCode InboundCrossClusterSearchConnectionStatusCode } @@ -655,11 +655,11 @@ type Limits struct { // Attributes and their // details: // -// * CloudWatchLogsLogGroupArn: ARN of the Cloudwatch log group to -// which log needs to be published. +// * CloudWatchLogsLogGroupArn: ARN of the Cloudwatch log group to which +// log needs to be published. // -// * Enabled: Whether the log publishing for -// given log type is enabled or not +// * Enabled: Whether the log publishing for given log +// type is enabled or not type LogPublishingOption struct { // ARN of the Cloudwatch log group to which log needs to be published. @@ -772,29 +772,29 @@ type OutboundCrossClusterSearchConnectionStatus struct { // The state code for outbound connection. 
This can be one of the following: // - // * + // * // VALIDATING: The outbound connection request is being validated. // - // * + // * // VALIDATION_FAILED: Validation failed for the connection request. // - // * + // * // PENDING_ACCEPTANCE: Outbound connection request is validated and is not yet // accepted by destination domain owner. // - // * PROVISIONING: Outbound connection + // * PROVISIONING: Outbound connection // request is in process. // - // * ACTIVE: Outbound connection is active and ready to + // * ACTIVE: Outbound connection is active and ready to // use. // - // * REJECTED: Outbound connection request is rejected by destination - // domain owner. - // - // * DELETING: Outbound connection deletion is in progress. + // * REJECTED: Outbound connection request is rejected by destination domain + // owner. // + // * DELETING: Outbound connection deletion is in progress. // - // * DELETED: Outbound connection is deleted and cannot be used further. + // * DELETED: + // Outbound connection is deleted and cannot be used further. StatusCode OutboundCrossClusterSearchConnectionStatusCode } @@ -987,15 +987,14 @@ type StorageType struct { // "instance" storageType we wont have any storageSubType, in case of "ebs" // storageType we will have following valid storageSubTypes // - // * standard + // * standard // - // * - // gp2 + // * gp2 // - // * io1 + // * + // io1 // - // Refer VolumeType for more information regarding above EBS - // storage options. + // Refer VolumeType for more information regarding above EBS storage options. StorageSubTypeName *string // List of limits that are applicable for given storage type. @@ -1003,12 +1002,12 @@ type StorageType struct { // Type of the storage. List of available storage options: // - // * instance - // - // Inbuilt storage available for the given Instance - // * ebs - // Elastic block - // storage that would be attached to the given Instance + // * instance + // Inbuilt + // storage available for the given Instance + // * ebs + // Elastic block storage that would + // be attached to the given Instance StorageTypeName *string } @@ -1018,21 +1017,21 @@ type StorageTypeLimit struct { // Name of storage limits that are applicable for given storage type. If // StorageType is ebs, following storage options are applicable // - // * + // * // MinimumVolumeSize - // Minimum amount of volume size that is applicable for - // given storage type.It can be empty if it is not applicable. - // * - // MaximumVolumeSize - // Maximum amount of volume size that is applicable for - // given storage type.It can be empty if it is not applicable. - // * MaximumIops - // - // Maximum amount of Iops that is applicable for given storage type.It can be empty - // if it is not applicable. - // * MinimumIops - // Minimum amount of Iops that is + // Minimum amount of volume size that is applicable for given + // storage type.It can be empty if it is not applicable. + // * MaximumVolumeSize + // + // Maximum amount of volume size that is applicable for given storage type.It can + // be empty if it is not applicable. + // * MaximumIops + // Maximum amount of Iops that is // applicable for given storage type.It can be empty if it is not applicable. + // * + // MinimumIops + // Minimum amount of Iops that is applicable for given storage type.It + // can be empty if it is not applicable. LimitName *string // Values for the StorageTypeLimit$LimitName . @@ -1074,14 +1073,13 @@ type UpgradeHistory struct { // The overall status of the update. 
The status can take one of the following // values: // - // * In Progress + // * In Progress // - // * Succeeded + // * Succeeded // - // * Succeeded with Issues + // * Succeeded with Issues // - // * - // Failed + // * Failed UpgradeStatus UpgradeStatus } @@ -1098,24 +1096,24 @@ type UpgradeStepItem struct { // Represents one of 3 steps that an Upgrade or Upgrade Eligibility Check does // through: // - // * PreUpgradeCheck + // * PreUpgradeCheck // - // * Snapshot + // * Snapshot // - // * Upgrade + // * Upgrade UpgradeStep UpgradeStep // The status of a particular step during an upgrade. The status can take one of // the following values: // - // * In Progress + // * In Progress // - // * Succeeded + // * Succeeded // - // * Succeeded with - // Issues + // * Succeeded with Issues // - // * Failed + // * + // Failed UpgradeStepStatus UpgradeStatus } diff --git a/service/elastictranscoder/api_op_CreatePipeline.go b/service/elastictranscoder/api_op_CreatePipeline.go index 0eb0c8a34a6..e9fbb19357d 100644 --- a/service/elastictranscoder/api_op_CreatePipeline.go +++ b/service/elastictranscoder/api_op_CreatePipeline.go @@ -64,117 +64,117 @@ type CreatePipelineInput struct { // also specify values for ThumbnailConfig. If you specify values for ContentConfig // and ThumbnailConfig, omit the OutputBucket object. // - // * Bucket: The Amazon S3 + // * Bucket: The Amazon S3 // bucket in which you want Elastic Transcoder to save transcoded files and // playlists. // - // * Permissions (Optional): The Permissions object specifies which + // * Permissions (Optional): The Permissions object specifies which // users you want to have access to transcoded files and the type of access you // want them to have. You can grant permissions to a maximum of 30 users and/or // predefined Amazon S3 groups. // - // * Grantee Type: Specify the type of value that + // * Grantee Type: Specify the type of value that // appears in the Grantee object: // - // * Canonical: The value in the Grantee - // object is either the canonical user ID for an AWS account or an origin access - // identity for an Amazon CloudFront distribution. For more information about - // canonical user IDs, see Access Control List (ACL) Overview in the Amazon Simple - // Storage Service Developer Guide. For more information about using CloudFront - // origin access identities to require that users use CloudFront URLs instead of - // Amazon S3 URLs, see Using an Origin Access Identity to Restrict Access to Your - // Amazon S3 Content. A canonical user ID is not the same as an AWS account - // number. - // - // * Email: The value in the Grantee object is the registered - // email address of an AWS account. - // - // * Group: The value in the Grantee - // object is one of the following predefined Amazon S3 groups: AllUsers, - // AuthenticatedUsers, or LogDelivery. - // - // * Grantee: The AWS user or group that - // you want to have access to transcoded files and playlists. To identify the user - // or group, you can specify the canonical user ID for an AWS account, an origin - // access identity for a CloudFront distribution, the registered email address of - // an AWS account, or a predefined Amazon S3 group - // - // * Access: The permission - // that you want to give to the AWS user that you specified in Grantee. Permissions - // are granted on the files that Elastic Transcoder adds to the bucket, including - // playlists and video files. 
Valid values include: - // - // * READ: The grantee - // can read the objects and metadata for objects that Elastic Transcoder adds to - // the Amazon S3 bucket. - // - // * READ_ACP: The grantee can read the object ACL - // for objects that Elastic Transcoder adds to the Amazon S3 bucket. + // * Canonical: The value in the Grantee object is + // either the canonical user ID for an AWS account or an origin access identity for + // an Amazon CloudFront distribution. For more information about canonical user + // IDs, see Access Control List (ACL) Overview in the Amazon Simple Storage Service + // Developer Guide. For more information about using CloudFront origin access + // identities to require that users use CloudFront URLs instead of Amazon S3 URLs, + // see Using an Origin Access Identity to Restrict Access to Your Amazon S3 + // Content. A canonical user ID is not the same as an AWS account number. + // + // * Email: + // The value in the Grantee object is the registered email address of an AWS + // account. + // + // * Group: The value in the Grantee object is one of the following + // predefined Amazon S3 groups: AllUsers, AuthenticatedUsers, or LogDelivery. + // + // * + // Grantee: The AWS user or group that you want to have access to transcoded files + // and playlists. To identify the user or group, you can specify the canonical user + // ID for an AWS account, an origin access identity for a CloudFront distribution, + // the registered email address of an AWS account, or a predefined Amazon S3 + // group + // + // * Access: The permission that you want to give to the AWS user that you + // specified in Grantee. Permissions are granted on the files that Elastic + // Transcoder adds to the bucket, including playlists and video files. Valid values + // include: + // + // * READ: The grantee can read the objects and metadata for objects that + // Elastic Transcoder adds to the Amazon S3 bucket. + // + // * READ_ACP: The grantee can + // read the object ACL for objects that Elastic Transcoder adds to the Amazon S3 + // bucket. // - // * - // WRITE_ACP: The grantee can write the ACL for the objects that Elastic Transcoder - // adds to the Amazon S3 bucket. + // * WRITE_ACP: The grantee can write the ACL for the objects that Elastic + // Transcoder adds to the Amazon S3 bucket. // - // * FULL_CONTROL: The grantee has READ, + // * FULL_CONTROL: The grantee has READ, // READ_ACP, and WRITE_ACP permissions for the objects that Elastic Transcoder adds // to the Amazon S3 bucket. // - // * StorageClass: The Amazon S3 storage class, - // Standard or ReducedRedundancy, that you want Elastic Transcoder to assign to the - // video files and playlists that it stores in your Amazon S3 bucket. + // * StorageClass: The Amazon S3 storage class, Standard + // or ReducedRedundancy, that you want Elastic Transcoder to assign to the video + // files and playlists that it stores in your Amazon S3 bucket. ContentConfig *types.PipelineOutputConfig // The Amazon Simple Notification Service (Amazon SNS) topic that you want to // notify to report job status. To receive notifications, you must also subscribe // to the new topic in the Amazon SNS console. // - // * Progressing: The topic ARN - // for the Amazon Simple Notification Service (Amazon SNS) topic that you want to + // * Progressing: The topic ARN for + // the Amazon Simple Notification Service (Amazon SNS) topic that you want to // notify when Elastic Transcoder has started to process a job in this pipeline. 
// This is the ARN that Amazon SNS returned when you created the topic. For more // information, see Create a Topic in the Amazon Simple Notification Service // Developer Guide. // - // * Complete: The topic ARN for the Amazon SNS topic that - // you want to notify when Elastic Transcoder has finished processing a job in this - // pipeline. This is the ARN that Amazon SNS returned when you created the topic. + // * Complete: The topic ARN for the Amazon SNS topic that you + // want to notify when Elastic Transcoder has finished processing a job in this + // pipeline. This is the ARN that Amazon SNS returned when you created the + // topic. // + // * Warning: The topic ARN for the Amazon SNS topic that you want to + // notify when Elastic Transcoder encounters a warning condition while processing a + // job in this pipeline. This is the ARN that Amazon SNS returned when you created + // the topic. // - // * Warning: The topic ARN for the Amazon SNS topic that you want to notify when - // Elastic Transcoder encounters a warning condition while processing a job in this - // pipeline. This is the ARN that Amazon SNS returned when you created the topic. - // - // - // * Error: The topic ARN for the Amazon SNS topic that you want to notify when - // Elastic Transcoder encounters an error condition while processing a job in this - // pipeline. This is the ARN that Amazon SNS returned when you created the topic. + // * Error: The topic ARN for the Amazon SNS topic that you want to + // notify when Elastic Transcoder encounters an error condition while processing a + // job in this pipeline. This is the ARN that Amazon SNS returned when you created + // the topic. Notifications *types.Notifications // The Amazon S3 bucket in which you want Elastic Transcoder to save the transcoded // files. (Use this, or use ContentConfig:Bucket plus ThumbnailConfig:Bucket.) // Specify this value when all of the following are true: // - // * You want to save + // * You want to save // transcoded files, thumbnails (if any), and playlists (if any) together in one // bucket. // - // * You do not want to specify the users or groups who have access to - // the transcoded files, thumbnails, and playlists. + // * You do not want to specify the users or groups who have access to the + // transcoded files, thumbnails, and playlists. // - // * You do not want to - // specify the permissions that Elastic Transcoder grants to the files. When - // Elastic Transcoder saves files in OutputBucket, it grants full control over the - // files only to the AWS account that owns the role that is specified by Role. + // * You do not want to specify the + // permissions that Elastic Transcoder grants to the files. When Elastic Transcoder + // saves files in OutputBucket, it grants full control over the files only to the + // AWS account that owns the role that is specified by Role. // + // * You want to + // associate the transcoded files and thumbnails with the Amazon S3 Standard + // storage class. // - // * You want to associate the transcoded files and thumbnails with the Amazon S3 - // Standard storage class. - // - // If you want to save transcoded files and playlists in - // one bucket and thumbnails in another bucket, specify which users can access the - // transcoded files or the permissions the users have, or change the Amazon S3 - // storage class, omit OutputBucket and specify values for ContentConfig and - // ThumbnailConfig instead. 
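To make the ContentConfig description above more concrete, a sketch of a PipelineOutputConfig literal follows. Bucket, StorageClass, and the Permissions/GranteeType/Grantee/Access terminology come from the comments above; the exact Go field names of the Permission struct and the casing of the Access value are assumptions based on the Elastic Transcoder REST API rather than on this diff.

    // Assumed imports:
    //   "github.com/aws/aws-sdk-go-v2/aws"
    //   "github.com/aws/aws-sdk-go-v2/service/elastictranscoder/types"

    // contentConfig saves transcoded output to a dedicated bucket and grants an
    // email-identified AWS account read access to the files (field names assumed).
    func contentConfig() *types.PipelineOutputConfig {
        return &types.PipelineOutputConfig{
            Bucket:       aws.String("my-transcoded-media-bucket"), // placeholder bucket name
            StorageClass: aws.String("Standard"),
            Permissions: []*types.Permission{
                {
                    GranteeType: aws.String("Email"),
                    Grantee:     aws.String("ops@example.com"),
                    Access:      []*string{aws.String("Read")}, // value casing assumed
                },
            },
        }
    }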
+ // If you want to save transcoded files and playlists in one bucket + // and thumbnails in another bucket, specify which users can access the transcoded + // files or the permissions the users have, or change the Amazon S3 storage class, + // omit OutputBucket and specify values for ContentConfig and ThumbnailConfig + // instead. OutputBucket *string // The ThumbnailConfig object specifies several values, including the Amazon S3 @@ -185,57 +185,56 @@ type CreatePipelineInput struct { // if you don't want to create thumbnails. If you specify values for ContentConfig // and ThumbnailConfig, omit the OutputBucket object. // - // * Bucket: The Amazon S3 + // * Bucket: The Amazon S3 // bucket in which you want Elastic Transcoder to save thumbnail files. // - // * + // * // Permissions (Optional): The Permissions object specifies which users and/or // predefined Amazon S3 groups you want to have access to thumbnail files, and the // type of access you want them to have. You can grant permissions to a maximum of // 30 users and/or predefined Amazon S3 groups. // - // * GranteeType: Specify the - // type of value that appears in the Grantee object: + // * GranteeType: Specify the type of + // value that appears in the Grantee object: // - // * Canonical: The - // value in the Grantee object is either the canonical user ID for an AWS account - // or an origin access identity for an Amazon CloudFront distribution. A canonical - // user ID is not the same as an AWS account number. + // * Canonical: The value in the Grantee + // object is either the canonical user ID for an AWS account or an origin access + // identity for an Amazon CloudFront distribution. A canonical user ID is not the + // same as an AWS account number. // - // * Email: The value in - // the Grantee object is the registered email address of an AWS account. + // * Email: The value in the Grantee object is the + // registered email address of an AWS account. // - // * - // Group: The value in the Grantee object is one of the following predefined Amazon - // S3 groups: AllUsers, AuthenticatedUsers, or LogDelivery. + // * Group: The value in the Grantee + // object is one of the following predefined Amazon S3 groups: AllUsers, + // AuthenticatedUsers, or LogDelivery. // - // * Grantee: The AWS - // user or group that you want to have access to thumbnail files. To identify the - // user or group, you can specify the canonical user ID for an AWS account, an - // origin access identity for a CloudFront distribution, the registered email - // address of an AWS account, or a predefined Amazon S3 group. + // * Grantee: The AWS user or group that you + // want to have access to thumbnail files. To identify the user or group, you can + // specify the canonical user ID for an AWS account, an origin access identity for + // a CloudFront distribution, the registered email address of an AWS account, or a + // predefined Amazon S3 group. // - // * Access: The - // permission that you want to give to the AWS user that you specified in Grantee. - // Permissions are granted on the thumbnail files that Elastic Transcoder adds to - // the bucket. Valid values include: + // * Access: The permission that you want to give to + // the AWS user that you specified in Grantee. Permissions are granted on the + // thumbnail files that Elastic Transcoder adds to the bucket. Valid values + // include: // - // * READ: The grantee can read the - // thumbnails and metadata for objects that Elastic Transcoder adds to the Amazon - // S3 bucket. 
+ // * READ: The grantee can read the thumbnails and metadata for objects + // that Elastic Transcoder adds to the Amazon S3 bucket. // - // * READ_ACP: The grantee can read the object ACL for - // thumbnails that Elastic Transcoder adds to the Amazon S3 bucket. + // * READ_ACP: The grantee + // can read the object ACL for thumbnails that Elastic Transcoder adds to the + // Amazon S3 bucket. // - // * - // WRITE_ACP: The grantee can write the ACL for the thumbnails that Elastic - // Transcoder adds to the Amazon S3 bucket. + // * WRITE_ACP: The grantee can write the ACL for the thumbnails + // that Elastic Transcoder adds to the Amazon S3 bucket. // - // * FULL_CONTROL: The grantee - // has READ, READ_ACP, and WRITE_ACP permissions for the thumbnails that Elastic - // Transcoder adds to the Amazon S3 bucket. + // * FULL_CONTROL: The + // grantee has READ, READ_ACP, and WRITE_ACP permissions for the thumbnails that + // Elastic Transcoder adds to the Amazon S3 bucket. // - // * StorageClass: The Amazon S3 + // * StorageClass: The Amazon S3 // storage class, Standard or ReducedRedundancy, that you want Elastic Transcoder // to assign to the thumbnails that it stores in your Amazon S3 bucket. ThumbnailConfig *types.PipelineOutputConfig diff --git a/service/elastictranscoder/api_op_UpdatePipeline.go b/service/elastictranscoder/api_op_UpdatePipeline.go index ce97373cdaa..ca495e09e36 100644 --- a/service/elastictranscoder/api_op_UpdatePipeline.go +++ b/service/elastictranscoder/api_op_UpdatePipeline.go @@ -54,64 +54,63 @@ type UpdatePipelineInput struct { // also specify values for ThumbnailConfig. If you specify values for ContentConfig // and ThumbnailConfig, omit the OutputBucket object. // - // * Bucket: The Amazon S3 + // * Bucket: The Amazon S3 // bucket in which you want Elastic Transcoder to save transcoded files and // playlists. // - // * Permissions (Optional): The Permissions object specifies which + // * Permissions (Optional): The Permissions object specifies which // users you want to have access to transcoded files and the type of access you // want them to have. You can grant permissions to a maximum of 30 users and/or // predefined Amazon S3 groups. // - // * Grantee Type: Specify the type of value that + // * Grantee Type: Specify the type of value that // appears in the Grantee object: // - // * Canonical: The value in the Grantee - // object is either the canonical user ID for an AWS account or an origin access - // identity for an Amazon CloudFront distribution. For more information about - // canonical user IDs, see Access Control List (ACL) Overview in the Amazon Simple - // Storage Service Developer Guide. For more information about using CloudFront - // origin access identities to require that users use CloudFront URLs instead of - // Amazon S3 URLs, see Using an Origin Access Identity to Restrict Access to Your - // Amazon S3 Content. A canonical user ID is not the same as an AWS account - // number. + // * Canonical: The value in the Grantee object is + // either the canonical user ID for an AWS account or an origin access identity for + // an Amazon CloudFront distribution. For more information about canonical user + // IDs, see Access Control List (ACL) Overview in the Amazon Simple Storage Service + // Developer Guide. For more information about using CloudFront origin access + // identities to require that users use CloudFront URLs instead of Amazon S3 URLs, + // see Using an Origin Access Identity to Restrict Access to Your Amazon S3 + // Content. 
A canonical user ID is not the same as an AWS account number. // - // * Email: The value in the Grantee object is the registered - // email address of an AWS account. + // * Email: + // The value in the Grantee object is the registered email address of an AWS + // account. // - // * Group: The value in the Grantee - // object is one of the following predefined Amazon S3 groups: AllUsers, - // AuthenticatedUsers, or LogDelivery. + // * Group: The value in the Grantee object is one of the following + // predefined Amazon S3 groups: AllUsers, AuthenticatedUsers, or LogDelivery. // - // * Grantee: The AWS user or group that - // you want to have access to transcoded files and playlists. To identify the user - // or group, you can specify the canonical user ID for an AWS account, an origin - // access identity for a CloudFront distribution, the registered email address of - // an AWS account, or a predefined Amazon S3 group + // * + // Grantee: The AWS user or group that you want to have access to transcoded files + // and playlists. To identify the user or group, you can specify the canonical user + // ID for an AWS account, an origin access identity for a CloudFront distribution, + // the registered email address of an AWS account, or a predefined Amazon S3 + // group // - // * Access: The permission - // that you want to give to the AWS user that you specified in Grantee. Permissions - // are granted on the files that Elastic Transcoder adds to the bucket, including - // playlists and video files. Valid values include: + // * Access: The permission that you want to give to the AWS user that you + // specified in Grantee. Permissions are granted on the files that Elastic + // Transcoder adds to the bucket, including playlists and video files. Valid values + // include: // - // * READ: The grantee - // can read the objects and metadata for objects that Elastic Transcoder adds to - // the Amazon S3 bucket. + // * READ: The grantee can read the objects and metadata for objects that + // Elastic Transcoder adds to the Amazon S3 bucket. // - // * READ_ACP: The grantee can read the object ACL - // for objects that Elastic Transcoder adds to the Amazon S3 bucket. + // * READ_ACP: The grantee can + // read the object ACL for objects that Elastic Transcoder adds to the Amazon S3 + // bucket. // - // * - // WRITE_ACP: The grantee can write the ACL for the objects that Elastic Transcoder - // adds to the Amazon S3 bucket. + // * WRITE_ACP: The grantee can write the ACL for the objects that Elastic + // Transcoder adds to the Amazon S3 bucket. // - // * FULL_CONTROL: The grantee has READ, + // * FULL_CONTROL: The grantee has READ, // READ_ACP, and WRITE_ACP permissions for the objects that Elastic Transcoder adds // to the Amazon S3 bucket. // - // * StorageClass: The Amazon S3 storage class, - // Standard or ReducedRedundancy, that you want Elastic Transcoder to assign to the - // video files and playlists that it stores in your Amazon S3 bucket. + // * StorageClass: The Amazon S3 storage class, Standard + // or ReducedRedundancy, that you want Elastic Transcoder to assign to the video + // files and playlists that it stores in your Amazon S3 bucket. ContentConfig *types.PipelineOutputConfig // The Amazon S3 bucket in which you saved the media files that you want to @@ -126,24 +125,24 @@ type UpdatePipelineInput struct { // you want to notify to report job status. To receive notifications, you must also // subscribe to the new topic in the Amazon SNS console. 
// - // * Progressing: The - // topic ARN for the Amazon Simple Notification Service (Amazon SNS) topic that you - // want to notify when Elastic Transcoder has started to process jobs that are - // added to this pipeline. This is the ARN that Amazon SNS returned when you - // created the topic. + // * Progressing: The topic + // ARN for the Amazon Simple Notification Service (Amazon SNS) topic that you want + // to notify when Elastic Transcoder has started to process jobs that are added to + // this pipeline. This is the ARN that Amazon SNS returned when you created the + // topic. // - // * Complete: The topic ARN for the Amazon SNS topic that - // you want to notify when Elastic Transcoder has finished processing a job. This - // is the ARN that Amazon SNS returned when you created the topic. + // * Complete: The topic ARN for the Amazon SNS topic that you want to + // notify when Elastic Transcoder has finished processing a job. This is the ARN + // that Amazon SNS returned when you created the topic. // - // * Warning: - // The topic ARN for the Amazon SNS topic that you want to notify when Elastic - // Transcoder encounters a warning condition. This is the ARN that Amazon SNS - // returned when you created the topic. + // * Warning: The topic ARN + // for the Amazon SNS topic that you want to notify when Elastic Transcoder + // encounters a warning condition. This is the ARN that Amazon SNS returned when + // you created the topic. // - // * Error: The topic ARN for the Amazon - // SNS topic that you want to notify when Elastic Transcoder encounters an error - // condition. This is the ARN that Amazon SNS returned when you created the topic. + // * Error: The topic ARN for the Amazon SNS topic that you + // want to notify when Elastic Transcoder encounters an error condition. This is + // the ARN that Amazon SNS returned when you created the topic. Notifications *types.Notifications // The IAM Amazon Resource Name (ARN) for the role that you want Elastic Transcoder @@ -158,57 +157,56 @@ type UpdatePipelineInput struct { // if you don't want to create thumbnails. If you specify values for ContentConfig // and ThumbnailConfig, omit the OutputBucket object. // - // * Bucket: The Amazon S3 + // * Bucket: The Amazon S3 // bucket in which you want Elastic Transcoder to save thumbnail files. // - // * + // * // Permissions (Optional): The Permissions object specifies which users and/or // predefined Amazon S3 groups you want to have access to thumbnail files, and the // type of access you want them to have. You can grant permissions to a maximum of // 30 users and/or predefined Amazon S3 groups. // - // * GranteeType: Specify the - // type of value that appears in the Grantee object: + // * GranteeType: Specify the type of + // value that appears in the Grantee object: // - // * Canonical: The - // value in the Grantee object is either the canonical user ID for an AWS account - // or an origin access identity for an Amazon CloudFront distribution. A canonical - // user ID is not the same as an AWS account number. + // * Canonical: The value in the Grantee + // object is either the canonical user ID for an AWS account or an origin access + // identity for an Amazon CloudFront distribution. A canonical user ID is not the + // same as an AWS account number. // - // * Email: The value in - // the Grantee object is the registered email address of an AWS account. + // * Email: The value in the Grantee object is the + // registered email address of an AWS account. 
// - // * - // Group: The value in the Grantee object is one of the following predefined Amazon - // S3 groups: AllUsers, AuthenticatedUsers, or LogDelivery. + // * Group: The value in the Grantee + // object is one of the following predefined Amazon S3 groups: AllUsers, + // AuthenticatedUsers, or LogDelivery. // - // * Grantee: The AWS - // user or group that you want to have access to thumbnail files. To identify the - // user or group, you can specify the canonical user ID for an AWS account, an - // origin access identity for a CloudFront distribution, the registered email - // address of an AWS account, or a predefined Amazon S3 group. + // * Grantee: The AWS user or group that you + // want to have access to thumbnail files. To identify the user or group, you can + // specify the canonical user ID for an AWS account, an origin access identity for + // a CloudFront distribution, the registered email address of an AWS account, or a + // predefined Amazon S3 group. // - // * Access: The - // permission that you want to give to the AWS user that you specified in Grantee. - // Permissions are granted on the thumbnail files that Elastic Transcoder adds to - // the bucket. Valid values include: + // * Access: The permission that you want to give to + // the AWS user that you specified in Grantee. Permissions are granted on the + // thumbnail files that Elastic Transcoder adds to the bucket. Valid values + // include: // - // * READ: The grantee can read the - // thumbnails and metadata for objects that Elastic Transcoder adds to the Amazon - // S3 bucket. + // * READ: The grantee can read the thumbnails and metadata for objects + // that Elastic Transcoder adds to the Amazon S3 bucket. // - // * READ_ACP: The grantee can read the object ACL for - // thumbnails that Elastic Transcoder adds to the Amazon S3 bucket. + // * READ_ACP: The grantee + // can read the object ACL for thumbnails that Elastic Transcoder adds to the + // Amazon S3 bucket. // - // * - // WRITE_ACP: The grantee can write the ACL for the thumbnails that Elastic - // Transcoder adds to the Amazon S3 bucket. + // * WRITE_ACP: The grantee can write the ACL for the thumbnails + // that Elastic Transcoder adds to the Amazon S3 bucket. // - // * FULL_CONTROL: The grantee - // has READ, READ_ACP, and WRITE_ACP permissions for the thumbnails that Elastic - // Transcoder adds to the Amazon S3 bucket. + // * FULL_CONTROL: The + // grantee has READ, READ_ACP, and WRITE_ACP permissions for the thumbnails that + // Elastic Transcoder adds to the Amazon S3 bucket. // - // * StorageClass: The Amazon S3 + // * StorageClass: The Amazon S3 // storage class, Standard or ReducedRedundancy, that you want Elastic Transcoder // to assign to the thumbnails that it stores in your Amazon S3 bucket. ThumbnailConfig *types.PipelineOutputConfig diff --git a/service/elastictranscoder/api_op_UpdatePipelineNotifications.go b/service/elastictranscoder/api_op_UpdatePipelineNotifications.go index db0fccea52c..834c413450a 100644 --- a/service/elastictranscoder/api_op_UpdatePipelineNotifications.go +++ b/service/elastictranscoder/api_op_UpdatePipelineNotifications.go @@ -43,24 +43,24 @@ type UpdatePipelineNotificationsInput struct { // you want to notify to report job status. To receive notifications, you must also // subscribe to the new topic in the Amazon SNS console. 
// - // * Progressing: The - // topic ARN for the Amazon Simple Notification Service (Amazon SNS) topic that you - // want to notify when Elastic Transcoder has started to process jobs that are - // added to this pipeline. This is the ARN that Amazon SNS returned when you - // created the topic. + // * Progressing: The topic + // ARN for the Amazon Simple Notification Service (Amazon SNS) topic that you want + // to notify when Elastic Transcoder has started to process jobs that are added to + // this pipeline. This is the ARN that Amazon SNS returned when you created the + // topic. // - // * Complete: The topic ARN for the Amazon SNS topic that - // you want to notify when Elastic Transcoder has finished processing a job. This - // is the ARN that Amazon SNS returned when you created the topic. + // * Complete: The topic ARN for the Amazon SNS topic that you want to + // notify when Elastic Transcoder has finished processing a job. This is the ARN + // that Amazon SNS returned when you created the topic. // - // * Warning: - // The topic ARN for the Amazon SNS topic that you want to notify when Elastic - // Transcoder encounters a warning condition. This is the ARN that Amazon SNS - // returned when you created the topic. + // * Warning: The topic ARN + // for the Amazon SNS topic that you want to notify when Elastic Transcoder + // encounters a warning condition. This is the ARN that Amazon SNS returned when + // you created the topic. // - // * Error: The topic ARN for the Amazon - // SNS topic that you want to notify when Elastic Transcoder encounters an error - // condition. This is the ARN that Amazon SNS returned when you created the topic. + // * Error: The topic ARN for the Amazon SNS topic that you + // want to notify when Elastic Transcoder encounters an error condition. This is + // the ARN that Amazon SNS returned when you created the topic. // // This member is required. Notifications *types.Notifications diff --git a/service/elastictranscoder/api_op_UpdatePipelineStatus.go b/service/elastictranscoder/api_op_UpdatePipelineStatus.go index 796f714a05c..95706fe17b3 100644 --- a/service/elastictranscoder/api_op_UpdatePipelineStatus.go +++ b/service/elastictranscoder/api_op_UpdatePipelineStatus.go @@ -42,10 +42,10 @@ type UpdatePipelineStatusInput struct { // The desired status of the pipeline: // - // * Active: The pipeline is processing + // * Active: The pipeline is processing // jobs. // - // * Paused: The pipeline is not currently processing jobs. + // * Paused: The pipeline is not currently processing jobs. // // This member is required. Status *string diff --git a/service/elastictranscoder/types/types.go b/service/elastictranscoder/types/types.go index 9db6c68a914..fab791ecf52 100644 --- a/service/elastictranscoder/types/types.go +++ b/service/elastictranscoder/types/types.go @@ -44,34 +44,34 @@ type Artwork struct { // Specify one of the following values to control scaling of the output album // art: // - // * Fit: Elastic Transcoder scales the output art so it matches the - // value that you specified in either MaxWidth or MaxHeight without exceeding the - // other value. - // - // * Fill: Elastic Transcoder scales the output art so it matches - // the value that you specified in either MaxWidth or MaxHeight and matches or - // exceeds the other value. Elastic Transcoder centers the output art and then - // crops it in the dimension (if any) that exceeds the maximum value. 
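The UpdatePipelineStatus operation shown a little earlier in this diff takes only the pipeline's identifier and the desired Status string (Active or Paused). A sketch of pausing a pipeline follows; the Id field name and the client call shape are assumptions, since only the Status documentation appears here.

    // Assumed imports:
    //   "context"
    //   "github.com/aws/aws-sdk-go-v2/aws"
    //   "github.com/aws/aws-sdk-go-v2/service/elastictranscoder"

    // pausePipeline stops a pipeline from picking up new jobs. "Paused" is one of
    // the two documented Status values; pipelineID is a placeholder.
    func pausePipeline(ctx context.Context, client *elastictranscoder.Client, pipelineID string) error {
        _, err := client.UpdatePipelineStatus(ctx, &elastictranscoder.UpdatePipelineStatusInput{
            Id:     aws.String(pipelineID), // assumed field name for the pipeline identifier
            Status: aws.String("Paused"),
        })
        return err
    }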
- // - // * - // Stretch: Elastic Transcoder stretches the output art to match the values that - // you specified for MaxWidth and MaxHeight. If the relative proportions of the - // input art and the output art are different, the output art will be distorted. - // - // - // * Keep: Elastic Transcoder does not scale the output art. If either dimension of - // the input art exceeds the values that you specified for MaxWidth and MaxHeight, - // Elastic Transcoder crops the output art. - // - // * ShrinkToFit: Elastic Transcoder - // scales the output art down so that its dimensions match the values that you - // specified for at least one of MaxWidth and MaxHeight without exceeding either - // value. If you specify this option, Elastic Transcoder does not scale the art - // up. + // * Fit: Elastic Transcoder scales the output art so it matches the value + // that you specified in either MaxWidth or MaxHeight without exceeding the other + // value. // - // * ShrinkToFill Elastic Transcoder scales the output art down so that - // its dimensions match the values that you specified for at least one of MaxWidth - // and MaxHeight without dropping below either value. If you specify this option, + // * Fill: Elastic Transcoder scales the output art so it matches the value + // that you specified in either MaxWidth or MaxHeight and matches or exceeds the + // other value. Elastic Transcoder centers the output art and then crops it in the + // dimension (if any) that exceeds the maximum value. + // + // * Stretch: Elastic + // Transcoder stretches the output art to match the values that you specified for + // MaxWidth and MaxHeight. If the relative proportions of the input art and the + // output art are different, the output art will be distorted. + // + // * Keep: Elastic + // Transcoder does not scale the output art. If either dimension of the input art + // exceeds the values that you specified for MaxWidth and MaxHeight, Elastic + // Transcoder crops the output art. + // + // * ShrinkToFit: Elastic Transcoder scales the + // output art down so that its dimensions match the values that you specified for + // at least one of MaxWidth and MaxHeight without exceeding either value. If you + // specify this option, Elastic Transcoder does not scale the art up. + // + // * + // ShrinkToFill Elastic Transcoder scales the output art down so that its + // dimensions match the values that you specified for at least one of MaxWidth and + // MaxHeight without dropping below either value. If you specify this option, // Elastic Transcoder does not scale the art up. SizingPolicy *string } @@ -95,23 +95,23 @@ type AudioCodecOptions struct { // Audio:Codec. Specify the AAC profile for the output file. Elastic Transcoder // supports the following profiles: // - // * auto: If you specify auto, Elastic + // * auto: If you specify auto, Elastic // Transcoder selects the profile based on the bit rate selected for the output // file. // - // * AAC-LC: The most common AAC profile. Use for bit rates larger than - // 64 kbps. + // * AAC-LC: The most common AAC profile. Use for bit rates larger than 64 + // kbps. // - // * HE-AAC: Not supported on some older players and devices. Use for - // bit rates between 40 and 80 kbps. + // * HE-AAC: Not supported on some older players and devices. Use for bit + // rates between 40 and 80 kbps. // - // * HE-AACv2: Not supported on some players - // and devices. Use for bit rates less than 48 kbps. + // * HE-AACv2: Not supported on some players and + // devices. 
Use for bit rates less than 48 kbps. // - // All outputs in a Smooth - // playlist must have the same value for Profile. If you created any presets before - // AAC profiles were added, Elastic Transcoder automatically updated your presets - // to use AAC-LC. You can change the value as required. + // All outputs in a Smooth playlist + // must have the same value for Profile. If you created any presets before AAC + // profiles were added, Elastic Transcoder automatically updated your presets to + // use AAC-LC. You can change the value as required. Profile *string // You can only choose whether an audio sample is signed when you specify pcm for @@ -134,104 +134,103 @@ type AudioParameters struct { // eight channels. Use SingleTrack for all non-mxf containers. The outputs of // SingleTrack for a specific channel value and inputs are as follows: // - // * 0 + // * 0 // channels with any input: Audio omitted from the output // - // * 1, 2, or auto + // * 1, 2, or auto // channels with no audio input: Audio omitted from the output // - // * 1 channel - // with any input with audio: One track with one channel, downmixed if necessary - // + // * 1 channel with + // any input with audio: One track with one channel, downmixed if necessary // - // * 2 channels with one track with one channel: One track with two identical + // * 2 + // channels with one track with one channel: One track with two identical // channels // - // * 2 or auto channels with two tracks with one channel each: One - // track with two channels - // - // * 2 or auto channels with one track with two - // channels: One track with two channels - // - // * 2 channels with one track with - // multiple channels: One track with two channels + // * 2 or auto channels with two tracks with one channel each: One track + // with two channels // - // * auto channels with one - // track with one channel: One track with one channel - // - // * auto channels with - // one track with multiple channels: One track with multiple channels + // * 2 or auto channels with one track with two channels: One + // track with two channels // - // When you - // specify OneChannelPerTrack, Elastic Transcoder creates a new track for every - // channel in your output. Your output can have up to eight single-channel tracks. - // The outputs of OneChannelPerTrack for a specific channel value and inputs are as - // follows: + // * 2 channels with one track with multiple channels: + // One track with two channels // - // * 0 channels with any input: Audio omitted from the output + // * auto channels with one track with one channel: + // One track with one channel // - // * - // 1, 2, or auto channels with no audio input: Audio omitted from the output + // * auto channels with one track with multiple + // channels: One track with multiple channels // + // When you specify OneChannelPerTrack, + // Elastic Transcoder creates a new track for every channel in your output. Your + // output can have up to eight single-channel tracks. 
The outputs of + // OneChannelPerTrack for a specific channel value and inputs are as follows: // - // * 1 channel with any input with audio: One track with one channel, downmixed if - // necessary + // * 0 + // channels with any input: Audio omitted from the output // - // * 2 channels with one track with one channel: Two tracks with - // one identical channel each + // * 1, 2, or auto + // channels with no audio input: Audio omitted from the output // - // * 2 or auto channels with two tracks with one - // channel each: Two tracks with one channel each + // * 1 channel with + // any input with audio: One track with one channel, downmixed if necessary // - // * 2 or auto channels with - // one track with two channels: Two tracks with one channel each + // * 2 + // channels with one track with one channel: Two tracks with one identical channel + // each // - // * 2 channels - // with one track with multiple channels: Two tracks with one channel each + // * 2 or auto channels with two tracks with one channel each: Two tracks + // with one channel each // - // * - // auto channels with one track with one channel: One track with one channel + // * 2 or auto channels with one track with two channels: + // Two tracks with one channel each // + // * 2 channels with one track with multiple + // channels: Two tracks with one channel each // - // * auto channels with one track with multiple channels: Up to eight tracks with - // one channel each + // * auto channels with one track with + // one channel: One track with one channel // - // When you specify OneChannelPerTrackWithMosTo8Tracks, Elastic - // Transcoder creates eight single-channel tracks for your output. All tracks that - // do not contain audio data from an input channel are MOS, or Mit Out Sound, - // tracks. The outputs of OneChannelPerTrackWithMosTo8Tracks for a specific channel - // value and inputs are as follows: + // * auto channels with one track with + // multiple channels: Up to eight tracks with one channel each // - // * 0 channels with any input: Audio - // omitted from the output + // When you specify + // OneChannelPerTrackWithMosTo8Tracks, Elastic Transcoder creates eight + // single-channel tracks for your output. All tracks that do not contain audio data + // from an input channel are MOS, or Mit Out Sound, tracks. 
The outputs of + // OneChannelPerTrackWithMosTo8Tracks for a specific channel value and inputs are + // as follows: // - // * 1, 2, or auto channels with no audio input: - // Audio omitted from the output + // * 0 channels with any input: Audio omitted from the output // - // * 1 channel with any input with audio: One - // track with one channel, downmixed if necessary, plus six MOS tracks + // * 1, + // 2, or auto channels with no audio input: Audio omitted from the output // - // * 2 - // channels with one track with one channel: Two tracks with one identical channel - // each, plus six MOS tracks + // * 1 + // channel with any input with audio: One track with one channel, downmixed if + // necessary, plus six MOS tracks // - // * 2 or auto channels with two tracks with one - // channel each: Two tracks with one channel each, plus six MOS tracks + // * 2 channels with one track with one channel: + // Two tracks with one identical channel each, plus six MOS tracks // - // * 2 or - // auto channels with one track with two channels: Two tracks with one channel + // * 2 or auto + // channels with two tracks with one channel each: Two tracks with one channel // each, plus six MOS tracks // - // * 2 channels with one track with multiple + // * 2 or auto channels with one track with two // channels: Two tracks with one channel each, plus six MOS tracks // - // * auto - // channels with one track with one channel: One track with one channel, plus seven - // MOS tracks + // * 2 channels + // with one track with multiple channels: Two tracks with one channel each, plus + // six MOS tracks // - // * auto channels with one track with multiple channels: Up to - // eight tracks with one channel each, plus MOS tracks until there are eight tracks - // in all + // * auto channels with one track with one channel: One track with + // one channel, plus seven MOS tracks + // + // * auto channels with one track with + // multiple channels: Up to eight tracks with one channel each, plus MOS tracks + // until there are eight tracks in all AudioPackingMode *string // The bit rate of the audio stream in the output file, in kilobits/second. Enter @@ -247,20 +246,20 @@ type AudioParameters struct { // file, select auto. The output of a specific channel value and inputs are as // follows: // - // * auto channel specified, with any input: Pass through up to eight + // * auto channel specified, with any input: Pass through up to eight // input channels. // - // * 0 channels specified, with any input: Audio omitted from - // the output. + // * 0 channels specified, with any input: Audio omitted from the + // output. // - // * 1 channel specified, with at least one input channel: Mono - // sound. + // * 1 channel specified, with at least one input channel: Mono sound. // - // * 2 channels specified, with any input: Two identical mono channels - // or stereo. For more information about tracks, see Audio:AudioPackingMode. + // * + // 2 channels specified, with any input: Two identical mono channels or stereo. For + // more information about tracks, see Audio:AudioPackingMode. // - // For - // more information about how Elastic Transcoder organizes channels and tracks, see + // For more information + // about how Elastic Transcoder organizes channels and tracks, see // Audio:AudioPackingMode. Channels *string @@ -290,50 +289,49 @@ type CaptionFormat struct { // The format you specify determines whether Elastic Transcoder generates an // embedded or sidecar caption for this output. 
// - // * Valid Embedded Caption + // * Valid Embedded Caption // Formats: // - // * for FLAC: None + // * for FLAC: None // - // * For MP3: None + // * For MP3: None // - // * For MP4: - // mov-text + // * For MP4: mov-text // - // * For MPEG-TS: None + // * For MPEG-TS: + // None // - // * For ogg: None + // * For ogg: None // - // * For - // webm: None + // * For webm: None // - // * Valid Sidecar Caption Formats: Elastic Transcoder supports - // dfxp (first div element only), scc, srt, and webvtt. If you want ttml or - // smpte-tt compatible captions, specify dfxp as your output format. + // * Valid Sidecar Caption Formats: + // Elastic Transcoder supports dfxp (first div element only), scc, srt, and webvtt. + // If you want ttml or smpte-tt compatible captions, specify dfxp as your output + // format. // - // * For - // FMP4: dfxp + // * For FMP4: dfxp // - // * Non-FMP4 outputs: All sidecar types + // * Non-FMP4 outputs: All sidecar types // - // fmp4 captions + // fmp4 captions // have an extension of .ismt Format *string // The prefix for caption filenames, in the form description-{language}, where: // + // * + // description is a description of the video. // - // * description is a description of the video. - // - // * {language} is a literal - // value that Elastic Transcoder replaces with the two- or three-letter code for - // the language of the caption in the output file names. + // * {language} is a literal value that + // Elastic Transcoder replaces with the two- or three-letter code for the language + // of the caption in the output file names. // - // If you don't include - // {language} in the file name pattern, Elastic Transcoder automatically appends - // "{language}" to the value that you specify for the description. In addition, - // Elastic Transcoder automatically appends the count to the end of the segment - // files. For example, suppose you're transcoding into srt format. When you enter + // If you don't include {language} in the + // file name pattern, Elastic Transcoder automatically appends "{language}" to the + // value that you specify for the description. In addition, Elastic Transcoder + // automatically appends the count to the end of the segment files. For example, + // suppose you're transcoding into srt format. When you enter // "Sydney-{language}-sunrise", and the language of the captions is English (en), // the name of the first caption file is be Sydney-en-sunrise00000.srt. Pattern *string @@ -353,19 +351,19 @@ type Captions struct { // A policy that determines how Elastic Transcoder handles the existence of // multiple captions. // - // * MergeOverride: Elastic Transcoder transcodes both - // embedded and sidecar captions into outputs. If captions for a language are - // embedded in the input file and also appear in a sidecar file, Elastic Transcoder - // uses the sidecar captions and ignores the embedded captions for that language. - // + // * MergeOverride: Elastic Transcoder transcodes both embedded + // and sidecar captions into outputs. If captions for a language are embedded in + // the input file and also appear in a sidecar file, Elastic Transcoder uses the + // sidecar captions and ignores the embedded captions for that language. // - // * MergeRetain: Elastic Transcoder transcodes both embedded and sidecar captions + // * + // MergeRetain: Elastic Transcoder transcodes both embedded and sidecar captions // into outputs. 
If captions for a language are embedded in the input file and also // appear in a sidecar file, Elastic Transcoder uses the embedded captions and // ignores the sidecar captions for that language. If CaptionSources is empty, // Elastic Transcoder omits all sidecar captions from the output files. // - // * + // * // Override: Elastic Transcoder transcodes only the sidecar captions that you // specify in CaptionSources. // @@ -395,13 +393,13 @@ type CaptionSource struct { // inputs with captions, the caption language must match in order to be included in // the output. Specify this as one of: // - // * 2-character ISO 639-1 code + // * 2-character ISO 639-1 code // - // * - // 3-character ISO 639-2 code + // * 3-character + // ISO 639-2 code // - // For more information on ISO language codes and - // language names, see the List of ISO 639-1 codes. + // For more information on ISO language codes and language names, + // see the List of ISO 639-1 codes. Language *string // For clip generation or captions that do not start at the same time as the @@ -432,33 +430,33 @@ type CreateJobOutput struct { // one format to another. All captions must be in UTF-8. Elastic Transcoder // supports two types of captions: // - // * Embedded: Embedded captions are included - // in the same file as the audio and video. Elastic Transcoder supports only one + // * Embedded: Embedded captions are included in + // the same file as the audio and video. Elastic Transcoder supports only one // embedded caption per language, to a maximum of 300 embedded captions per file. // Valid input values include: CEA-608 (EIA-608, first non-empty channel only), // CEA-708 (EIA-708, first non-empty channel only), and mov-text Valid outputs // include: mov-text Elastic Transcoder supports a maximum of one embedded format // per output. // - // * Sidecar: Sidecar captions are kept in a separate metadata - // file from the audio and video data. Sidecar captions require a player that is - // capable of understanding the relationship between the video file and the sidecar - // file. Elastic Transcoder supports only one sidecar caption per language, to a - // maximum of 20 sidecar captions per file. Valid input values include: dfxp (first - // div element only), ebu-tt, scc, smpt, srt, ttml (first div element only), and - // webvtt Valid outputs include: dfxp (first div element only), scc, srt, and - // webvtt. - // - // If you want ttml or smpte-tt compatible captions, specify dfxp as your - // output format. Elastic Transcoder does not support OCR (Optical Character - // Recognition), does not accept pictures as a valid input for captions, and is not - // available for audio-only transcoding. Elastic Transcoder does not preserve text - // formatting (for example, italics) during the transcoding process. To remove - // captions or leave the captions empty, set Captions to null. To pass through - // existing captions unchanged, set the MergePolicy to MergeRetain, and pass in a - // null CaptionSources array. For more information on embedded files, see the - // Subtitles Wikipedia page. For more information on sidecar files, see the - // Extensible Metadata Platform and Sidecar file Wikipedia pages. + // * Sidecar: Sidecar captions are kept in a separate metadata file + // from the audio and video data. Sidecar captions require a player that is capable + // of understanding the relationship between the video file and the sidecar file. 
+ // Elastic Transcoder supports only one sidecar caption per language, to a maximum + // of 20 sidecar captions per file. Valid input values include: dfxp (first div + // element only), ebu-tt, scc, smpt, srt, ttml (first div element only), and webvtt + // Valid outputs include: dfxp (first div element only), scc, srt, and webvtt. + // + // If + // you want ttml or smpte-tt compatible captions, specify dfxp as your output + // format. Elastic Transcoder does not support OCR (Optical Character Recognition), + // does not accept pictures as a valid input for captions, and is not available for + // audio-only transcoding. Elastic Transcoder does not preserve text formatting + // (for example, italics) during the transcoding process. To remove captions or + // leave the captions empty, set Captions to null. To pass through existing + // captions unchanged, set the MergePolicy to MergeRetain, and pass in a null + // CaptionSources array. For more information on embedded files, see the Subtitles + // Wikipedia page. For more information on sidecar files, see the Extensible + // Metadata Platform and Sidecar file Wikipedia pages. Captions *Captions // You can create an output file that contains an excerpt from the input file. This @@ -516,7 +514,7 @@ type CreateJobOutput struct { // file name for each thumbnail. You can specify the following values in any // sequence: // - // * {count} (Required): If you want to create thumbnails, you must + // * {count} (Required): If you want to create thumbnails, you must // include {count} in the ThumbnailPattern object. Wherever you specify {count}, // Elastic Transcoder adds a five-digit sequence number (beginning with 00001) to // thumbnail file names. The number indicates where a given thumbnail appears in @@ -524,12 +522,12 @@ type CreateJobOutput struct { // and/or {resolution} but you omit {count}, Elastic Transcoder returns a // validation error and does not create the job. // - // * Literal values (Optional): - // You can specify literal values anywhere in the ThumbnailPattern object. For - // example, you can include them as a file name prefix or as a delimiter between + // * Literal values (Optional): You + // can specify literal values anywhere in the ThumbnailPattern object. For example, + // you can include them as a file name prefix or as a delimiter between // {resolution} and {count}. // - // * {resolution} (Optional): If you want Elastic + // * {resolution} (Optional): If you want Elastic // Transcoder to include the resolution in the file name, include {resolution} in // the ThumbnailPattern object. // @@ -570,32 +568,31 @@ type CreateJobPlaylist struct { // For each output in this job that you want to include in a master playlist, the // value of the Outputs:Key object. 
// - // * If your output is not HLS or does not - // have a segment duration set, the name of the output file is a concatenation of + // * If your output is not HLS or does not have a + // segment duration set, the name of the output file is a concatenation of // OutputKeyPrefix and Outputs:Key: OutputKeyPrefixOutputs:Key // - // * If your - // output is HLSv3 and has a segment duration set, or is not included in a + // * If your output is + // HLSv3 and has a segment duration set, or is not included in a playlist, Elastic + // Transcoder creates an output playlist file with a file extension of .m3u8, and a + // series of .ts files that include a five-digit sequential counter beginning with + // 00000: OutputKeyPrefixOutputs:Key.m3u8 OutputKeyPrefixOutputs:Key00000.ts + // + // * If + // your output is HLSv4, has a segment duration set, and is included in an HLSv4 // playlist, Elastic Transcoder creates an output playlist file with a file - // extension of .m3u8, and a series of .ts files that include a five-digit - // sequential counter beginning with 00000: OutputKeyPrefixOutputs:Key.m3u8 - // OutputKeyPrefixOutputs:Key00000.ts - // - // * If your output is HLSv4, has a segment - // duration set, and is included in an HLSv4 playlist, Elastic Transcoder creates - // an output playlist file with a file extension of _v4.m3u8. If the output is - // video, Elastic Transcoder also creates an output file with an extension of - // _iframe.m3u8: OutputKeyPrefixOutputs:Key_v4.m3u8 - // OutputKeyPrefixOutputs:Key_iframe.m3u8 OutputKeyPrefixOutputs:Key.ts - // - // Elastic - // Transcoder automatically appends the relevant file extension to the file name. - // If you include a file extension in Output Key, the file name will have two - // extensions. If you include more than one output in a playlist, any segment - // duration settings, clip settings, or caption settings must be the same for all - // outputs in the playlist. For Smooth playlists, the Audio:Profile, Video:Profile, - // and Video:FrameRate to Video:KeyframesMaxDist ratio must be the same for all - // outputs. + // extension of _v4.m3u8. If the output is video, Elastic Transcoder also creates + // an output file with an extension of _iframe.m3u8: + // OutputKeyPrefixOutputs:Key_v4.m3u8 OutputKeyPrefixOutputs:Key_iframe.m3u8 + // OutputKeyPrefixOutputs:Key.ts + // + // Elastic Transcoder automatically appends the + // relevant file extension to the file name. If you include a file extension in + // Output Key, the file name will have two extensions. If you include more than one + // output in a playlist, any segment duration settings, clip settings, or caption + // settings must be the same for all outputs in the playlist. For Smooth playlists, + // the Audio:Profile, Video:Profile, and Video:FrameRate to Video:KeyframesMaxDist + // ratio must be the same for all outputs. OutputKeys []*string // The DRM settings, if any, that you want Elastic Transcoder to apply to the @@ -655,39 +652,39 @@ type Encryption struct { // when decrypting your input files or encrypting your output files. Elastic // Transcoder supports the following options: // - // * s3: Amazon S3 creates and - // manages the keys used for encrypting your files. + // * s3: Amazon S3 creates and manages + // the keys used for encrypting your files. // - // * s3-aws-kms: Amazon S3 - // calls the Amazon Key Management Service, which creates and manages the keys that - // are used for encrypting your files. 
If you specify s3-aws-kms and you don't want - // to use the default key, you must add the AWS-KMS key that you want to use to - // your pipeline. + // * s3-aws-kms: Amazon S3 calls the + // Amazon Key Management Service, which creates and manages the keys that are used + // for encrypting your files. If you specify s3-aws-kms and you don't want to use + // the default key, you must add the AWS-KMS key that you want to use to your + // pipeline. // - // * aes-cbc-pkcs7: A padded cipher-block mode of operation - // originally used for HLS files. + // * aes-cbc-pkcs7: A padded cipher-block mode of operation originally + // used for HLS files. // - // * aes-ctr: AES Counter Mode. + // * aes-ctr: AES Counter Mode. // - // * aes-gcm: - // AES Galois Counter Mode, a mode of operation that is an authenticated encryption - // format, meaning that a file, key, or initialization vector that has been - // tampered with fails the decryption process. + // * aes-gcm: AES Galois Counter + // Mode, a mode of operation that is an authenticated encryption format, meaning + // that a file, key, or initialization vector that has been tampered with fails the + // decryption process. // - // For all three AES options, you must - // provide the following settings, which must be base64-encoded: + // For all three AES options, you must provide the following + // settings, which must be base64-encoded: // - // * Key + // * Key // - // * - // Key MD5 + // * Key MD5 // - // * Initialization Vector + // * Initialization + // Vector // - // For the AES modes, your private encryption - // keys and your unencrypted data are never stored by AWS; therefore, it is - // important that you safely manage your encryption keys. If you lose them, you - // won't be able to unencrypt your data. + // For the AES modes, your private encryption keys and your unencrypted + // data are never stored by AWS; therefore, it is important that you safely manage + // your encryption keys. If you lose them, you won't be able to unencrypt your + // data. Mode *string } @@ -742,19 +739,19 @@ type InputCaptions struct { // A policy that determines how Elastic Transcoder handles the existence of // multiple captions. // - // * MergeOverride: Elastic Transcoder transcodes both - // embedded and sidecar captions into outputs. If captions for a language are - // embedded in the input file and also appear in a sidecar file, Elastic Transcoder - // uses the sidecar captions and ignores the embedded captions for that language. - // + // * MergeOverride: Elastic Transcoder transcodes both embedded + // and sidecar captions into outputs. If captions for a language are embedded in + // the input file and also appear in a sidecar file, Elastic Transcoder uses the + // sidecar captions and ignores the embedded captions for that language. // - // * MergeRetain: Elastic Transcoder transcodes both embedded and sidecar captions + // * + // MergeRetain: Elastic Transcoder transcodes both embedded and sidecar captions // into outputs. If captions for a language are embedded in the input file and also // appear in a sidecar file, Elastic Transcoder uses the embedded captions and // ignores the sidecar captions for that language. If CaptionSources is empty, // Elastic Transcoder omits all sidecar captions from the output files. // - // * + // * // Override: Elastic Transcoder transcodes only the sidecar captions that you // specify in CaptionSources. // @@ -829,14 +826,14 @@ type Job struct { // returned in the same order in which you specify them. 
Metadata keys and values // must use characters from the following list: // - // * 0-9 - // - // * A-Z and a-z + // * 0-9 // + // * A-Z and a-z // // * Space // - // * The following symbols: _.:/=+-%@ + // * + // The following symbols: _.:/=+-%@ UserMetadata map[string]*string } @@ -850,19 +847,18 @@ type JobAlbumArt struct { // A policy that determines how Elastic Transcoder handles the existence of // multiple album artwork files. // - // * Replace: The specified album art replaces - // any existing album art. + // * Replace: The specified album art replaces any + // existing album art. // - // * Prepend: The specified album art is placed in - // front of any existing album art. + // * Prepend: The specified album art is placed in front of + // any existing album art. // - // * Append: The specified album art is - // placed after any existing album art. + // * Append: The specified album art is placed after any + // existing album art. // - // * Fallback: If the original input file - // contains artwork, Elastic Transcoder uses that artwork for the output. If the - // original input does not contain artwork, Elastic Transcoder uses the specified - // album art file. + // * Fallback: If the original input file contains artwork, + // Elastic Transcoder uses that artwork for the output. If the original input does + // not contain artwork, Elastic Transcoder uses the specified album art file. MergePolicy *string } @@ -902,33 +898,33 @@ type JobInput struct { // one format to another. All captions must be in UTF-8. Elastic Transcoder // supports two types of captions: // - // * Embedded: Embedded captions are included - // in the same file as the audio and video. Elastic Transcoder supports only one + // * Embedded: Embedded captions are included in + // the same file as the audio and video. Elastic Transcoder supports only one // embedded caption per language, to a maximum of 300 embedded captions per file. // Valid input values include: CEA-608 (EIA-608, first non-empty channel only), // CEA-708 (EIA-708, first non-empty channel only), and mov-text Valid outputs // include: mov-text Elastic Transcoder supports a maximum of one embedded format // per output. // - // * Sidecar: Sidecar captions are kept in a separate metadata - // file from the audio and video data. Sidecar captions require a player that is - // capable of understanding the relationship between the video file and the sidecar - // file. Elastic Transcoder supports only one sidecar caption per language, to a - // maximum of 20 sidecar captions per file. Valid input values include: dfxp (first - // div element only), ebu-tt, scc, smpt, srt, ttml (first div element only), and - // webvtt Valid outputs include: dfxp (first div element only), scc, srt, and - // webvtt. - // - // If you want ttml or smpte-tt compatible captions, specify dfxp as your - // output format. Elastic Transcoder does not support OCR (Optical Character - // Recognition), does not accept pictures as a valid input for captions, and is not - // available for audio-only transcoding. Elastic Transcoder does not preserve text - // formatting (for example, italics) during the transcoding process. To remove - // captions or leave the captions empty, set Captions to null. To pass through - // existing captions unchanged, set the MergePolicy to MergeRetain, and pass in a - // null CaptionSources array. For more information on embedded files, see the - // Subtitles Wikipedia page. 
For more information on sidecar files, see the - // Extensible Metadata Platform and Sidecar file Wikipedia pages. + // * Sidecar: Sidecar captions are kept in a separate metadata file + // from the audio and video data. Sidecar captions require a player that is capable + // of understanding the relationship between the video file and the sidecar file. + // Elastic Transcoder supports only one sidecar caption per language, to a maximum + // of 20 sidecar captions per file. Valid input values include: dfxp (first div + // element only), ebu-tt, scc, smpt, srt, ttml (first div element only), and webvtt + // Valid outputs include: dfxp (first div element only), scc, srt, and webvtt. + // + // If + // you want ttml or smpte-tt compatible captions, specify dfxp as your output + // format. Elastic Transcoder does not support OCR (Optical Character Recognition), + // does not accept pictures as a valid input for captions, and is not available for + // audio-only transcoding. Elastic Transcoder does not preserve text formatting + // (for example, italics) during the transcoding process. To remove captions or + // leave the captions empty, set Captions to null. To pass through existing + // captions unchanged, set the MergePolicy to MergeRetain, and pass in a null + // CaptionSources array. For more information on embedded files, see the Subtitles + // Wikipedia page. For more information on sidecar files, see the Extensible + // Metadata Platform and Sidecar file Wikipedia pages. InputCaptions *InputCaptions // Whether the input file is interlaced. If you want Elastic Transcoder to @@ -973,33 +969,33 @@ type JobOutput struct { // one format to another. All captions must be in UTF-8. Elastic Transcoder // supports two types of captions: // - // * Embedded: Embedded captions are included - // in the same file as the audio and video. Elastic Transcoder supports only one + // * Embedded: Embedded captions are included in + // the same file as the audio and video. Elastic Transcoder supports only one // embedded caption per language, to a maximum of 300 embedded captions per file. // Valid input values include: CEA-608 (EIA-608, first non-empty channel only), // CEA-708 (EIA-708, first non-empty channel only), and mov-text Valid outputs // include: mov-text Elastic Transcoder supports a maximum of one embedded format // per output. // - // * Sidecar: Sidecar captions are kept in a separate metadata - // file from the audio and video data. Sidecar captions require a player that is - // capable of understanding the relationship between the video file and the sidecar - // file. Elastic Transcoder supports only one sidecar caption per language, to a - // maximum of 20 sidecar captions per file. Valid input values include: dfxp (first - // div element only), ebu-tt, scc, smpt, srt, ttml (first div element only), and - // webvtt Valid outputs include: dfxp (first div element only), scc, srt, and - // webvtt. - // - // If you want ttml or smpte-tt compatible captions, specify dfxp as your - // output format. Elastic Transcoder does not support OCR (Optical Character - // Recognition), does not accept pictures as a valid input for captions, and is not - // available for audio-only transcoding. Elastic Transcoder does not preserve text - // formatting (for example, italics) during the transcoding process. To remove - // captions or leave the captions empty, set Captions to null. To pass through - // existing captions unchanged, set the MergePolicy to MergeRetain, and pass in a - // null CaptionSources array. 
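Illustrative sketch (not part of the generated diff): the caption behavior described above, passing existing captions through with MergeRetain and a null CaptionSources array while requesting an srt sidecar named like "Sydney-{language}-sunrise00000.srt", could be expressed roughly as below. The Captions and CaptionFormat field names (MergePolicy, CaptionSources, CaptionFormats, Format, Pattern) are assumed to match the generated types in this module.

package etsketch

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/elastictranscoder/types"
)

// passthroughCaptions keeps the input file's existing captions (MergeRetain
// with a nil CaptionSources array) and emits srt sidecar files using the
// description-{language} pattern from the documentation above.
func passthroughCaptions() *types.Captions {
	return &types.Captions{
		MergePolicy:    aws.String("MergeRetain"),
		CaptionSources: nil, // nil/null passes existing captions through unchanged
		CaptionFormats: []*types.CaptionFormat{
			{
				Format:  aws.String("srt"),
				Pattern: aws.String("Sydney-{language}-sunrise"),
			},
		},
	}
}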
For more information on embedded files, see the - // Subtitles Wikipedia page. For more information on sidecar files, see the - // Extensible Metadata Platform and Sidecar file Wikipedia pages. + // * Sidecar: Sidecar captions are kept in a separate metadata file + // from the audio and video data. Sidecar captions require a player that is capable + // of understanding the relationship between the video file and the sidecar file. + // Elastic Transcoder supports only one sidecar caption per language, to a maximum + // of 20 sidecar captions per file. Valid input values include: dfxp (first div + // element only), ebu-tt, scc, smpt, srt, ttml (first div element only), and webvtt + // Valid outputs include: dfxp (first div element only), scc, srt, and webvtt. + // + // If + // you want ttml or smpte-tt compatible captions, specify dfxp as your output + // format. Elastic Transcoder does not support OCR (Optical Character Recognition), + // does not accept pictures as a valid input for captions, and is not available for + // audio-only transcoding. Elastic Transcoder does not preserve text formatting + // (for example, italics) during the transcoding process. To remove captions or + // leave the captions empty, set Captions to null. To pass through existing + // captions unchanged, set the MergePolicy to MergeRetain, and pass in a null + // CaptionSources array. For more information on embedded files, see the Subtitles + // Wikipedia page. For more information on sidecar files, see the Extensible + // Metadata Platform and Sidecar file Wikipedia pages. Captions *Captions // You can create an output file that contains an excerpt from the input file. This @@ -1072,20 +1068,20 @@ type JobOutput struct { // Outputs:Status is always the same as Job:Status. If you specified more than one // output: // - // * Job:Status and Outputs:Status for all of the outputs is Submitted + // * Job:Status and Outputs:Status for all of the outputs is Submitted // until Elastic Transcoder starts to process the first output. // - // * When Elastic + // * When Elastic // Transcoder starts to process the first output, Outputs:Status for that output // and Job:Status both change to Progressing. For each output, the value of // Outputs:Status remains Submitted until Elastic Transcoder starts to process the // output. // - // * Job:Status remains Progressing until all of the outputs reach a + // * Job:Status remains Progressing until all of the outputs reach a // terminal status, either Complete or Error. // - // * When all of the outputs reach - // a terminal status, Job:Status changes to Complete only if Outputs:Status for all + // * When all of the outputs reach a + // terminal status, Job:Status changes to Complete only if Outputs:Status for all // of the outputs is Complete. If Outputs:Status for one or more outputs is Error, // the terminal status for Job:Status is also Error. // @@ -1107,7 +1103,7 @@ type JobOutput struct { // file name for each thumbnail. You can specify the following values in any // sequence: // - // * {count} (Required): If you want to create thumbnails, you must + // * {count} (Required): If you want to create thumbnails, you must // include {count} in the ThumbnailPattern object. Wherever you specify {count}, // Elastic Transcoder adds a five-digit sequence number (beginning with 00001) to // thumbnail file names. 
The number indicates where a given thumbnail appears in @@ -1115,12 +1111,12 @@ type JobOutput struct { // and/or {resolution} but you omit {count}, Elastic Transcoder returns a // validation error and does not create the job. // - // * Literal values (Optional): - // You can specify literal values anywhere in the ThumbnailPattern object. For - // example, you can include them as a file name prefix or as a delimiter between + // * Literal values (Optional): You + // can specify literal values anywhere in the ThumbnailPattern object. For example, + // you can include them as a file name prefix or as a delimiter between // {resolution} and {count}. // - // * {resolution} (Optional): If you want Elastic + // * {resolution} (Optional): If you want Elastic // Transcoder to include the resolution in the file name, include {resolution} in // the ThumbnailPattern object. // @@ -1197,20 +1193,19 @@ type Permission struct { // The permission that you want to give to the AWS user that is listed in Grantee. // Valid values include: // - // * READ: The grantee can read the thumbnails and - // metadata for thumbnails that Elastic Transcoder adds to the Amazon S3 bucket. - // + // * READ: The grantee can read the thumbnails and metadata + // for thumbnails that Elastic Transcoder adds to the Amazon S3 bucket. // - // * READ_ACP: The grantee can read the object ACL for thumbnails that Elastic + // * + // READ_ACP: The grantee can read the object ACL for thumbnails that Elastic // Transcoder adds to the Amazon S3 bucket. // - // * WRITE_ACP: The grantee can write - // the ACL for the thumbnails that Elastic Transcoder adds to the Amazon S3 - // bucket. + // * WRITE_ACP: The grantee can write the + // ACL for the thumbnails that Elastic Transcoder adds to the Amazon S3 bucket. // - // * FULL_CONTROL: The grantee has READ, READ_ACP, and WRITE_ACP - // permissions for the thumbnails that Elastic Transcoder adds to the Amazon S3 - // bucket. + // * + // FULL_CONTROL: The grantee has READ, READ_ACP, and WRITE_ACP permissions for the + // thumbnails that Elastic Transcoder adds to the Amazon S3 bucket. Access []*string // The AWS user or group that you want to have access to transcoded files and @@ -1221,16 +1216,16 @@ type Permission struct { // The type of value that appears in the Grantee object: // - // * Canonical: Either - // the canonical user ID for an AWS account or an origin access identity for an - // Amazon CloudFront distribution. A canonical user ID is not the same as an AWS - // account number. - // - // * Email: The registered email address of an AWS account. + // * Canonical: Either the + // canonical user ID for an AWS account or an origin access identity for an Amazon + // CloudFront distribution. A canonical user ID is not the same as an AWS account + // number. // + // * Email: The registered email address of an AWS account. // - // * Group: One of the following predefined Amazon S3 groups: AllUsers, - // AuthenticatedUsers, or LogDelivery. + // * Group: One + // of the following predefined Amazon S3 groups: AllUsers, AuthenticatedUsers, or + // LogDelivery. GranteeType *string } @@ -1252,53 +1247,51 @@ type Pipeline struct { // save transcoded files and playlists. Either you specify both ContentConfig and // ThumbnailConfig, or you specify OutputBucket. // - // * Bucket: The Amazon S3 - // bucket in which you want Elastic Transcoder to save transcoded files and - // playlists. 
+ // * Bucket: The Amazon S3 bucket in + // which you want Elastic Transcoder to save transcoded files and playlists. // - // * Permissions: A list of the users and/or predefined Amazon S3 - // groups you want to have access to transcoded files and playlists, and the type - // of access that you want them to have. + // * + // Permissions: A list of the users and/or predefined Amazon S3 groups you want to + // have access to transcoded files and playlists, and the type of access that you + // want them to have. // - // * GranteeType: The type of value - // that appears in the Grantee object: + // * GranteeType: The type of value that appears in the Grantee + // object: // - // * Canonical: Either the - // canonical user ID for an AWS account or an origin access identity for an Amazon - // CloudFront distribution. + // * Canonical: Either the canonical user ID for an AWS account or an + // origin access identity for an Amazon CloudFront distribution. // - // * Email: The registered email address of - // an AWS account. + // * Email: The + // registered email address of an AWS account. // - // * Group: One of the following predefined Amazon S3 - // groups: AllUsers, AuthenticatedUsers, or LogDelivery. + // * Group: One of the following + // predefined Amazon S3 groups: AllUsers, AuthenticatedUsers, or LogDelivery. // - // * Grantee: The - // AWS user or group that you want to have access to transcoded files and - // playlists. + // * + // Grantee: The AWS user or group that you want to have access to transcoded files + // and playlists. // - // * Access: The permission that you want to give to the AWS - // user that is listed in Grantee. Valid values include: - // - // * READ: The - // grantee can read the objects and metadata for objects that Elastic Transcoder - // adds to the Amazon S3 bucket. + // * Access: The permission that you want to give to the AWS user + // that is listed in Grantee. Valid values include: // - // * READ_ACP: The grantee can read the - // object ACL for objects that Elastic Transcoder adds to the Amazon S3 bucket. + // * READ: The grantee can read + // the objects and metadata for objects that Elastic Transcoder adds to the Amazon + // S3 bucket. // + // * READ_ACP: The grantee can read the object ACL for objects that + // Elastic Transcoder adds to the Amazon S3 bucket. // - // * WRITE_ACP: The grantee can write the ACL for the objects that Elastic - // Transcoder adds to the Amazon S3 bucket. + // * WRITE_ACP: The grantee can + // write the ACL for the objects that Elastic Transcoder adds to the Amazon S3 + // bucket. // - // * FULL_CONTROL: The - // grantee has READ, READ_ACP, and WRITE_ACP permissions for the objects that - // Elastic Transcoder adds to the Amazon S3 bucket. + // * FULL_CONTROL: The grantee has READ, READ_ACP, and WRITE_ACP + // permissions for the objects that Elastic Transcoder adds to the Amazon S3 + // bucket. // - // * StorageClass: The Amazon - // S3 storage class, Standard or ReducedRedundancy, that you want Elastic - // Transcoder to assign to the video files and playlists that it stores in your - // Amazon S3 bucket. + // * StorageClass: The Amazon S3 storage class, Standard or + // ReducedRedundancy, that you want Elastic Transcoder to assign to the video files + // and playlists that it stores in your Amazon S3 bucket. ContentConfig *PipelineOutputConfig // The identifier for the pipeline. You use this value to identify the pipeline in @@ -1318,20 +1311,20 @@ type Pipeline struct { // notify to report job status. 
To receive notifications, you must also subscribe // to the new topic in the Amazon SNS console. // - // * Progressing (optional): The + // * Progressing (optional): The // Amazon Simple Notification Service (Amazon SNS) topic that you want to notify // when Elastic Transcoder has started to process the job. // - // * Complete - // (optional): The Amazon SNS topic that you want to notify when Elastic Transcoder - // has finished processing the job. + // * Complete (optional): + // The Amazon SNS topic that you want to notify when Elastic Transcoder has + // finished processing the job. // - // * Warning (optional): The Amazon SNS topic - // that you want to notify when Elastic Transcoder encounters a warning - // condition. + // * Warning (optional): The Amazon SNS topic that + // you want to notify when Elastic Transcoder encounters a warning condition. // - // * Error (optional): The Amazon SNS topic that you want to notify - // when Elastic Transcoder encounters an error condition. + // * + // Error (optional): The Amazon SNS topic that you want to notify when Elastic + // Transcoder encounters an error condition. Notifications *Notifications // The Amazon S3 bucket in which you want Elastic Transcoder to save transcoded @@ -1345,63 +1338,61 @@ type Pipeline struct { // The current status of the pipeline: // - // * Active: The pipeline is processing + // * Active: The pipeline is processing // jobs. // - // * Paused: The pipeline is not currently processing jobs. + // * Paused: The pipeline is not currently processing jobs. Status *string // Information about the Amazon S3 bucket in which you want Elastic Transcoder to // save thumbnail files. Either you specify both ContentConfig and ThumbnailConfig, // or you specify OutputBucket. // - // * Bucket: The Amazon S3 bucket in which you - // want Elastic Transcoder to save thumbnail files. + // * Bucket: The Amazon S3 bucket in which you want + // Elastic Transcoder to save thumbnail files. // - // * Permissions: A list of - // the users and/or predefined Amazon S3 groups you want to have access to - // thumbnail files, and the type of access that you want them to have. + // * Permissions: A list of the users + // and/or predefined Amazon S3 groups you want to have access to thumbnail files, + // and the type of access that you want them to have. // - // * - // GranteeType: The type of value that appears in the Grantee object: + // * GranteeType: The type of + // value that appears in the Grantee object: // + // * Canonical: Either the canonical + // user ID for an AWS account or an origin access identity for an Amazon CloudFront + // distribution. A canonical user ID is not the same as an AWS account number. // - // * Canonical: Either the canonical user ID for an AWS account or an origin access - // identity for an Amazon CloudFront distribution. A canonical user ID is not the - // same as an AWS account number. + // * + // Email: The registered email address of an AWS account. // - // * Email: The registered email - // address of an AWS account. + // * Group: One of the + // following predefined Amazon S3 groups: AllUsers, AuthenticatedUsers, or + // LogDelivery. // - // * Group: One of the following predefined - // Amazon S3 groups: AllUsers, AuthenticatedUsers, or LogDelivery. + // * Grantee: The AWS user or group that you want to have access to + // thumbnail files. // - // * - // Grantee: The AWS user or group that you want to have access to thumbnail - // files. 
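Illustrative sketch (not part of the generated diff): the Bucket, Permissions, GranteeType, Grantee, Access, and StorageClass structure described in the ThumbnailConfig and ContentConfig documentation above combines roughly as follows. The bucket name and grantee values are placeholders; the struct and field names follow the types.PipelineOutputConfig and types.Permission declarations that appear elsewhere in this patch.

package etsketch

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/elastictranscoder/types"
)

// thumbnailConfig grants the predefined AllUsers group READ access to the
// thumbnails Elastic Transcoder writes, and stores them with the
// ReducedRedundancy storage class. The bucket name is a placeholder.
func thumbnailConfig() *types.PipelineOutputConfig {
	return &types.PipelineOutputConfig{
		Bucket:       aws.String("example-thumbnail-bucket"),
		StorageClass: aws.String("ReducedRedundancy"),
		Permissions: []*types.Permission{
			{
				GranteeType: aws.String("Group"),
				Grantee:     aws.String("AllUsers"),
				Access:      []*string{aws.String("READ")},
			},
		},
	}
}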
- // - // * Access: The permission that you want to give to the AWS user + // * Access: The permission that you want to give to the AWS user // that is listed in Grantee. Valid values include: // - // * READ: The - // grantee can read the thumbnails and metadata for thumbnails that Elastic - // Transcoder adds to the Amazon S3 bucket. - // - // * READ_ACP: The grantee - // can read the object ACL for thumbnails that Elastic Transcoder adds to the + // * READ: The grantee can read + // the thumbnails and metadata for thumbnails that Elastic Transcoder adds to the // Amazon S3 bucket. // - // * WRITE_ACP: The grantee can write the ACL for - // the thumbnails that Elastic Transcoder adds to the Amazon S3 bucket. + // * READ_ACP: The grantee can read the object ACL for + // thumbnails that Elastic Transcoder adds to the Amazon S3 bucket. // + // * WRITE_ACP: + // The grantee can write the ACL for the thumbnails that Elastic Transcoder adds to + // the Amazon S3 bucket. // - // * FULL_CONTROL: The grantee has READ, READ_ACP, and WRITE_ACP permissions for - // the thumbnails that Elastic Transcoder adds to the Amazon S3 bucket. - // - // * - // StorageClass: The Amazon S3 storage class, Standard or ReducedRedundancy, that - // you want Elastic Transcoder to assign to the thumbnails that it stores in your + // * FULL_CONTROL: The grantee has READ, READ_ACP, and + // WRITE_ACP permissions for the thumbnails that Elastic Transcoder adds to the // Amazon S3 bucket. + // + // * StorageClass: The Amazon S3 storage class, Standard or + // ReducedRedundancy, that you want Elastic Transcoder to assign to the thumbnails + // that it stores in your Amazon S3 bucket. ThumbnailConfig *PipelineOutputConfig } @@ -1411,25 +1402,25 @@ type PipelineOutputConfig struct { // The Amazon S3 bucket in which you want Elastic Transcoder to save the transcoded // files. Specify this value when all of the following are true: // - // * You want to + // * You want to // save transcoded files, thumbnails (if any), and playlists (if any) together in // one bucket. // - // * You do not want to specify the users or groups who have - // access to the transcoded files, thumbnails, and playlists. - // - // * You do not - // want to specify the permissions that Elastic Transcoder grants to the files. + // * You do not want to specify the users or groups who have access to + // the transcoded files, thumbnails, and playlists. // + // * You do not want to specify + // the permissions that Elastic Transcoder grants to the files. // - // * You want to associate the transcoded files and thumbnails with the Amazon S3 - // Standard storage class. + // * You want to + // associate the transcoded files and thumbnails with the Amazon S3 Standard + // storage class. // - // If you want to save transcoded files and playlists in - // one bucket and thumbnails in another bucket, specify which users can access the - // transcoded files or the permissions the users have, or change the Amazon S3 - // storage class, omit OutputBucket and specify values for ContentConfig and - // ThumbnailConfig instead. + // If you want to save transcoded files and playlists in one bucket + // and thumbnails in another bucket, specify which users can access the transcoded + // files or the permissions the users have, or change the Amazon S3 storage class, + // omit OutputBucket and specify values for ContentConfig and ThumbnailConfig + // instead. Bucket *string // Optional. 
The Permissions object specifies which users and/or predefined Amazon @@ -1478,34 +1469,33 @@ type Playlist struct { // For each output in this job that you want to include in a master playlist, the // value of the Outputs:Key object. // - // * If your output is not HLS or does not - // have a segment duration set, the name of the output file is a concatenation of + // * If your output is not HLS or does not have a + // segment duration set, the name of the output file is a concatenation of // OutputKeyPrefix and Outputs:Key: OutputKeyPrefixOutputs:Key // - // * If your - // output is HLSv3 and has a segment duration set, or is not included in a - // playlist, Elastic Transcoder creates an output playlist file with a file - // extension of .m3u8, and a series of .ts files that include a five-digit - // sequential counter beginning with 00000: OutputKeyPrefixOutputs:Key.m3u8 - // OutputKeyPrefixOutputs:Key00000.ts - // - // * If your output is HLSv4, has a segment - // duration set, and is included in an HLSv4 playlist, Elastic Transcoder creates - // an output playlist file with a file extension of _v4.m3u8. If the output is - // video, Elastic Transcoder also creates an output file with an extension of - // _iframe.m3u8: OutputKeyPrefixOutputs:Key_v4.m3u8 - // OutputKeyPrefixOutputs:Key_iframe.m3u8 OutputKeyPrefixOutputs:Key.ts - // - // Elastic - // Transcoder automatically appends the relevant file extension to the file name. - // If you include a file extension in Output Key, the file name will have two - // extensions. + // * If your output is + // HLSv3 and has a segment duration set, or is not included in a playlist, Elastic + // Transcoder creates an output playlist file with a file extension of .m3u8, and a + // series of .ts files that include a five-digit sequential counter beginning with + // 00000: OutputKeyPrefixOutputs:Key.m3u8 OutputKeyPrefixOutputs:Key00000.ts // - // If you include more than one output in a playlist, any segment - // duration settings, clip settings, or caption settings must be the same for all - // outputs in the playlist. For Smooth playlists, the Audio:Profile, Video:Profile, - // and Video:FrameRate to Video:KeyframesMaxDist ratio must be the same for all - // outputs. + // * If + // your output is HLSv4, has a segment duration set, and is included in an HLSv4 + // playlist, Elastic Transcoder creates an output playlist file with a file + // extension of _v4.m3u8. If the output is video, Elastic Transcoder also creates + // an output file with an extension of _iframe.m3u8: + // OutputKeyPrefixOutputs:Key_v4.m3u8 OutputKeyPrefixOutputs:Key_iframe.m3u8 + // OutputKeyPrefixOutputs:Key.ts + // + // Elastic Transcoder automatically appends the + // relevant file extension to the file name. If you include a file extension in + // Output Key, the file name will have two extensions. + // + // If you include more than + // one output in a playlist, any segment duration settings, clip settings, or + // caption settings must be the same for all outputs in the playlist. For Smooth + // playlists, the Audio:Profile, Video:Profile, and Video:FrameRate to + // Video:KeyframesMaxDist ratio must be the same for all outputs. 
OutputKeys []*string // The DRM settings, if any, that you want Elastic Transcoder to apply to the @@ -1621,36 +1611,36 @@ type PresetWatermark struct { // The horizontal position of the watermark unless you specify a non-zero value for // HorizontalOffset: // - // * Left: The left edge of the watermark is aligned with - // the left border of the video. + // * Left: The left edge of the watermark is aligned with the + // left border of the video. // - // * Right: The right edge of the watermark is - // aligned with the right border of the video. + // * Right: The right edge of the watermark is aligned + // with the right border of the video. // - // * Center: The watermark is - // centered between the left and right borders. + // * Center: The watermark is centered between + // the left and right borders. HorizontalAlign *string // The amount by which you want the horizontal position of the watermark to be // offset from the position specified by HorizontalAlign: // - // * number of pixels - // (px): The minimum value is 0 pixels, and the maximum value is the value of + // * number of pixels (px): + // The minimum value is 0 pixels, and the maximum value is the value of // MaxWidth. // - // * integer percentage (%): The range of valid values is 0 to - // 100. + // * integer percentage (%): The range of valid values is 0 to 100. // - // For example, if you specify Left for HorizontalAlign and 5px for - // HorizontalOffset, the left side of the watermark appears 5 pixels from the left - // border of the output video. HorizontalOffset is only valid when the value of - // HorizontalAlign is Left or Right. If you specify an offset that causes the - // watermark to extend beyond the left or right border and Elastic Transcoder has - // not added black bars, the watermark is cropped. If Elastic Transcoder has added - // black bars, the watermark extends into the black bars. If the watermark extends - // beyond the black bars, it is cropped. Use the value of Target to specify whether - // you want to include the black bars that are added by Elastic Transcoder, if any, - // in the offset calculation. + // For + // example, if you specify Left for HorizontalAlign and 5px for HorizontalOffset, + // the left side of the watermark appears 5 pixels from the left border of the + // output video. HorizontalOffset is only valid when the value of HorizontalAlign + // is Left or Right. If you specify an offset that causes the watermark to extend + // beyond the left or right border and Elastic Transcoder has not added black bars, + // the watermark is cropped. If Elastic Transcoder has added black bars, the + // watermark extends into the black bars. If the watermark extends beyond the black + // bars, it is cropped. Use the value of Target to specify whether you want to + // include the black bars that are added by Elastic Transcoder, if any, in the + // offset calculation. HorizontalOffset *string // A unique identifier for the settings for one watermark. The value of Id can be @@ -1659,30 +1649,30 @@ type PresetWatermark struct { // The maximum height of the watermark in one of the following formats: // - // * - // number of pixels (px): The minimum value is 16 pixels, and the maximum value is - // the value of MaxHeight. + // * number + // of pixels (px): The minimum value is 16 pixels, and the maximum value is the + // value of MaxHeight. // - // * integer percentage (%): The range of valid values - // is 0 to 100. 
Use the value of Target to specify whether you want Elastic - // Transcoder to include the black bars that are added by Elastic Transcoder, if - // any, in the calculation. + // * integer percentage (%): The range of valid values is 0 to + // 100. Use the value of Target to specify whether you want Elastic Transcoder to + // include the black bars that are added by Elastic Transcoder, if any, in the + // calculation. // - // If you specify the value in pixels, it must be less - // than or equal to the value of MaxHeight. + // If you specify the value in pixels, it must be less than or equal + // to the value of MaxHeight. MaxHeight *string // The maximum width of the watermark in one of the following formats: // - // * - // number of pixels (px): The minimum value is 16 pixels, and the maximum value is - // the value of MaxWidth. + // * number of + // pixels (px): The minimum value is 16 pixels, and the maximum value is the value + // of MaxWidth. // - // * integer percentage (%): The range of valid values - // is 0 to 100. Use the value of Target to specify whether you want Elastic - // Transcoder to include the black bars that are added by Elastic Transcoder, if - // any, in the calculation. If you specify the value in pixels, it must be less - // than or equal to the value of MaxWidth. + // * integer percentage (%): The range of valid values is 0 to 100. + // Use the value of Target to specify whether you want Elastic Transcoder to + // include the black bars that are added by Elastic Transcoder, if any, in the + // calculation. If you specify the value in pixels, it must be less than or equal + // to the value of MaxWidth. MaxWidth *string // A percentage that indicates how much you want a watermark to obscure the video @@ -1696,61 +1686,61 @@ type PresetWatermark struct { // A value that controls scaling of the watermark: // - // * Fit: Elastic Transcoder + // * Fit: Elastic Transcoder // scales the watermark so it matches the value that you specified in either // MaxWidth or MaxHeight without exceeding the other value. // - // * Stretch: Elastic + // * Stretch: Elastic // Transcoder stretches the watermark to match the values that you specified for // MaxWidth and MaxHeight. If the relative proportions of the watermark and the // values of MaxWidth and MaxHeight are different, the watermark will be // distorted. // - // * ShrinkToFit: Elastic Transcoder scales the watermark down so - // that its dimensions match the values that you specified for at least one of - // MaxWidth and MaxHeight without exceeding either value. If you specify this - // option, Elastic Transcoder does not scale the watermark up. + // * ShrinkToFit: Elastic Transcoder scales the watermark down so that + // its dimensions match the values that you specified for at least one of MaxWidth + // and MaxHeight without exceeding either value. If you specify this option, + // Elastic Transcoder does not scale the watermark up. SizingPolicy *string // A value that determines how Elastic Transcoder interprets values that you // specified for HorizontalOffset, VerticalOffset, MaxWidth, and MaxHeight: // - // * + // * // Content: HorizontalOffset and VerticalOffset values are calculated based on the // borders of the video excluding black bars added by Elastic Transcoder, if any. // In addition, MaxWidth and MaxHeight, if specified as a percentage, are // calculated based on the borders of the video excluding black bars added by // Elastic Transcoder, if any. 
// - // * Frame: HorizontalOffset and VerticalOffset - // values are calculated based on the borders of the video including black bars - // added by Elastic Transcoder, if any. In addition, MaxWidth and MaxHeight, if - // specified as a percentage, are calculated based on the borders of the video - // including black bars added by Elastic Transcoder, if any. + // * Frame: HorizontalOffset and VerticalOffset values + // are calculated based on the borders of the video including black bars added by + // Elastic Transcoder, if any. In addition, MaxWidth and MaxHeight, if specified as + // a percentage, are calculated based on the borders of the video including black + // bars added by Elastic Transcoder, if any. Target *string // The vertical position of the watermark unless you specify a non-zero value for // VerticalOffset: // - // * Top: The top edge of the watermark is aligned with the - // top border of the video. + // * Top: The top edge of the watermark is aligned with the top + // border of the video. // - // * Bottom: The bottom edge of the watermark is - // aligned with the bottom border of the video. + // * Bottom: The bottom edge of the watermark is aligned with + // the bottom border of the video. // - // * Center: The watermark is - // centered between the top and bottom borders. + // * Center: The watermark is centered between the + // top and bottom borders. VerticalAlign *string // VerticalOffset The amount by which you want the vertical position of the // watermark to be offset from the position specified by VerticalAlign: // - // * - // number of pixels (px): The minimum value is 0 pixels, and the maximum value is - // the value of MaxHeight. + // * number + // of pixels (px): The minimum value is 0 pixels, and the maximum value is the + // value of MaxHeight. // - // * integer percentage (%): The range of valid values - // is 0 to 100. + // * integer percentage (%): The range of valid values is 0 to + // 100. // // For example, if you specify Top for VerticalAlign and 5px for // VerticalOffset, the top of the watermark appears 5 pixels from the top border of @@ -1810,37 +1800,37 @@ type Thumbnails struct { // Specify one of the following values to control scaling of thumbnails: // - // * - // Fit: Elastic Transcoder scales thumbnails so they match the value that you - // specified in thumbnail MaxWidth or MaxHeight settings without exceeding the - // other value. - // - // * Fill: Elastic Transcoder scales thumbnails so they match the - // value that you specified in thumbnail MaxWidth or MaxHeight settings and matches - // or exceeds the other value. Elastic Transcoder centers the image in thumbnails - // and then crops in the dimension (if any) that exceeds the maximum value. - // - // * - // Stretch: Elastic Transcoder stretches thumbnails to match the values that you - // specified for thumbnail MaxWidth and MaxHeight settings. If the relative - // proportions of the input video and thumbnails are different, the thumbnails will - // be distorted. - // - // * Keep: Elastic Transcoder does not scale thumbnails. If - // either dimension of the input video exceeds the values that you specified for - // thumbnail MaxWidth and MaxHeight settings, Elastic Transcoder crops the - // thumbnails. - // - // * ShrinkToFit: Elastic Transcoder scales thumbnails down so - // that their dimensions match the values that you specified for at least one of - // thumbnail MaxWidth and MaxHeight without exceeding either value. 
If you specify - // this option, Elastic Transcoder does not scale thumbnails up. - // - // * - // ShrinkToFill: Elastic Transcoder scales thumbnails down so that their dimensions - // match the values that you specified for at least one of MaxWidth and MaxHeight - // without dropping below either value. If you specify this option, Elastic + // * Fit: + // Elastic Transcoder scales thumbnails so they match the value that you specified + // in thumbnail MaxWidth or MaxHeight settings without exceeding the other + // value. + // + // * Fill: Elastic Transcoder scales thumbnails so they match the value + // that you specified in thumbnail MaxWidth or MaxHeight settings and matches or + // exceeds the other value. Elastic Transcoder centers the image in thumbnails and + // then crops in the dimension (if any) that exceeds the maximum value. + // + // * Stretch: + // Elastic Transcoder stretches thumbnails to match the values that you specified + // for thumbnail MaxWidth and MaxHeight settings. If the relative proportions of + // the input video and thumbnails are different, the thumbnails will be + // distorted. + // + // * Keep: Elastic Transcoder does not scale thumbnails. If either + // dimension of the input video exceeds the values that you specified for thumbnail + // MaxWidth and MaxHeight settings, Elastic Transcoder crops the thumbnails. + // + // * + // ShrinkToFit: Elastic Transcoder scales thumbnails down so that their dimensions + // match the values that you specified for at least one of thumbnail MaxWidth and + // MaxHeight without exceeding either value. If you specify this option, Elastic // Transcoder does not scale thumbnails up. + // + // * ShrinkToFill: Elastic Transcoder + // scales thumbnails down so that their dimensions match the values that you + // specified for at least one of MaxWidth and MaxHeight without dropping below + // either value. If you specify this option, Elastic Transcoder does not scale + // thumbnails up. SizingPolicy *string } @@ -1898,30 +1888,30 @@ type VideoParameters struct { // Maximum video bit rate in kilobits/second (baseline and main Profile) : maximum // video bit rate in kilobits/second (high Profile) // - // * 1 - 64 : 80 + // * 1 - 64 : 80 // - // * 1b - - // 128 : 160 + // * 1b - 128 : + // 160 // - // * 1.1 - 192 : 240 + // * 1.1 - 192 : 240 // - // * 1.2 - 384 : 480 + // * 1.2 - 384 : 480 // - // * 1.3 - 768 : 960 + // * 1.3 - 768 : 960 // + // * 2 - 2000 : + // 2500 // - // * 2 - 2000 : 2500 + // * 3 - 10000 : 12500 // - // * 3 - 10000 : 12500 + // * 3.1 - 14000 : 17500 // - // * 3.1 - 14000 : 17500 + // * 3.2 - 20000 : 25000 // - // * 3.2 - // - 20000 : 25000 + // * 4 - + // 20000 : 25000 // - // * 4 - 20000 : 25000 - // - // * 4.1 - 50000 : 62500 + // * 4.1 - 50000 : 62500 BitRate *string // The video codec for the output file. Valid values include gif, H.264, mpeg2, @@ -1932,105 +1922,105 @@ type VideoParameters struct { // Profile (H.264/VP8/VP9 Only) The H.264 profile that you want to use for the // output file. Elastic Transcoder supports the following profiles: // - // * - // baseline: The profile most commonly used for videoconferencing and for mobile + // * baseline: + // The profile most commonly used for videoconferencing and for mobile // applications. // - // * main: The profile used for standard-definition digital TV + // * main: The profile used for standard-definition digital TV // broadcasts. // - // * high: The profile used for high-definition digital TV - // broadcasts and for Blu-ray discs. 
- // - // Level (H.264 Only) The H.264 level that you - // want to use for the output file. Elastic Transcoder supports the following - // levels: 1, 1b, 1.1, 1.2, 1.3, 2, 2.1, 2.2, 3, 3.1, 3.2, 4, 4.1 - // MaxReferenceFrames (H.264 Only) Applicable only when the value of Video:Codec is - // H.264. The maximum number of previously decoded frames to use as a reference for - // decoding future frames. Valid values are integers 0 through 16, but we recommend - // that you not use a value greater than the following: Min(Floor(Maximum decoded - // picture buffer in macroblocks * 256 / (Width in pixels * Height in pixels)), 16) - // where Width in pixels and Height in pixels represent either MaxWidth and - // MaxHeight, or Resolution. Maximum decoded picture buffer in macroblocks depends - // on the value of the Level object. See the list below. (A macroblock is a block - // of pixels measuring 16x16.) + // * high: The profile used for high-definition digital TV broadcasts + // and for Blu-ray discs. // - // * 1 - 396 + // Level (H.264 Only) The H.264 level that you want to use + // for the output file. Elastic Transcoder supports the following levels: 1, 1b, + // 1.1, 1.2, 1.3, 2, 2.1, 2.2, 3, 3.1, 3.2, 4, 4.1 MaxReferenceFrames (H.264 Only) + // Applicable only when the value of Video:Codec is H.264. The maximum number of + // previously decoded frames to use as a reference for decoding future frames. + // Valid values are integers 0 through 16, but we recommend that you not use a + // value greater than the following: Min(Floor(Maximum decoded picture buffer in + // macroblocks * 256 / (Width in pixels * Height in pixels)), 16) where Width in + // pixels and Height in pixels represent either MaxWidth and MaxHeight, or + // Resolution. Maximum decoded picture buffer in macroblocks depends on the value + // of the Level object. See the list below. (A macroblock is a block of pixels + // measuring 16x16.) // - // * 1b - 396 + // * 1 - 396 // - // * 1.1 - 900 + // * 1b - 396 // + // * 1.1 - 900 // // * 1.2 - 2376 // - // * 1.3 - 2376 - // - // * 2 - 2376 + // * 1.3 - + // 2376 // - // * 2.1 - 4752 + // * 2 - 2376 // - // * 2.2 - - // 8100 + // * 2.1 - 4752 // - // * 3 - 8100 + // * 2.2 - 8100 // - // * 3.1 - 18000 + // * 3 - 8100 // - // * 3.2 - 20480 + // * 3.1 - 18000 // - // * 4 - 32768 + // * 3.2 - + // 20480 // + // * 4 - 32768 // // * 4.1 - 32768 // - // MaxBitRate (Optional, H.264/MPEG2/VP8/VP9 only) The maximum - // number of bits per second in a video buffer; the size of the buffer is specified - // by BufferSize. Specify a value between 16 and 62,500. You can reduce the - // bandwidth required to stream a video by reducing the maximum bit rate, but this - // also reduces the quality of the video. BufferSize (Optional, H.264/MPEG2/VP8/VP9 - // only) The maximum number of bits in any x seconds of the output video. This - // window is commonly 10 seconds, the standard segment duration when you're using - // FMP4 or MPEG-TS for the container type of the output video. Specify an integer - // greater than 0. If you specify MaxBitRate and omit BufferSize, Elastic - // Transcoder sets BufferSize to 10 times the value of MaxBitRate. InterlacedMode - // (Optional, H.264/MPEG2 Only) The interlace mode for the output video. Interlaced - // video is used to double the perceived frame rate for a video by interlacing two - // fields (one field on every other line, the other field on the other lines) so - // that the human eye registers multiple pictures per frame. 
Interlacing reduces - // the bandwidth required for transmitting a video, but can result in blurred - // images and flickering. Valid values include Progressive (no interlacing, top to - // bottom), TopFirst (top field first), BottomFirst (bottom field first), and Auto. - // If InterlaceMode is not specified, Elastic Transcoder uses Progressive for the - // output. If Auto is specified, Elastic Transcoder interlaces the output. - // ColorSpaceConversionMode (Optional, H.264/MPEG2 Only) The color space conversion - // Elastic Transcoder applies to the output video. Color spaces are the algorithms - // used by the computer to store information about how to render color. Bt.601 is - // the standard for standard definition video, while Bt.709 is the standard for - // high definition video. Valid values include None, Bt709toBt601, Bt601toBt709, - // and Auto. If you chose Auto for ColorSpaceConversionMode and your output is - // interlaced, your frame rate is one of 23.97, 24, 25, 29.97, 50, or 60, your - // SegmentDuration is null, and you are using one of the resolution changes from - // the list below, Elastic Transcoder applies the following color space - // conversions: - // - // * Standard to HD, 720x480 to 1920x1080 - Elastic Transcoder - // applies Bt601ToBt709 - // - // * Standard to HD, 720x576 to 1920x1080 - Elastic - // Transcoder applies Bt601ToBt709 - // - // * HD to Standard, 1920x1080 to 720x480 - - // Elastic Transcoder applies Bt709ToBt601 - // - // * HD to Standard, 1920x1080 to - // 720x576 - Elastic Transcoder applies Bt709ToBt601 - // - // Elastic Transcoder may change - // the behavior of the ColorspaceConversionModeAuto mode in the future. All outputs - // in a playlist must use the same ColorSpaceConversionMode. If you do not specify - // a ColorSpaceConversionMode, Elastic Transcoder does not change the color space - // of a file. If you are unsure what ColorSpaceConversionMode was applied to your + // MaxBitRate (Optional, H.264/MPEG2/VP8/VP9 + // only) The maximum number of bits per second in a video buffer; the size of the + // buffer is specified by BufferSize. Specify a value between 16 and 62,500. You + // can reduce the bandwidth required to stream a video by reducing the maximum bit + // rate, but this also reduces the quality of the video. BufferSize (Optional, + // H.264/MPEG2/VP8/VP9 only) The maximum number of bits in any x seconds of the + // output video. This window is commonly 10 seconds, the standard segment duration + // when you're using FMP4 or MPEG-TS for the container type of the output video. + // Specify an integer greater than 0. If you specify MaxBitRate and omit + // BufferSize, Elastic Transcoder sets BufferSize to 10 times the value of + // MaxBitRate. InterlacedMode (Optional, H.264/MPEG2 Only) The interlace mode for + // the output video. Interlaced video is used to double the perceived frame rate + // for a video by interlacing two fields (one field on every other line, the other + // field on the other lines) so that the human eye registers multiple pictures per + // frame. Interlacing reduces the bandwidth required for transmitting a video, but + // can result in blurred images and flickering. Valid values include Progressive + // (no interlacing, top to bottom), TopFirst (top field first), BottomFirst (bottom + // field first), and Auto. If InterlaceMode is not specified, Elastic Transcoder + // uses Progressive for the output. If Auto is specified, Elastic Transcoder + // interlaces the output. 
ColorSpaceConversionMode (Optional, H.264/MPEG2 Only) The + // color space conversion Elastic Transcoder applies to the output video. Color + // spaces are the algorithms used by the computer to store information about how to + // render color. Bt.601 is the standard for standard definition video, while Bt.709 + // is the standard for high definition video. Valid values include None, + // Bt709toBt601, Bt601toBt709, and Auto. If you chose Auto for + // ColorSpaceConversionMode and your output is interlaced, your frame rate is one + // of 23.97, 24, 25, 29.97, 50, or 60, your SegmentDuration is null, and you are + // using one of the resolution changes from the list below, Elastic Transcoder + // applies the following color space conversions: + // + // * Standard to HD, 720x480 to + // 1920x1080 - Elastic Transcoder applies Bt601ToBt709 + // + // * Standard to HD, 720x576 + // to 1920x1080 - Elastic Transcoder applies Bt601ToBt709 + // + // * HD to Standard, + // 1920x1080 to 720x480 - Elastic Transcoder applies Bt709ToBt601 + // + // * HD to + // Standard, 1920x1080 to 720x576 - Elastic Transcoder applies + // Bt709ToBt601 + // + // Elastic Transcoder may change the behavior of the + // ColorspaceConversionModeAuto mode in the future. All outputs in a playlist must + // use the same ColorSpaceConversionMode. If you do not specify a + // ColorSpaceConversionMode, Elastic Transcoder does not change the color space of + // a file. If you are unsure what ColorSpaceConversionMode was applied to your // output file, you can check the AppliedColorSpaceConversion parameter included in // your job response. If your job does not have an AppliedColorSpaceConversion in // its response, no ColorSpaceConversionMode was applied. ChromaSubsampling The @@ -2048,11 +2038,11 @@ type VideoParameters struct { // Applicable only when the value of Video:Codec is one of H.264, MPEG2, or VP8. // Whether to use a fixed value for FixedGOP. Valid values are true and false: // - // - // * true: Elastic Transcoder uses the value of KeyframesMaxDist for the distance + // * + // true: Elastic Transcoder uses the value of KeyframesMaxDist for the distance // between key frames (the number of frames in a group of pictures, or GOP). // - // * + // * // false: The distance between key frames can vary. // // FixedGOP must be set to true @@ -2066,45 +2056,45 @@ type VideoParameters struct { // = maximum recommended decoding speed in luma samples/second / (width in pixels * // height in pixels) where: // - // * width in pixels and height in pixels represent - // the Resolution of the output video. + // * width in pixels and height in pixels represent the + // Resolution of the output video. // - // * maximum recommended decoding speed in - // Luma samples/second is less than or equal to the maximum value listed in the + // * maximum recommended decoding speed in Luma + // samples/second is less than or equal to the maximum value listed in the // following table, based on the value that you specified for Level. 
// // The maximum // recommended decoding speed in Luma samples/second for each level is described in // the following list (Level - Decoding speed): // - // * 1 - 380160 + // * 1 - 380160 // - // * 1b - - // 380160 + // * 1b - 380160 // - // * 1.1 - 76800 + // * 1.1 + // - 76800 // - // * 1.2 - 1536000 + // * 1.2 - 1536000 // - // * 1.3 - 3041280 + // * 1.3 - 3041280 // - // * 2 - - // 3041280 + // * 2 - 3041280 // - // * 2.1 - 5068800 + // * 2.1 - 5068800 // - // * 2.2 - 5184000 + // * 2.2 + // - 5184000 // - // * 3 - 10368000 + // * 3 - 10368000 // - // * 3.1 - // - 27648000 + // * 3.1 - 27648000 // - // * 3.2 - 55296000 + // * 3.2 - 55296000 // - // * 4 - 62914560 + // * 4 - 62914560 // - // * 4.1 - 62914560 + // * + // 4.1 - 62914560 FrameRate *string // Applicable only when the value of Video:Codec is one of H.264, MPEG2, or VP8. @@ -2153,96 +2143,95 @@ type VideoParameters struct { // of the video in the output file, in pixels. Valid values are auto and width x // height: // - // * auto: Elastic Transcoder attempts to preserve the width and - // height of the input file, subject to the following rules. - // - // * width x height - // : The width and height of the output video in pixels. + // * auto: Elastic Transcoder attempts to preserve the width and height of + // the input file, subject to the following rules. // - // Note the following about - // specifying the width and height: + // * width x height : The width + // and height of the output video in pixels. // - // * The width must be an even integer - // between 128 and 4096, inclusive. + // Note the following about specifying + // the width and height: // - // * The height must be an even integer - // between 96 and 3072, inclusive. + // * The width must be an even integer between 128 and 4096, + // inclusive. // - // * If you specify a resolution that is less - // than the resolution of the input file, Elastic Transcoder rescales the output - // file to the lower resolution. + // * The height must be an even integer between 96 and 3072, + // inclusive. // - // * If you specify a resolution that is greater - // than the resolution of the input file, Elastic Transcoder rescales the output to - // the higher resolution. + // * If you specify a resolution that is less than the resolution of + // the input file, Elastic Transcoder rescales the output file to the lower + // resolution. // - // * We recommend that you specify a resolution for - // which the product of width and height is less than or equal to the applicable - // value in the following list (List - Max width x height value): + // * If you specify a resolution that is greater than the resolution + // of the input file, Elastic Transcoder rescales the output to the higher + // resolution. 
// - // * 1 - - // 25344 + // * We recommend that you specify a resolution for which the product + // of width and height is less than or equal to the applicable value in the + // following list (List - Max width x height value): // - // * 1b - 25344 + // * 1 - 25344 // - // * 1.1 - 101376 + // * 1b - 25344 // - // * 1.2 - 101376 + // * + // 1.1 - 101376 // + // * 1.2 - 101376 // // * 1.3 - 101376 // - // * 2 - 101376 + // * 2 - 101376 // - // * 2.1 - 202752 + // * 2.1 - 202752 // - // * 2.2 - - // 404720 + // * + // 2.2 - 404720 // - // * 3 - 404720 + // * 3 - 404720 // - // * 3.1 - 921600 - // - // * 3.2 - 1310720 + // * 3.1 - 921600 // + // * 3.2 - 1310720 // // * 4 - 2097152 // - // * 4.1 - 2097152 + // * + // 4.1 - 2097152 Resolution *string // Specify one of the following values to control scaling of the output video: // - // - // * Fit: Elastic Transcoder scales the output video so it matches the value that - // you specified in either MaxWidth or MaxHeight without exceeding the other - // value. - // - // * Fill: Elastic Transcoder scales the output video so it matches the - // value that you specified in either MaxWidth or MaxHeight and matches or exceeds - // the other value. Elastic Transcoder centers the output video and then crops it - // in the dimension (if any) that exceeds the maximum value. - // - // * Stretch: - // Elastic Transcoder stretches the output video to match the values that you - // specified for MaxWidth and MaxHeight. If the relative proportions of the input - // video and the output video are different, the output video will be distorted. - // - // - // * Keep: Elastic Transcoder does not scale the output video. If either dimension - // of the input video exceeds the values that you specified for MaxWidth and - // MaxHeight, Elastic Transcoder crops the output video. - // - // * ShrinkToFit: - // Elastic Transcoder scales the output video down so that its dimensions match the - // values that you specified for at least one of MaxWidth and MaxHeight without - // exceeding either value. If you specify this option, Elastic Transcoder does not - // scale the video up. - // - // * ShrinkToFill: Elastic Transcoder scales the output - // video down so that its dimensions match the values that you specified for at - // least one of MaxWidth and MaxHeight without dropping below either value. If you + // * + // Fit: Elastic Transcoder scales the output video so it matches the value that you + // specified in either MaxWidth or MaxHeight without exceeding the other value. + // + // * + // Fill: Elastic Transcoder scales the output video so it matches the value that + // you specified in either MaxWidth or MaxHeight and matches or exceeds the other + // value. Elastic Transcoder centers the output video and then crops it in the + // dimension (if any) that exceeds the maximum value. + // + // * Stretch: Elastic + // Transcoder stretches the output video to match the values that you specified for + // MaxWidth and MaxHeight. If the relative proportions of the input video and the + // output video are different, the output video will be distorted. + // + // * Keep: Elastic + // Transcoder does not scale the output video. If either dimension of the input + // video exceeds the values that you specified for MaxWidth and MaxHeight, Elastic + // Transcoder crops the output video. 
+ // + // * ShrinkToFit: Elastic Transcoder scales the + // output video down so that its dimensions match the values that you specified for + // at least one of MaxWidth and MaxHeight without exceeding either value. If you // specify this option, Elastic Transcoder does not scale the video up. + // + // * + // ShrinkToFill: Elastic Transcoder scales the output video down so that its + // dimensions match the values that you specified for at least one of MaxWidth and + // MaxHeight without dropping below either value. If you specify this option, + // Elastic Transcoder does not scale the video up. SizingPolicy *string // Settings for the size, location, and opacity of graphics that you want Elastic diff --git a/service/emr/api_op_DescribeJobFlows.go b/service/emr/api_op_DescribeJobFlows.go index f265d406c21..50ba4179dc8 100644 --- a/service/emr/api_op_DescribeJobFlows.go +++ b/service/emr/api_op_DescribeJobFlows.go @@ -21,11 +21,11 @@ import ( // months are returned. If no parameters are supplied, then job flows matching // either of the following criteria are returned: // -// * Job flows created and +// * Job flows created and // completed in the last two weeks // -// * Job flows created within the last two -// months that are in one of the following states: RUNNING, WAITING, SHUTTING_DOWN, +// * Job flows created within the last two months +// that are in one of the following states: RUNNING, WAITING, SHUTTING_DOWN, // STARTING // // Amazon EMR can return a maximum of 512 job flow descriptions. diff --git a/service/emr/api_op_ListNotebookExecutions.go b/service/emr/api_op_ListNotebookExecutions.go index 3e45827b254..116be614fbf 100644 --- a/service/emr/api_op_ListNotebookExecutions.go +++ b/service/emr/api_op_ListNotebookExecutions.go @@ -46,37 +46,36 @@ type ListNotebookExecutionsInput struct { // The status filter for listing notebook executions. // - // * START_PENDING - // indicates that the cluster has received the execution request but execution has - // not begun. + // * START_PENDING indicates + // that the cluster has received the execution request but execution has not + // begun. // - // * STARTING indicates that the execution is starting on the - // cluster. + // * STARTING indicates that the execution is starting on the cluster. // - // * RUNNING indicates that the execution is being processed by the - // cluster. + // * + // RUNNING indicates that the execution is being processed by the cluster. // - // * FINISHING indicates that execution processing is in the final - // stages. + // * + // FINISHING indicates that execution processing is in the final stages. // - // * FINISHED indicates that the execution has completed without - // error. + // * + // FINISHED indicates that the execution has completed without error. // - // * FAILING indicates that the execution is failing and will not - // finish successfully. + // * FAILING + // indicates that the execution is failing and will not finish successfully. // - // * FAILED indicates that the execution failed. + // * + // FAILED indicates that the execution failed. // - // * - // STOP_PENDING indicates that the cluster has received a StopNotebookExecution - // request and the stop is pending. + // * STOP_PENDING indicates that the + // cluster has received a StopNotebookExecution request and the stop is pending. // - // * STOPPING indicates that the cluster is - // in the process of stopping the execution as a result of a StopNotebookExecution - // request. 
+ // * + // STOPPING indicates that the cluster is in the process of stopping the execution + // as a result of a StopNotebookExecution request. // - // * STOPPED indicates that the execution stopped because of a - // StopNotebookExecution request. + // * STOPPED indicates that the + // execution stopped because of a StopNotebookExecution request. Status types.NotebookExecutionStatus // The end of time range filter for listing notebook executions. The default is the diff --git a/service/emr/api_op_RunJobFlow.go b/service/emr/api_op_RunJobFlow.go index 3a08d55b968..5483588035c 100644 --- a/service/emr/api_op_RunJobFlow.go +++ b/service/emr/api_op_RunJobFlow.go @@ -141,28 +141,28 @@ type RunJobFlowInput struct { // (https://docs.aws.amazon.com/emr/latest/DeveloperGuide/emr-dg.pdf). Supported // values are: // - // * "mapr-m3" - launch the cluster using MapR M3 Edition. + // * "mapr-m3" - launch the cluster using MapR M3 Edition. // - // * + // * // "mapr-m5" - launch the cluster using MapR M5 Edition. // - // * "mapr" with the - // user arguments specifying "--edition,m3" or "--edition,m5" - launch the job flow + // * "mapr" with the user + // arguments specifying "--edition,m3" or "--edition,m5" - launch the job flow // using MapR M3 or M5 Edition respectively. // - // * "mapr-m7" - launch the cluster + // * "mapr-m7" - launch the cluster // using MapR M7 Edition. // - // * "hunk" - launch the cluster with the Hunk Big Data + // * "hunk" - launch the cluster with the Hunk Big Data // Analtics Platform. // - // * "hue"- launch the cluster with Hue installed. + // * "hue"- launch the cluster with Hue installed. // - // * - // "spark" - launch the cluster with Apache Spark installed. + // * "spark" - + // launch the cluster with Apache Spark installed. // - // * "ganglia" - - // launch the cluster with the Ganglia Monitoring System installed. + // * "ganglia" - launch the + // cluster with the Ganglia Monitoring System installed. NewSupportedProducts []*types.SupportedProductConfig // The specified placement group configuration for an Amazon EMR cluster. @@ -220,10 +220,10 @@ type RunJobFlowInput struct { // (https://docs.aws.amazon.com/emr/latest/DeveloperGuide/emr-dg.pdf). Currently // supported values are: // - // * "mapr-m3" - launch the job flow using MapR M3 + // * "mapr-m3" - launch the job flow using MapR M3 // Edition. // - // * "mapr-m5" - launch the job flow using MapR M5 Edition. + // * "mapr-m5" - launch the job flow using MapR M5 Edition. SupportedProducts []*string // A list of tags to associate with a cluster and propagate to Amazon EC2 diff --git a/service/emr/types/enums.go b/service/emr/types/enums.go index ee0f6b8ccb6..c6000b8fc17 100644 --- a/service/emr/types/enums.go +++ b/service/emr/types/enums.go @@ -6,10 +6,10 @@ type ActionOnFailure string // Enum values for ActionOnFailure const ( - ActionOnFailureTerminate_job_flow ActionOnFailure = "TERMINATE_JOB_FLOW" - ActionOnFailureTerminate_cluster ActionOnFailure = "TERMINATE_CLUSTER" - ActionOnFailureCancel_and_wait ActionOnFailure = "CANCEL_AND_WAIT" - ActionOnFailureContinue ActionOnFailure = "CONTINUE" + ActionOnFailureTerminateJobFlow ActionOnFailure = "TERMINATE_JOB_FLOW" + ActionOnFailureTerminateCluster ActionOnFailure = "TERMINATE_CLUSTER" + ActionOnFailureCancelAndWait ActionOnFailure = "CANCEL_AND_WAIT" + ActionOnFailureContinue ActionOnFailure = "CONTINUE" ) // Values returns all known values for ActionOnFailure. 
Note that this can be @@ -28,9 +28,9 @@ type AdjustmentType string // Enum values for AdjustmentType const ( - AdjustmentTypeChange_in_capacity AdjustmentType = "CHANGE_IN_CAPACITY" - AdjustmentTypePercent_change_in_capacity AdjustmentType = "PERCENT_CHANGE_IN_CAPACITY" - AdjustmentTypeExact_capacity AdjustmentType = "EXACT_CAPACITY" + AdjustmentTypeChangeInCapacity AdjustmentType = "CHANGE_IN_CAPACITY" + AdjustmentTypePercentChangeInCapacity AdjustmentType = "PERCENT_CHANGE_IN_CAPACITY" + AdjustmentTypeExactCapacity AdjustmentType = "EXACT_CAPACITY" ) // Values returns all known values for AdjustmentType. Note that this can be @@ -74,9 +74,9 @@ type AutoScalingPolicyStateChangeReasonCode string // Enum values for AutoScalingPolicyStateChangeReasonCode const ( - AutoScalingPolicyStateChangeReasonCodeUser_request AutoScalingPolicyStateChangeReasonCode = "USER_REQUEST" - AutoScalingPolicyStateChangeReasonCodeProvision_failure AutoScalingPolicyStateChangeReasonCode = "PROVISION_FAILURE" - AutoScalingPolicyStateChangeReasonCodeCleanup_failure AutoScalingPolicyStateChangeReasonCode = "CLEANUP_FAILURE" + AutoScalingPolicyStateChangeReasonCodeUserRequest AutoScalingPolicyStateChangeReasonCode = "USER_REQUEST" + AutoScalingPolicyStateChangeReasonCodeProvisionFailure AutoScalingPolicyStateChangeReasonCode = "PROVISION_FAILURE" + AutoScalingPolicyStateChangeReasonCodeCleanupFailure AutoScalingPolicyStateChangeReasonCode = "CLEANUP_FAILURE" ) // Values returns all known values for AutoScalingPolicyStateChangeReasonCode. Note @@ -113,13 +113,13 @@ type ClusterState string // Enum values for ClusterState const ( - ClusterStateStarting ClusterState = "STARTING" - ClusterStateBootstrapping ClusterState = "BOOTSTRAPPING" - ClusterStateRunning ClusterState = "RUNNING" - ClusterStateWaiting ClusterState = "WAITING" - ClusterStateTerminating ClusterState = "TERMINATING" - ClusterStateTerminated ClusterState = "TERMINATED" - ClusterStateTerminated_with_errors ClusterState = "TERMINATED_WITH_ERRORS" + ClusterStateStarting ClusterState = "STARTING" + ClusterStateBootstrapping ClusterState = "BOOTSTRAPPING" + ClusterStateRunning ClusterState = "RUNNING" + ClusterStateWaiting ClusterState = "WAITING" + ClusterStateTerminating ClusterState = "TERMINATING" + ClusterStateTerminated ClusterState = "TERMINATED" + ClusterStateTerminatedWithErrors ClusterState = "TERMINATED_WITH_ERRORS" ) // Values returns all known values for ClusterState. 
Note that this can be expanded @@ -141,14 +141,14 @@ type ClusterStateChangeReasonCode string // Enum values for ClusterStateChangeReasonCode const ( - ClusterStateChangeReasonCodeInternal_error ClusterStateChangeReasonCode = "INTERNAL_ERROR" - ClusterStateChangeReasonCodeValidation_error ClusterStateChangeReasonCode = "VALIDATION_ERROR" - ClusterStateChangeReasonCodeInstance_failure ClusterStateChangeReasonCode = "INSTANCE_FAILURE" - ClusterStateChangeReasonCodeInstance_fleet_timeout ClusterStateChangeReasonCode = "INSTANCE_FLEET_TIMEOUT" - ClusterStateChangeReasonCodeBootstrap_failure ClusterStateChangeReasonCode = "BOOTSTRAP_FAILURE" - ClusterStateChangeReasonCodeUser_request ClusterStateChangeReasonCode = "USER_REQUEST" - ClusterStateChangeReasonCodeStep_failure ClusterStateChangeReasonCode = "STEP_FAILURE" - ClusterStateChangeReasonCodeAll_steps_completed ClusterStateChangeReasonCode = "ALL_STEPS_COMPLETED" + ClusterStateChangeReasonCodeInternalError ClusterStateChangeReasonCode = "INTERNAL_ERROR" + ClusterStateChangeReasonCodeValidationError ClusterStateChangeReasonCode = "VALIDATION_ERROR" + ClusterStateChangeReasonCodeInstanceFailure ClusterStateChangeReasonCode = "INSTANCE_FAILURE" + ClusterStateChangeReasonCodeInstanceFleetTimeout ClusterStateChangeReasonCode = "INSTANCE_FLEET_TIMEOUT" + ClusterStateChangeReasonCodeBootstrapFailure ClusterStateChangeReasonCode = "BOOTSTRAP_FAILURE" + ClusterStateChangeReasonCodeUserRequest ClusterStateChangeReasonCode = "USER_REQUEST" + ClusterStateChangeReasonCodeStepFailure ClusterStateChangeReasonCode = "STEP_FAILURE" + ClusterStateChangeReasonCodeAllStepsCompleted ClusterStateChangeReasonCode = "ALL_STEPS_COMPLETED" ) // Values returns all known values for ClusterStateChangeReasonCode. Note that this @@ -171,10 +171,10 @@ type ComparisonOperator string // Enum values for ComparisonOperator const ( - ComparisonOperatorGreater_than_or_equal ComparisonOperator = "GREATER_THAN_OR_EQUAL" - ComparisonOperatorGreater_than ComparisonOperator = "GREATER_THAN" - ComparisonOperatorLess_than ComparisonOperator = "LESS_THAN" - ComparisonOperatorLess_than_or_equal ComparisonOperator = "LESS_THAN_OR_EQUAL" + ComparisonOperatorGreaterThanOrEqual ComparisonOperator = "GREATER_THAN_OR_EQUAL" + ComparisonOperatorGreaterThan ComparisonOperator = "GREATER_THAN" + ComparisonOperatorLessThan ComparisonOperator = "LESS_THAN" + ComparisonOperatorLessThanOrEqual ComparisonOperator = "LESS_THAN_OR_EQUAL" ) // Values returns all known values for ComparisonOperator. Note that this can be @@ -229,8 +229,8 @@ type InstanceCollectionType string // Enum values for InstanceCollectionType const ( - InstanceCollectionTypeInstance_fleet InstanceCollectionType = "INSTANCE_FLEET" - InstanceCollectionTypeInstance_group InstanceCollectionType = "INSTANCE_GROUP" + InstanceCollectionTypeInstanceFleet InstanceCollectionType = "INSTANCE_FLEET" + InstanceCollectionTypeInstanceGroup InstanceCollectionType = "INSTANCE_GROUP" ) // Values returns all known values for InstanceCollectionType. 
Note that this can @@ -275,10 +275,10 @@ type InstanceFleetStateChangeReasonCode string // Enum values for InstanceFleetStateChangeReasonCode const ( - InstanceFleetStateChangeReasonCodeInternal_error InstanceFleetStateChangeReasonCode = "INTERNAL_ERROR" - InstanceFleetStateChangeReasonCodeValidation_error InstanceFleetStateChangeReasonCode = "VALIDATION_ERROR" - InstanceFleetStateChangeReasonCodeInstance_failure InstanceFleetStateChangeReasonCode = "INSTANCE_FAILURE" - InstanceFleetStateChangeReasonCodeCluster_terminated InstanceFleetStateChangeReasonCode = "CLUSTER_TERMINATED" + InstanceFleetStateChangeReasonCodeInternalError InstanceFleetStateChangeReasonCode = "INTERNAL_ERROR" + InstanceFleetStateChangeReasonCodeValidationError InstanceFleetStateChangeReasonCode = "VALIDATION_ERROR" + InstanceFleetStateChangeReasonCodeInstanceFailure InstanceFleetStateChangeReasonCode = "INSTANCE_FAILURE" + InstanceFleetStateChangeReasonCodeClusterTerminated InstanceFleetStateChangeReasonCode = "CLUSTER_TERMINATED" ) // Values returns all known values for InstanceFleetStateChangeReasonCode. Note @@ -327,7 +327,7 @@ const ( InstanceGroupStateTerminating InstanceGroupState = "TERMINATING" InstanceGroupStateTerminated InstanceGroupState = "TERMINATED" InstanceGroupStateArrested InstanceGroupState = "ARRESTED" - InstanceGroupStateShutting_down InstanceGroupState = "SHUTTING_DOWN" + InstanceGroupStateShuttingDown InstanceGroupState = "SHUTTING_DOWN" InstanceGroupStateEnded InstanceGroupState = "ENDED" ) @@ -354,10 +354,10 @@ type InstanceGroupStateChangeReasonCode string // Enum values for InstanceGroupStateChangeReasonCode const ( - InstanceGroupStateChangeReasonCodeInternal_error InstanceGroupStateChangeReasonCode = "INTERNAL_ERROR" - InstanceGroupStateChangeReasonCodeValidation_error InstanceGroupStateChangeReasonCode = "VALIDATION_ERROR" - InstanceGroupStateChangeReasonCodeInstance_failure InstanceGroupStateChangeReasonCode = "INSTANCE_FAILURE" - InstanceGroupStateChangeReasonCodeCluster_terminated InstanceGroupStateChangeReasonCode = "CLUSTER_TERMINATED" + InstanceGroupStateChangeReasonCodeInternalError InstanceGroupStateChangeReasonCode = "INTERNAL_ERROR" + InstanceGroupStateChangeReasonCodeValidationError InstanceGroupStateChangeReasonCode = "VALIDATION_ERROR" + InstanceGroupStateChangeReasonCodeInstanceFailure InstanceGroupStateChangeReasonCode = "INSTANCE_FAILURE" + InstanceGroupStateChangeReasonCodeClusterTerminated InstanceGroupStateChangeReasonCode = "CLUSTER_TERMINATED" ) // Values returns all known values for InstanceGroupStateChangeReasonCode. Note @@ -417,11 +417,11 @@ type InstanceState string // Enum values for InstanceState const ( - InstanceStateAwaiting_fulfillment InstanceState = "AWAITING_FULFILLMENT" - InstanceStateProvisioning InstanceState = "PROVISIONING" - InstanceStateBootstrapping InstanceState = "BOOTSTRAPPING" - InstanceStateRunning InstanceState = "RUNNING" - InstanceStateTerminated InstanceState = "TERMINATED" + InstanceStateAwaitingFulfillment InstanceState = "AWAITING_FULFILLMENT" + InstanceStateProvisioning InstanceState = "PROVISIONING" + InstanceStateBootstrapping InstanceState = "BOOTSTRAPPING" + InstanceStateRunning InstanceState = "RUNNING" + InstanceStateTerminated InstanceState = "TERMINATED" ) // Values returns all known values for InstanceState. 
Note that this can be @@ -441,11 +441,11 @@ type InstanceStateChangeReasonCode string // Enum values for InstanceStateChangeReasonCode const ( - InstanceStateChangeReasonCodeInternal_error InstanceStateChangeReasonCode = "INTERNAL_ERROR" - InstanceStateChangeReasonCodeValidation_error InstanceStateChangeReasonCode = "VALIDATION_ERROR" - InstanceStateChangeReasonCodeInstance_failure InstanceStateChangeReasonCode = "INSTANCE_FAILURE" - InstanceStateChangeReasonCodeBootstrap_failure InstanceStateChangeReasonCode = "BOOTSTRAP_FAILURE" - InstanceStateChangeReasonCodeCluster_terminated InstanceStateChangeReasonCode = "CLUSTER_TERMINATED" + InstanceStateChangeReasonCodeInternalError InstanceStateChangeReasonCode = "INTERNAL_ERROR" + InstanceStateChangeReasonCodeValidationError InstanceStateChangeReasonCode = "VALIDATION_ERROR" + InstanceStateChangeReasonCodeInstanceFailure InstanceStateChangeReasonCode = "INSTANCE_FAILURE" + InstanceStateChangeReasonCodeBootstrapFailure InstanceStateChangeReasonCode = "BOOTSTRAP_FAILURE" + InstanceStateChangeReasonCodeClusterTerminated InstanceStateChangeReasonCode = "CLUSTER_TERMINATED" ) // Values returns all known values for InstanceStateChangeReasonCode. Note that @@ -470,7 +470,7 @@ const ( JobFlowExecutionStateBootstrapping JobFlowExecutionState = "BOOTSTRAPPING" JobFlowExecutionStateRunning JobFlowExecutionState = "RUNNING" JobFlowExecutionStateWaiting JobFlowExecutionState = "WAITING" - JobFlowExecutionStateShutting_down JobFlowExecutionState = "SHUTTING_DOWN" + JobFlowExecutionStateShuttingDown JobFlowExecutionState = "SHUTTING_DOWN" JobFlowExecutionStateTerminated JobFlowExecutionState = "TERMINATED" JobFlowExecutionStateCompleted JobFlowExecutionState = "COMPLETED" JobFlowExecutionStateFailed JobFlowExecutionState = "FAILED" @@ -496,8 +496,8 @@ type MarketType string // Enum values for MarketType const ( - MarketTypeOn_demand MarketType = "ON_DEMAND" - MarketTypeSpot MarketType = "SPOT" + MarketTypeOnDemand MarketType = "ON_DEMAND" + MarketTypeSpot MarketType = "SPOT" ) // Values returns all known values for MarketType. 
Note that this can be expanded @@ -514,16 +514,16 @@ type NotebookExecutionStatus string // Enum values for NotebookExecutionStatus const ( - NotebookExecutionStatusStart_pending NotebookExecutionStatus = "START_PENDING" - NotebookExecutionStatusStarting NotebookExecutionStatus = "STARTING" - NotebookExecutionStatusRunning NotebookExecutionStatus = "RUNNING" - NotebookExecutionStatusFinishing NotebookExecutionStatus = "FINISHING" - NotebookExecutionStatusFinished NotebookExecutionStatus = "FINISHED" - NotebookExecutionStatusFailing NotebookExecutionStatus = "FAILING" - NotebookExecutionStatusFailed NotebookExecutionStatus = "FAILED" - NotebookExecutionStatusStop_pending NotebookExecutionStatus = "STOP_PENDING" - NotebookExecutionStatusStopping NotebookExecutionStatus = "STOPPING" - NotebookExecutionStatusStopped NotebookExecutionStatus = "STOPPED" + NotebookExecutionStatusStartPending NotebookExecutionStatus = "START_PENDING" + NotebookExecutionStatusStarting NotebookExecutionStatus = "STARTING" + NotebookExecutionStatusRunning NotebookExecutionStatus = "RUNNING" + NotebookExecutionStatusFinishing NotebookExecutionStatus = "FINISHING" + NotebookExecutionStatusFinished NotebookExecutionStatus = "FINISHED" + NotebookExecutionStatusFailing NotebookExecutionStatus = "FAILING" + NotebookExecutionStatusFailed NotebookExecutionStatus = "FAILED" + NotebookExecutionStatusStopPending NotebookExecutionStatus = "STOP_PENDING" + NotebookExecutionStatusStopping NotebookExecutionStatus = "STOPPING" + NotebookExecutionStatusStopped NotebookExecutionStatus = "STOPPED" ) // Values returns all known values for NotebookExecutionStatus. Note that this can @@ -548,7 +548,7 @@ type OnDemandProvisioningAllocationStrategy string // Enum values for OnDemandProvisioningAllocationStrategy const ( - OnDemandProvisioningAllocationStrategyLowest_price OnDemandProvisioningAllocationStrategy = "lowest-price" + OnDemandProvisioningAllocationStrategyLowestPrice OnDemandProvisioningAllocationStrategy = "lowest-price" ) // Values returns all known values for OnDemandProvisioningAllocationStrategy. Note @@ -605,8 +605,8 @@ type ScaleDownBehavior string // Enum values for ScaleDownBehavior const ( - ScaleDownBehaviorTerminate_at_instance_hour ScaleDownBehavior = "TERMINATE_AT_INSTANCE_HOUR" - ScaleDownBehaviorTerminate_at_task_completion ScaleDownBehavior = "TERMINATE_AT_TASK_COMPLETION" + ScaleDownBehaviorTerminateAtInstanceHour ScaleDownBehavior = "TERMINATE_AT_INSTANCE_HOUR" + ScaleDownBehaviorTerminateAtTaskCompletion ScaleDownBehavior = "TERMINATE_AT_TASK_COMPLETION" ) // Values returns all known values for ScaleDownBehavior. Note that this can be @@ -623,7 +623,7 @@ type SpotProvisioningAllocationStrategy string // Enum values for SpotProvisioningAllocationStrategy const ( - SpotProvisioningAllocationStrategyCapacity_optimized SpotProvisioningAllocationStrategy = "capacity-optimized" + SpotProvisioningAllocationStrategyCapacityOptimized SpotProvisioningAllocationStrategy = "capacity-optimized" ) // Values returns all known values for SpotProvisioningAllocationStrategy. 
Note @@ -640,8 +640,8 @@ type SpotProvisioningTimeoutAction string // Enum values for SpotProvisioningTimeoutAction const ( - SpotProvisioningTimeoutActionSwitch_to_on_demand SpotProvisioningTimeoutAction = "SWITCH_TO_ON_DEMAND" - SpotProvisioningTimeoutActionTerminate_cluster SpotProvisioningTimeoutAction = "TERMINATE_CLUSTER" + SpotProvisioningTimeoutActionSwitchToOnDemand SpotProvisioningTimeoutAction = "SWITCH_TO_ON_DEMAND" + SpotProvisioningTimeoutActionTerminateCluster SpotProvisioningTimeoutAction = "TERMINATE_CLUSTER" ) // Values returns all known values for SpotProvisioningTimeoutAction. Note that @@ -659,11 +659,11 @@ type Statistic string // Enum values for Statistic const ( - StatisticSample_count Statistic = "SAMPLE_COUNT" - StatisticAverage Statistic = "AVERAGE" - StatisticSum Statistic = "SUM" - StatisticMinimum Statistic = "MINIMUM" - StatisticMaximum Statistic = "MAXIMUM" + StatisticSampleCount Statistic = "SAMPLE_COUNT" + StatisticAverage Statistic = "AVERAGE" + StatisticSum Statistic = "SUM" + StatisticMinimum Statistic = "MINIMUM" + StatisticMaximum Statistic = "MAXIMUM" ) // Values returns all known values for Statistic. Note that this can be expanded in @@ -683,8 +683,8 @@ type StepCancellationOption string // Enum values for StepCancellationOption const ( - StepCancellationOptionSend_interrupt StepCancellationOption = "SEND_INTERRUPT" - StepCancellationOptionTerminate_process StepCancellationOption = "TERMINATE_PROCESS" + StepCancellationOptionSendInterrupt StepCancellationOption = "SEND_INTERRUPT" + StepCancellationOptionTerminateProcess StepCancellationOption = "TERMINATE_PROCESS" ) // Values returns all known values for StepCancellationOption. Note that this can @@ -729,13 +729,13 @@ type StepState string // Enum values for StepState const ( - StepStatePending StepState = "PENDING" - StepStateCancel_pending StepState = "CANCEL_PENDING" - StepStateRunning StepState = "RUNNING" - StepStateCompleted StepState = "COMPLETED" - StepStateCancelled StepState = "CANCELLED" - StepStateFailed StepState = "FAILED" - StepStateInterrupted StepState = "INTERRUPTED" + StepStatePending StepState = "PENDING" + StepStateCancelPending StepState = "CANCEL_PENDING" + StepStateRunning StepState = "RUNNING" + StepStateCompleted StepState = "COMPLETED" + StepStateCancelled StepState = "CANCELLED" + StepStateFailed StepState = "FAILED" + StepStateInterrupted StepState = "INTERRUPTED" ) // Values returns all known values for StepState. 
Note that this can be expanded in @@ -773,33 +773,33 @@ type Unit string // Enum values for Unit const ( - UnitNone Unit = "NONE" - UnitSeconds Unit = "SECONDS" - UnitMicro_seconds Unit = "MICRO_SECONDS" - UnitMilli_seconds Unit = "MILLI_SECONDS" - UnitBytes Unit = "BYTES" - UnitKilo_bytes Unit = "KILO_BYTES" - UnitMega_bytes Unit = "MEGA_BYTES" - UnitGiga_bytes Unit = "GIGA_BYTES" - UnitTera_bytes Unit = "TERA_BYTES" - UnitBits Unit = "BITS" - UnitKilo_bits Unit = "KILO_BITS" - UnitMega_bits Unit = "MEGA_BITS" - UnitGiga_bits Unit = "GIGA_BITS" - UnitTera_bits Unit = "TERA_BITS" - UnitPercent Unit = "PERCENT" - UnitCount Unit = "COUNT" - UnitBytes_per_second Unit = "BYTES_PER_SECOND" - UnitKilo_bytes_per_second Unit = "KILO_BYTES_PER_SECOND" - UnitMega_bytes_per_second Unit = "MEGA_BYTES_PER_SECOND" - UnitGiga_bytes_per_second Unit = "GIGA_BYTES_PER_SECOND" - UnitTera_bytes_per_second Unit = "TERA_BYTES_PER_SECOND" - UnitBits_per_second Unit = "BITS_PER_SECOND" - UnitKilo_bits_per_second Unit = "KILO_BITS_PER_SECOND" - UnitMega_bits_per_second Unit = "MEGA_BITS_PER_SECOND" - UnitGiga_bits_per_second Unit = "GIGA_BITS_PER_SECOND" - UnitTera_bits_per_second Unit = "TERA_BITS_PER_SECOND" - UnitCount_per_second Unit = "COUNT_PER_SECOND" + UnitNone Unit = "NONE" + UnitSeconds Unit = "SECONDS" + UnitMicroSeconds Unit = "MICRO_SECONDS" + UnitMilliSeconds Unit = "MILLI_SECONDS" + UnitBytes Unit = "BYTES" + UnitKiloBytes Unit = "KILO_BYTES" + UnitMegaBytes Unit = "MEGA_BYTES" + UnitGigaBytes Unit = "GIGA_BYTES" + UnitTeraBytes Unit = "TERA_BYTES" + UnitBits Unit = "BITS" + UnitKiloBits Unit = "KILO_BITS" + UnitMegaBits Unit = "MEGA_BITS" + UnitGigaBits Unit = "GIGA_BITS" + UnitTeraBits Unit = "TERA_BITS" + UnitPercent Unit = "PERCENT" + UnitCount Unit = "COUNT" + UnitBytesPerSecond Unit = "BYTES_PER_SECOND" + UnitKiloBytesPerSecond Unit = "KILO_BYTES_PER_SECOND" + UnitMegaBytesPerSecond Unit = "MEGA_BYTES_PER_SECOND" + UnitGigaBytesPerSecond Unit = "GIGA_BYTES_PER_SECOND" + UnitTeraBytesPerSecond Unit = "TERA_BYTES_PER_SECOND" + UnitBitsPerSecond Unit = "BITS_PER_SECOND" + UnitKiloBitsPerSecond Unit = "KILO_BITS_PER_SECOND" + UnitMegaBitsPerSecond Unit = "MEGA_BITS_PER_SECOND" + UnitGigaBitsPerSecond Unit = "GIGA_BITS_PER_SECOND" + UnitTeraBitsPerSecond Unit = "TERA_BITS_PER_SECOND" + UnitCountPerSecond Unit = "COUNT_PER_SECOND" ) // Values returns all known values for Unit. Note that this can be expanded in the diff --git a/service/emr/types/types.go b/service/emr/types/types.go index 06bd57c1af1..c6113ccbdbf 100644 --- a/service/emr/types/types.go +++ b/service/emr/types/types.go @@ -934,28 +934,28 @@ type InstanceFleetStatus struct { // A code representing the instance fleet status. // - // * PROVISIONING—The instance + // * PROVISIONING—The instance // fleet is provisioning EC2 resources and is not yet ready to run jobs. // - // * + // * // BOOTSTRAPPING—EC2 instances and other resources have been provisioned and the // bootstrap actions specified for the instances are underway. // - // * RUNNING—EC2 + // * RUNNING—EC2 // instances and other resources are running. They are either executing jobs or // waiting to execute jobs. // - // * RESIZING—A resize operation is underway. EC2 + // * RESIZING—A resize operation is underway. EC2 // instances are either being added or removed. // - // * SUSPENDED—A resize operation + // * SUSPENDED—A resize operation // could not complete. Existing EC2 instances are running, but instances can't be // added or removed. 
// - // * TERMINATING—The instance fleet is terminating EC2 + // * TERMINATING—The instance fleet is terminating EC2 // instances. // - // * TERMINATED—The instance fleet is no longer active, and all EC2 + // * TERMINATED—The instance fleet is no longer active, and all EC2 // instances have been terminated. State InstanceFleetState @@ -1713,37 +1713,36 @@ type NotebookExecution struct { // The status of the notebook execution. // - // * START_PENDING indicates that the + // * START_PENDING indicates that the // cluster has received the execution request but execution has not begun. // - // * + // * // STARTING indicates that the execution is starting on the cluster. // - // * RUNNING + // * RUNNING // indicates that the execution is being processed by the cluster. // - // * FINISHING + // * FINISHING // indicates that execution processing is in the final stages. // - // * FINISHED + // * FINISHED // indicates that the execution has completed without error. // - // * FAILING - // indicates that the execution is failing and will not finish successfully. + // * FAILING indicates + // that the execution is failing and will not finish successfully. // - // * - // FAILED indicates that the execution failed. + // * FAILED + // indicates that the execution failed. // - // * STOP_PENDING indicates that - // the cluster has received a StopNotebookExecution request and the stop is - // pending. + // * STOP_PENDING indicates that the cluster + // has received a StopNotebookExecution request and the stop is pending. // - // * STOPPING indicates that the cluster is in the process of - // stopping the execution as a result of a StopNotebookExecution request. + // * + // STOPPING indicates that the cluster is in the process of stopping the execution + // as a result of a StopNotebookExecution request. // - // * - // STOPPED indicates that the execution stopped because of a StopNotebookExecution - // request. + // * STOPPED indicates that the + // execution stopped because of a StopNotebookExecution request. Status NotebookExecutionStatus // A list of tags associated with a notebook execution. Tags are user-defined key @@ -1772,37 +1771,36 @@ type NotebookExecutionSummary struct { // The status of the notebook execution. // - // * START_PENDING indicates that the + // * START_PENDING indicates that the // cluster has received the execution request but execution has not begun. // - // * + // * // STARTING indicates that the execution is starting on the cluster. // - // * RUNNING + // * RUNNING // indicates that the execution is being processed by the cluster. // - // * FINISHING + // * FINISHING // indicates that execution processing is in the final stages. // - // * FINISHED + // * FINISHED // indicates that the execution has completed without error. // - // * FAILING - // indicates that the execution is failing and will not finish successfully. + // * FAILING indicates + // that the execution is failing and will not finish successfully. // - // * - // FAILED indicates that the execution failed. + // * FAILED + // indicates that the execution failed. // - // * STOP_PENDING indicates that - // the cluster has received a StopNotebookExecution request and the stop is - // pending. + // * STOP_PENDING indicates that the cluster + // has received a StopNotebookExecution request and the stop is pending. // - // * STOPPING indicates that the cluster is in the process of - // stopping the execution as a result of a StopNotebookExecution request. 
+ // * + // STOPPING indicates that the cluster is in the process of stopping the execution + // as a result of a StopNotebookExecution request. // - // * - // STOPPED indicates that the execution stopped because of a StopNotebookExecution - // request. + // * STOPPED indicates that the + // execution stopped because of a StopNotebookExecution request. Status NotebookExecutionStatus } diff --git a/service/eventbridge/api_op_PutTargets.go b/service/eventbridge/api_op_PutTargets.go index c21db332eeb..5a04b4871b4 100644 --- a/service/eventbridge/api_op_PutTargets.go +++ b/service/eventbridge/api_op_PutTargets.go @@ -16,62 +16,61 @@ import ( // when a rule is triggered. You can configure the following as targets for // Events: // -// * EC2 instances +// * EC2 instances // -// * SSM Run Command +// * SSM Run Command // -// * SSM Automation +// * SSM Automation // -// * -// AWS Lambda functions +// * AWS Lambda +// functions // -// * Data streams in Amazon Kinesis Data Streams +// * Data streams in Amazon Kinesis Data Streams // -// * -// Data delivery streams in Amazon Kinesis Data Firehose +// * Data delivery +// streams in Amazon Kinesis Data Firehose // -// * Amazon ECS tasks +// * Amazon ECS tasks // +// * AWS Step +// Functions state machines // -// * AWS Step Functions state machines +// * AWS Batch jobs // -// * AWS Batch jobs +// * AWS CodeBuild projects // -// * AWS CodeBuild -// projects +// * +// Pipelines in AWS CodePipeline // -// * Pipelines in AWS CodePipeline +// * Amazon Inspector assessment templates // -// * Amazon Inspector assessment -// templates +// * Amazon +// SNS topics // -// * Amazon SNS topics +// * Amazon SQS queues, including FIFO queues // -// * Amazon SQS queues, including FIFO -// queues +// * The default event bus +// of another AWS account // -// * The default event bus of another AWS account +// * Amazon API Gateway REST APIs // -// * Amazon API -// Gateway REST APIs +// * Redshift Clusters to +// invoke Data API ExecuteStatement on // -// * Redshift Clusters to invoke Data API ExecuteStatement -// on -// -// Creating rules with built-in targets is supported only in the AWS Management -// Console. The built-in targets are EC2 CreateSnapshot API call, EC2 -// RebootInstances API call, EC2 StopInstances API call, and EC2 TerminateInstances -// API call. For some target types, PutTargets provides target-specific parameters. -// If the target is a Kinesis data stream, you can optionally specify which shard -// the event goes to by using the KinesisParameters argument. To invoke a command -// on multiple EC2 instances with one rule, you can use the RunCommandParameters -// field. To be able to make API calls against the resources that you own, Amazon -// EventBridge (CloudWatch Events) needs the appropriate permissions. For AWS -// Lambda and Amazon SNS resources, EventBridge relies on resource-based policies. -// For EC2 instances, Kinesis data streams, AWS Step Functions state machines and -// API Gateway REST APIs, EventBridge relies on IAM roles that you specify in the -// RoleARN argument in PutTargets. For more information, see Authentication and -// Access Control +// Creating rules with built-in targets is +// supported only in the AWS Management Console. The built-in targets are EC2 +// CreateSnapshot API call, EC2 RebootInstances API call, EC2 StopInstances API +// call, and EC2 TerminateInstances API call. For some target types, PutTargets +// provides target-specific parameters. 
If the target is a Kinesis data stream, you +// can optionally specify which shard the event goes to by using the +// KinesisParameters argument. To invoke a command on multiple EC2 instances with +// one rule, you can use the RunCommandParameters field. To be able to make API +// calls against the resources that you own, Amazon EventBridge (CloudWatch Events) +// needs the appropriate permissions. For AWS Lambda and Amazon SNS resources, +// EventBridge relies on resource-based policies. For EC2 instances, Kinesis data +// streams, AWS Step Functions state machines and API Gateway REST APIs, +// EventBridge relies on IAM roles that you specify in the RoleARN argument in +// PutTargets. For more information, see Authentication and Access Control // (https://docs.aws.amazon.com/eventbridge/latest/userguide/auth-and-access-control-eventbridge.html) // in the Amazon EventBridge User Guide. If another AWS account is in the same // region and has granted you permission (using PutPermission), you can send events @@ -94,32 +93,31 @@ import ( // are mutually exclusive and optional parameters of a target. When a rule is // triggered due to a matched event: // -// * If none of the following arguments are +// * If none of the following arguments are // specified for a target, then the entire event is passed to the target in JSON // format (unless the target is Amazon EC2 Run Command or Amazon ECS task, in which // case nothing from the event is passed to the target). // -// * If Input is -// specified in the form of valid JSON, then the matched event is overridden with -// this constant. -// -// * If InputPath is specified in the form of JSONPath (for -// example, $.detail), then only the part of the event specified in the path is -// passed to the target (for example, only the detail part of the event is -// passed). -// -// * If InputTransformer is specified, then one or more specified -// JSONPaths are extracted from the event and used as values in a template that you -// specify as the input to the target. -// -// When you specify InputPath or -// InputTransformer, you must use JSON dot notation, not bracket notation. When you -// add targets to a rule and the associated rule triggers soon after, new or -// updated targets might not be immediately invoked. Allow a short period of time -// for changes to take effect. This action can partially fail if too many requests -// are made at the same time. If that happens, FailedEntryCount is non-zero in the -// response and each entry in FailedEntries provides the ID of the failed target -// and the error code. +// * If Input is specified +// in the form of valid JSON, then the matched event is overridden with this +// constant. +// +// * If InputPath is specified in the form of JSONPath (for example, +// $.detail), then only the part of the event specified in the path is passed to +// the target (for example, only the detail part of the event is passed). +// +// * If +// InputTransformer is specified, then one or more specified JSONPaths are +// extracted from the event and used as values in a template that you specify as +// the input to the target. +// +// When you specify InputPath or InputTransformer, you +// must use JSON dot notation, not bracket notation. When you add targets to a rule +// and the associated rule triggers soon after, new or updated targets might not be +// immediately invoked. Allow a short period of time for changes to take effect. +// This action can partially fail if too many requests are made at the same time. 
+// If that happens, FailedEntryCount is non-zero in the response and each entry in +// FailedEntries provides the ID of the failed target and the error code. func (c *Client) PutTargets(ctx context.Context, params *PutTargetsInput, optFns ...func(*Options)) (*PutTargetsOutput, error) { if params == nil { params = &PutTargetsInput{} diff --git a/service/eventbridge/doc.go b/service/eventbridge/doc.go index 81a8f5176c1..f075ee51192 100644 --- a/service/eventbridge/doc.go +++ b/service/eventbridge/doc.go @@ -9,19 +9,18 @@ // them to targets to take action. You can also use rules to take action on a // predetermined schedule. For example, you can configure rules to: // -// * +// * // Automatically invoke an AWS Lambda function to update DNS entries when an event // notifies you that Amazon EC2 instance enters the running state. // -// * Direct +// * Direct // specific API records from AWS CloudTrail to an Amazon Kinesis data stream for // detailed analysis of potential security or availability risks. // -// * -// Periodically invoke a built-in target to create a snapshot of an Amazon EBS -// volume. +// * Periodically +// invoke a built-in target to create a snapshot of an Amazon EBS volume. // -// For more information about the features of Amazon EventBridge, see the -// Amazon EventBridge User Guide -// (https://docs.aws.amazon.com/eventbridge/latest/userguide). +// For more +// information about the features of Amazon EventBridge, see the Amazon EventBridge +// User Guide (https://docs.aws.amazon.com/eventbridge/latest/userguide). package eventbridge diff --git a/service/eventbridge/types/types.go b/service/eventbridge/types/types.go index 53b0457f91d..f29410ea81a 100644 --- a/service/eventbridge/types/types.go +++ b/service/eventbridge/types/types.go @@ -228,32 +228,31 @@ type InputTransformer struct { // valid JSON. If InputTemplate is a JSON object (surrounded by curly braces), the // following restrictions apply: // - // * The placeholder cannot be used as an object + // * The placeholder cannot be used as an object // key. // - // * Object values cannot include quote marks. + // * Object values cannot include quote marks. // - // The following example - // shows the syntax for using InputPathsMap and InputTemplate. - // "InputTransformer": - // { + // The following example shows + // the syntax for using InputPathsMap and InputTemplate. 
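A minimal sketch of the PutTargets call described above: it attaches a Lambda function to an existing rule and forwards only the detail part of each event via InputPath (JSON dot notation). The rule name, target ID, and function ARN are placeholders, and the pointer-based field shapes follow the rest of this patch:

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/eventbridge"
	ebtypes "github.com/aws/aws-sdk-go-v2/service/eventbridge/types"
)

// addLambdaTarget adds one Lambda target to ruleName, passing only $.detail.
func addLambdaTarget(ctx context.Context, client *eventbridge.Client, ruleName, functionARN string) error {
	out, err := client.PutTargets(ctx, &eventbridge.PutTargetsInput{
		Rule: aws.String(ruleName),
		Targets: []*ebtypes.Target{
			{
				Id:        aws.String("lambda-target-1"),
				Arn:       aws.String(functionARN),
				InputPath: aws.String("$.detail"), // JSON dot notation, per the doc comment
			},
		},
	})
	if err != nil {
		return err
	}
	// PutTargets can partially fail; FailedEntries carries the per-target errors.
	if len(out.FailedEntries) > 0 {
		return fmt.Errorf("%d target(s) were not added", len(out.FailedEntries))
	}
	return nil
}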
"InputTransformer": + // + // { // - // "InputPathsMap": {"instance": - // "$.detail.instance","status": "$.detail.status"}, + // "InputPathsMap": {"instance": "$.detail.instance","status": + // "$.detail.status"}, // - // "InputTemplate": " is in - // state " + // "InputTemplate": " is in state " // - // } To have the InputTemplate include quote marks within a JSON string, - // escape each quote marks with a slash, as in the following example: - // "InputTransformer": + // } To have the + // InputTemplate include quote marks within a JSON string, escape each quote marks + // with a slash, as in the following example: "InputTransformer": // { // - // "InputPathsMap": {"instance": - // "$.detail.instance","status": "$.detail.status"}, // - // "InputTemplate": " is in - // state """ + // "InputPathsMap": {"instance": "$.detail.instance","status": + // "$.detail.status"}, + // + // "InputTemplate": " is in state """ // // } // diff --git a/service/firehose/api_op_CreateDeliveryStream.go b/service/firehose/api_op_CreateDeliveryStream.go index c40a103a0be..f2fe5b60975 100644 --- a/service/firehose/api_op_CreateDeliveryStream.go +++ b/service/firehose/api_op_CreateDeliveryStream.go @@ -44,23 +44,23 @@ import ( // strictly. For example, record boundaries might be such that the size is a little // over or under the configured buffering size. By default, no encryption is // performed. We strongly recommend that you enable encryption to ensure secure -// data storage in Amazon S3. A few notes about Amazon Redshift as a destination: +// data storage in Amazon S3. A few notes about Amazon Redshift as a +// destination: // +// * An Amazon Redshift destination requires an S3 bucket as +// intermediate location. Kinesis Data Firehose first delivers data to Amazon S3 +// and then uses COPY syntax to load data into an Amazon Redshift table. This is +// specified in the RedshiftDestinationConfiguration.S3Configuration parameter. // -// * An Amazon Redshift destination requires an S3 bucket as intermediate location. -// Kinesis Data Firehose first delivers data to Amazon S3 and then uses COPY syntax -// to load data into an Amazon Redshift table. This is specified in the -// RedshiftDestinationConfiguration.S3Configuration parameter. -// -// * The -// compression formats SNAPPY or ZIP cannot be specified in +// * +// The compression formats SNAPPY or ZIP cannot be specified in // RedshiftDestinationConfiguration.S3Configuration because the Amazon Redshift // COPY operation that reads from the S3 bucket doesn't support these compression // formats. // -// * We strongly recommend that you use the user name and password -// you provide exclusively with Kinesis Data Firehose, and that the permissions for -// the account are restricted for Amazon Redshift INSERT permissions. +// * We strongly recommend that you use the user name and password you +// provide exclusively with Kinesis Data Firehose, and that the permissions for the +// account are restricted for Amazon Redshift INSERT permissions. // // Kinesis Data // Firehose assumes the IAM role that is configured as part of the destination. The @@ -100,10 +100,10 @@ type CreateDeliveryStreamInput struct { // The delivery stream type. This parameter can be one of the following values: // + // * + // DirectPut: Provider applications access the delivery stream directly. // - // * DirectPut: Provider applications access the delivery stream directly. - // - // * + // * // KinesisStreamAsSource: The delivery stream uses a Kinesis data stream as a // source. 
DeliveryStreamType types.DeliveryStreamType diff --git a/service/firehose/api_op_ListDeliveryStreams.go b/service/firehose/api_op_ListDeliveryStreams.go index 819cb082e77..9828f241c5d 100644 --- a/service/firehose/api_op_ListDeliveryStreams.go +++ b/service/firehose/api_op_ListDeliveryStreams.go @@ -38,10 +38,10 @@ type ListDeliveryStreamsInput struct { // The delivery stream type. This can be one of the following values: // - // * - // DirectPut: Provider applications access the delivery stream directly. + // * DirectPut: + // Provider applications access the delivery stream directly. // - // * + // * // KinesisStreamAsSource: The delivery stream uses a Kinesis data stream as a // source. // diff --git a/service/firehose/types/enums.go b/service/firehose/types/enums.go index 7f2b308e650..89582bc19a1 100644 --- a/service/firehose/types/enums.go +++ b/service/firehose/types/enums.go @@ -6,11 +6,11 @@ type CompressionFormat string // Enum values for CompressionFormat const ( - CompressionFormatUncompressed CompressionFormat = "UNCOMPRESSED" - CompressionFormatGzip CompressionFormat = "GZIP" - CompressionFormatZip CompressionFormat = "ZIP" - CompressionFormatSnappy CompressionFormat = "Snappy" - CompressionFormatHadoop_snappy CompressionFormat = "HADOOP_SNAPPY" + CompressionFormatUncompressed CompressionFormat = "UNCOMPRESSED" + CompressionFormatGzip CompressionFormat = "GZIP" + CompressionFormatZip CompressionFormat = "ZIP" + CompressionFormatSnappy CompressionFormat = "Snappy" + CompressionFormatHadoopSnappy CompressionFormat = "HADOOP_SNAPPY" ) // Values returns all known values for CompressionFormat. Note that this can be @@ -48,12 +48,12 @@ type DeliveryStreamEncryptionStatus string // Enum values for DeliveryStreamEncryptionStatus const ( - DeliveryStreamEncryptionStatusEnabled DeliveryStreamEncryptionStatus = "ENABLED" - DeliveryStreamEncryptionStatusEnabling DeliveryStreamEncryptionStatus = "ENABLING" - DeliveryStreamEncryptionStatusEnabling_failed DeliveryStreamEncryptionStatus = "ENABLING_FAILED" - DeliveryStreamEncryptionStatusDisabled DeliveryStreamEncryptionStatus = "DISABLED" - DeliveryStreamEncryptionStatusDisabling DeliveryStreamEncryptionStatus = "DISABLING" - DeliveryStreamEncryptionStatusDisabling_failed DeliveryStreamEncryptionStatus = "DISABLING_FAILED" + DeliveryStreamEncryptionStatusEnabled DeliveryStreamEncryptionStatus = "ENABLED" + DeliveryStreamEncryptionStatusEnabling DeliveryStreamEncryptionStatus = "ENABLING" + DeliveryStreamEncryptionStatusEnablingFailed DeliveryStreamEncryptionStatus = "ENABLING_FAILED" + DeliveryStreamEncryptionStatusDisabled DeliveryStreamEncryptionStatus = "DISABLED" + DeliveryStreamEncryptionStatusDisabling DeliveryStreamEncryptionStatus = "DISABLING" + DeliveryStreamEncryptionStatusDisablingFailed DeliveryStreamEncryptionStatus = "DISABLING_FAILED" ) // Values returns all known values for DeliveryStreamEncryptionStatus. 
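A brief sketch of listing only DirectPut delivery streams, using the DeliveryStreamType filter described above. The DeliveryStreamNames output field and the exact enum identifier are assumptions based on the usual generated names and are not shown in this hunk:

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/service/firehose"
	fhtypes "github.com/aws/aws-sdk-go-v2/service/firehose/types"
)

// listDirectPutStreams returns the names of delivery streams that applications
// write to directly (as opposed to Kinesis-stream-sourced ones).
func listDirectPutStreams(ctx context.Context, client *firehose.Client) ([]*string, error) {
	out, err := client.ListDeliveryStreams(ctx, &firehose.ListDeliveryStreamsInput{
		DeliveryStreamType: fhtypes.DeliveryStreamTypeDirectPut, // assumed identifier for "DirectPut"
	})
	if err != nil {
		return nil, err
	}
	return out.DeliveryStreamNames, nil // assumed output field name
}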
Note that @@ -75,21 +75,21 @@ type DeliveryStreamFailureType string // Enum values for DeliveryStreamFailureType const ( - DeliveryStreamFailureTypeRetire_kms_grant_failed DeliveryStreamFailureType = "RETIRE_KMS_GRANT_FAILED" - DeliveryStreamFailureTypeCreate_kms_grant_failed DeliveryStreamFailureType = "CREATE_KMS_GRANT_FAILED" - DeliveryStreamFailureTypeKms_access_denied DeliveryStreamFailureType = "KMS_ACCESS_DENIED" - DeliveryStreamFailureTypeDisabled_kms_key DeliveryStreamFailureType = "DISABLED_KMS_KEY" - DeliveryStreamFailureTypeInvalid_kms_key DeliveryStreamFailureType = "INVALID_KMS_KEY" - DeliveryStreamFailureTypeKms_key_not_found DeliveryStreamFailureType = "KMS_KEY_NOT_FOUND" - DeliveryStreamFailureTypeKms_opt_in_required DeliveryStreamFailureType = "KMS_OPT_IN_REQUIRED" - DeliveryStreamFailureTypeCreate_eni_failed DeliveryStreamFailureType = "CREATE_ENI_FAILED" - DeliveryStreamFailureTypeDelete_eni_failed DeliveryStreamFailureType = "DELETE_ENI_FAILED" - DeliveryStreamFailureTypeSubnet_not_found DeliveryStreamFailureType = "SUBNET_NOT_FOUND" - DeliveryStreamFailureTypeSecurity_group_not_found DeliveryStreamFailureType = "SECURITY_GROUP_NOT_FOUND" - DeliveryStreamFailureTypeEni_access_denied DeliveryStreamFailureType = "ENI_ACCESS_DENIED" - DeliveryStreamFailureTypeSubnet_access_denied DeliveryStreamFailureType = "SUBNET_ACCESS_DENIED" - DeliveryStreamFailureTypeSecurity_group_access_denied DeliveryStreamFailureType = "SECURITY_GROUP_ACCESS_DENIED" - DeliveryStreamFailureTypeUnknown_error DeliveryStreamFailureType = "UNKNOWN_ERROR" + DeliveryStreamFailureTypeRetireKmsGrantFailed DeliveryStreamFailureType = "RETIRE_KMS_GRANT_FAILED" + DeliveryStreamFailureTypeCreateKmsGrantFailed DeliveryStreamFailureType = "CREATE_KMS_GRANT_FAILED" + DeliveryStreamFailureTypeKmsAccessDenied DeliveryStreamFailureType = "KMS_ACCESS_DENIED" + DeliveryStreamFailureTypeDisabledKmsKey DeliveryStreamFailureType = "DISABLED_KMS_KEY" + DeliveryStreamFailureTypeInvalidKmsKey DeliveryStreamFailureType = "INVALID_KMS_KEY" + DeliveryStreamFailureTypeKmsKeyNotFound DeliveryStreamFailureType = "KMS_KEY_NOT_FOUND" + DeliveryStreamFailureTypeKmsOptInRequired DeliveryStreamFailureType = "KMS_OPT_IN_REQUIRED" + DeliveryStreamFailureTypeCreateEniFailed DeliveryStreamFailureType = "CREATE_ENI_FAILED" + DeliveryStreamFailureTypeDeleteEniFailed DeliveryStreamFailureType = "DELETE_ENI_FAILED" + DeliveryStreamFailureTypeSubnetNotFound DeliveryStreamFailureType = "SUBNET_NOT_FOUND" + DeliveryStreamFailureTypeSecurityGroupNotFound DeliveryStreamFailureType = "SECURITY_GROUP_NOT_FOUND" + DeliveryStreamFailureTypeEniAccessDenied DeliveryStreamFailureType = "ENI_ACCESS_DENIED" + DeliveryStreamFailureTypeSubnetAccessDenied DeliveryStreamFailureType = "SUBNET_ACCESS_DENIED" + DeliveryStreamFailureTypeSecurityGroupAccessDenied DeliveryStreamFailureType = "SECURITY_GROUP_ACCESS_DENIED" + DeliveryStreamFailureTypeUnknownError DeliveryStreamFailureType = "UNKNOWN_ERROR" ) // Values returns all known values for DeliveryStreamFailureType. 
Note that this @@ -119,11 +119,11 @@ type DeliveryStreamStatus string // Enum values for DeliveryStreamStatus const ( - DeliveryStreamStatusCreating DeliveryStreamStatus = "CREATING" - DeliveryStreamStatusCreating_failed DeliveryStreamStatus = "CREATING_FAILED" - DeliveryStreamStatusDeleting DeliveryStreamStatus = "DELETING" - DeliveryStreamStatusDeleting_failed DeliveryStreamStatus = "DELETING_FAILED" - DeliveryStreamStatusActive DeliveryStreamStatus = "ACTIVE" + DeliveryStreamStatusCreating DeliveryStreamStatus = "CREATING" + DeliveryStreamStatusCreatingFailed DeliveryStreamStatus = "CREATING_FAILED" + DeliveryStreamStatusDeleting DeliveryStreamStatus = "DELETING" + DeliveryStreamStatusDeletingFailed DeliveryStreamStatus = "DELETING_FAILED" + DeliveryStreamStatusActive DeliveryStreamStatus = "ACTIVE" ) // Values returns all known values for DeliveryStreamStatus. Note that this can be @@ -240,8 +240,8 @@ type KeyType string // Enum values for KeyType const ( - KeyTypeAws_owned_cmk KeyType = "AWS_OWNED_CMK" - KeyTypeCustomer_managed_cmk KeyType = "CUSTOMER_MANAGED_CMK" + KeyTypeAwsOwnedCmk KeyType = "AWS_OWNED_CMK" + KeyTypeCustomerManagedCmk KeyType = "CUSTOMER_MANAGED_CMK" ) // Values returns all known values for KeyType. Note that this can be expanded in @@ -294,8 +294,8 @@ type OrcFormatVersion string // Enum values for OrcFormatVersion const ( - OrcFormatVersionV0_11 OrcFormatVersion = "V0_11" - OrcFormatVersionV0_12 OrcFormatVersion = "V0_12" + OrcFormatVersionV011 OrcFormatVersion = "V0_11" + OrcFormatVersionV012 OrcFormatVersion = "V0_12" ) // Values returns all known values for OrcFormatVersion. Note that this can be @@ -350,11 +350,11 @@ type ProcessorParameterName string // Enum values for ProcessorParameterName const ( - ProcessorParameterNameLambda_arn ProcessorParameterName = "LambdaArn" - ProcessorParameterNameLambda_number_of_retries ProcessorParameterName = "NumberOfRetries" - ProcessorParameterNameRole_arn ProcessorParameterName = "RoleArn" - ProcessorParameterNameBuffer_size_in_mb ProcessorParameterName = "BufferSizeInMBs" - ProcessorParameterNameBuffer_interval_in_seconds ProcessorParameterName = "BufferIntervalInSeconds" + ProcessorParameterNameLambdaArn ProcessorParameterName = "LambdaArn" + ProcessorParameterNameLambdaNumberOfRetries ProcessorParameterName = "NumberOfRetries" + ProcessorParameterNameRoleArn ProcessorParameterName = "RoleArn" + ProcessorParameterNameBufferSizeInMb ProcessorParameterName = "BufferSizeInMBs" + ProcessorParameterNameBufferIntervalInSeconds ProcessorParameterName = "BufferIntervalInSeconds" ) // Values returns all known values for ProcessorParameterName. Note that this can diff --git a/service/firehose/types/types.go b/service/firehose/types/types.go index 3b1ca9818aa..f2939882044 100644 --- a/service/firehose/types/types.go +++ b/service/firehose/types/types.go @@ -124,10 +124,10 @@ type DeliveryStreamDescription struct { // The delivery stream type. This can be one of the following values: // - // * - // DirectPut: Provider applications access the delivery stream directly. + // * DirectPut: + // Provider applications access the delivery stream directly. // - // * + // * // KinesisStreamAsSource: The delivery stream uses a Kinesis data stream as a // source. 
// @@ -1722,29 +1722,29 @@ type VpcConfiguration struct { // that the role trusts the Kinesis Data Firehose service principal and that it // grants the following permissions: // - // * ec2:DescribeVpcs + // * ec2:DescribeVpcs // - // * + // * // ec2:DescribeVpcAttribute // - // * ec2:DescribeSubnets + // * ec2:DescribeSubnets // - // * - // ec2:DescribeSecurityGroups + // * ec2:DescribeSecurityGroups // - // * ec2:DescribeNetworkInterfaces + // * + // ec2:DescribeNetworkInterfaces // - // * - // ec2:CreateNetworkInterface + // * ec2:CreateNetworkInterface // - // * ec2:CreateNetworkInterfacePermission + // * + // ec2:CreateNetworkInterfacePermission // - // * - // ec2:DeleteNetworkInterface + // * ec2:DeleteNetworkInterface // - // If you revoke these permissions after you create the - // delivery stream, Kinesis Data Firehose can't scale out by creating more ENIs - // when necessary. You might therefore see a degradation in performance. + // If you + // revoke these permissions after you create the delivery stream, Kinesis Data + // Firehose can't scale out by creating more ENIs when necessary. You might + // therefore see a degradation in performance. // // This member is required. RoleARN *string @@ -1792,24 +1792,24 @@ type VpcConfigurationDescription struct { // the Kinesis Data Firehose service principal and that it grants the following // permissions: // - // * ec2:DescribeVpcs + // * ec2:DescribeVpcs // - // * ec2:DescribeVpcAttribute + // * ec2:DescribeVpcAttribute // - // * + // * // ec2:DescribeSubnets // - // * ec2:DescribeSecurityGroups + // * ec2:DescribeSecurityGroups // - // * + // * // ec2:DescribeNetworkInterfaces // - // * ec2:CreateNetworkInterface + // * ec2:CreateNetworkInterface // - // * + // * // ec2:CreateNetworkInterfacePermission // - // * ec2:DeleteNetworkInterface + // * ec2:DeleteNetworkInterface // // If you // revoke these permissions after you create the delivery stream, Kinesis Data diff --git a/service/fms/api_op_DeletePolicy.go b/service/fms/api_op_DeletePolicy.go index 12a13df5a62..5d18f962ab6 100644 --- a/service/fms/api_op_DeletePolicy.go +++ b/service/fms/api_op_DeletePolicy.go @@ -37,32 +37,32 @@ type DeletePolicyInput struct { // If True, the request performs cleanup according to the policy type. For AWS WAF // and Shield Advanced policies, the cleanup does the following: // - // * Deletes - // rule groups created by AWS Firewall Manager + // * Deletes rule + // groups created by AWS Firewall Manager // - // * Removes web ACLs from - // in-scope resources + // * Removes web ACLs from in-scope + // resources // - // * Deletes web ACLs that contain no rules or rule - // groups + // * Deletes web ACLs that contain no rules or rule groups // - // For security group policies, the cleanup does the following for each - // security group in the policy: + // For security + // group policies, the cleanup does the following for each security group in the + // policy: // - // * Disassociates the security group from - // in-scope resources + // * Disassociates the security group from in-scope resources // - // * Deletes the security group if it was created through - // Firewall Manager and if it's no longer associated with any resources through - // another policy + // * Deletes + // the security group if it was created through Firewall Manager and if it's no + // longer associated with any resources through another policy // - // After the cleanup, in-scope resources are no longer protected by - // web ACLs in this policy. 
Protection of out-of-scope resources remains unchanged. - // Scope is determined by tags that you create and accounts that you associate with - // the policy. When creating the policy, if you specify that only resources in - // specific accounts or with specific tags are in scope of the policy, those - // accounts and resources are handled by the policy. All others are out of scope. - // If you don't specify tags or accounts, all resources are in scope. + // After the cleanup, + // in-scope resources are no longer protected by web ACLs in this policy. + // Protection of out-of-scope resources remains unchanged. Scope is determined by + // tags that you create and accounts that you associate with the policy. When + // creating the policy, if you specify that only resources in specific accounts or + // with specific tags are in scope of the policy, those accounts and resources are + // handled by the policy. All others are out of scope. If you don't specify tags or + // accounts, all resources are in scope. DeleteAllPolicyResources *bool } diff --git a/service/fms/api_op_GetProtectionStatus.go b/service/fms/api_op_GetProtectionStatus.go index 1a1ede0dbc8..c554a40efa2 100644 --- a/service/fms/api_op_GetProtectionStatus.go +++ b/service/fms/api_op_GetProtectionStatus.go @@ -75,20 +75,20 @@ type GetProtectionStatusOutput struct { // Details about the attack, including the following: // - // * Attack type + // * Attack type // - // * - // Account ID + // * Account + // ID // - // * ARN of the resource attacked + // * ARN of the resource attacked // - // * Start time of the attack + // * Start time of the attack // + // * End time of + // the attack (ongoing attacks will not have an end time) // - // * End time of the attack (ongoing attacks will not have an end time) - // - // The - // details are in JSON format. + // The details are in JSON + // format. Data *string // If you have more objects than the number that you specified for MaxResults in diff --git a/service/fms/api_op_PutPolicy.go b/service/fms/api_op_PutPolicy.go index 7bce07864d7..0e2a451c4ce 100644 --- a/service/fms/api_op_PutPolicy.go +++ b/service/fms/api_op_PutPolicy.go @@ -14,25 +14,25 @@ import ( // Creates an AWS Firewall Manager policy. Firewall Manager provides the following // types of policies: // -// * A Shield Advanced policy, which applies Shield -// Advanced protection to specified accounts and resources +// * A Shield Advanced policy, which applies Shield Advanced +// protection to specified accounts and resources // -// * An AWS WAF policy -// (type WAFV2), which defines rule groups to run first in the corresponding AWS -// WAF web ACL and rule groups to run last in the web ACL. +// * An AWS WAF policy (type +// WAFV2), which defines rule groups to run first in the corresponding AWS WAF web +// ACL and rule groups to run last in the web ACL. // -// * An AWS WAF -// Classic policy (type WAF), which defines a rule group. +// * An AWS WAF Classic policy +// (type WAF), which defines a rule group. // -// * A security group -// policy, which manages VPC security groups across your AWS organization. +// * A security group policy, which +// manages VPC security groups across your AWS organization. // -// Each -// policy is specific to one of the types. If you want to enforce more than one -// policy type across accounts, create multiple policies. You can create multiple -// policies for each type. You must be subscribed to Shield Advanced to create a -// Shield Advanced policy. 
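A small sketch of the DeletePolicy call with the cleanup behavior described above enabled. Only DeleteAllPolicyResources appears in this hunk; the PolicyId field name is an assumption:

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/fms"
)

// deletePolicyWithCleanup deletes a Firewall Manager policy and asks the service
// to clean up the rule groups, web ACLs, or security groups it created.
func deletePolicyWithCleanup(ctx context.Context, client *fms.Client, policyID string) error {
	_, err := client.DeletePolicy(ctx, &fms.DeletePolicyInput{
		PolicyId:                 aws.String(policyID), // assumed field name
		DeleteAllPolicyResources: aws.Bool(true),
	})
	return err
}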
For more information about subscribing to Shield -// Advanced, see CreateSubscription +// Each policy is +// specific to one of the types. If you want to enforce more than one policy type +// across accounts, create multiple policies. You can create multiple policies for +// each type. You must be subscribed to Shield Advanced to create a Shield Advanced +// policy. For more information about subscribing to Shield Advanced, see +// CreateSubscription // (https://docs.aws.amazon.com/waf/latest/DDOSAPIReference/API_CreateSubscription.html). func (c *Client) PutPolicy(ctx context.Context, params *PutPolicyInput, optFns ...func(*Options)) (*PutPolicyOutput, error) { if params == nil { diff --git a/service/fms/types/enums.go b/service/fms/types/enums.go index 32517880d43..ae28984bb7d 100644 --- a/service/fms/types/enums.go +++ b/service/fms/types/enums.go @@ -30,8 +30,8 @@ type CustomerPolicyScopeIdType string // Enum values for CustomerPolicyScopeIdType const ( - CustomerPolicyScopeIdTypeAccount CustomerPolicyScopeIdType = "ACCOUNT" - CustomerPolicyScopeIdTypeOrg_unit CustomerPolicyScopeIdType = "ORG_UNIT" + CustomerPolicyScopeIdTypeAccount CustomerPolicyScopeIdType = "ACCOUNT" + CustomerPolicyScopeIdTypeOrgUnit CustomerPolicyScopeIdType = "ORG_UNIT" ) // Values returns all known values for CustomerPolicyScopeIdType. Note that this @@ -106,12 +106,12 @@ type SecurityServiceType string // Enum values for SecurityServiceType const ( - SecurityServiceTypeWaf SecurityServiceType = "WAF" - SecurityServiceTypeWafv2 SecurityServiceType = "WAFV2" - SecurityServiceTypeShield_advanced SecurityServiceType = "SHIELD_ADVANCED" - SecurityServiceTypeSecurity_groups_common SecurityServiceType = "SECURITY_GROUPS_COMMON" - SecurityServiceTypeSecurity_groups_content_audit SecurityServiceType = "SECURITY_GROUPS_CONTENT_AUDIT" - SecurityServiceTypeSecurity_groups_usage_audit SecurityServiceType = "SECURITY_GROUPS_USAGE_AUDIT" + SecurityServiceTypeWaf SecurityServiceType = "WAF" + SecurityServiceTypeWafv2 SecurityServiceType = "WAFV2" + SecurityServiceTypeShieldAdvanced SecurityServiceType = "SHIELD_ADVANCED" + SecurityServiceTypeSecurityGroupsCommon SecurityServiceType = "SECURITY_GROUPS_COMMON" + SecurityServiceTypeSecurityGroupsContentAudit SecurityServiceType = "SECURITY_GROUPS_CONTENT_AUDIT" + SecurityServiceTypeSecurityGroupsUsageAudit SecurityServiceType = "SECURITY_GROUPS_USAGE_AUDIT" ) // Values returns all known values for SecurityServiceType. Note that this can be diff --git a/service/fms/types/types.go b/service/fms/types/types.go index c7619519cca..72fcbc034a3 100644 --- a/service/fms/types/types.go +++ b/service/fms/types/types.go @@ -211,17 +211,17 @@ type Policy struct { // specified by the ExcludeMap. You can specify account IDs, OUs, or a // combination: // - // * Specify account IDs by setting the key to ACCOUNT. For - // example, the following is a valid map: {“ACCOUNT” : [“accountID1”, - // “accountID2”]}. + // * Specify account IDs by setting the key to ACCOUNT. For example, + // the following is a valid map: {“ACCOUNT” : [“accountID1”, “accountID2”]}. // - // * Specify OUs by setting the key to ORG_UNIT. For example, - // the following is a valid map: {“ORG_UNIT” : [“ouid111”, “ouid112”]}. + // * + // Specify OUs by setting the key to ORG_UNIT. For example, the following is a + // valid map: {“ORG_UNIT” : [“ouid111”, “ouid112”]}. // - // * - // Specify accounts and OUs together in a single map, separated with a comma. 
For - // example, the following is a valid map: {“ACCOUNT” : [“accountID1”, - // “accountID2”], “ORG_UNIT” : [“ouid111”, “ouid112”]}. + // * Specify accounts and OUs + // together in a single map, separated with a comma. For example, the following is + // a valid map: {“ACCOUNT” : [“accountID1”, “accountID2”], “ORG_UNIT” : [“ouid111”, + // “ouid112”]}. ExcludeMap map[string][]*string // Specifies the AWS account IDs and AWS Organizations organizational units (OUs) @@ -235,17 +235,17 @@ type Policy struct { // specified by the ExcludeMap. You can specify account IDs, OUs, or a // combination: // - // * Specify account IDs by setting the key to ACCOUNT. For - // example, the following is a valid map: {“ACCOUNT” : [“accountID1”, - // “accountID2”]}. + // * Specify account IDs by setting the key to ACCOUNT. For example, + // the following is a valid map: {“ACCOUNT” : [“accountID1”, “accountID2”]}. // - // * Specify OUs by setting the key to ORG_UNIT. For example, - // the following is a valid map: {“ORG_UNIT” : [“ouid111”, “ouid112”]}. + // * + // Specify OUs by setting the key to ORG_UNIT. For example, the following is a + // valid map: {“ORG_UNIT” : [“ouid111”, “ouid112”]}. // - // * - // Specify accounts and OUs together in a single map, separated with a comma. For - // example, the following is a valid map: {“ACCOUNT” : [“accountID1”, - // “accountID2”], “ORG_UNIT” : [“ouid111”, “ouid112”]}. + // * Specify accounts and OUs + // together in a single map, separated with a comma. For example, the following is + // a valid map: {“ACCOUNT” : [“accountID1”, “accountID2”], “ORG_UNIT” : [“ouid111”, + // “ouid112”]}. IncludeMap map[string][]*string // The ID of the AWS Firewall Manager policy. @@ -496,23 +496,22 @@ type SecurityServicePolicyData struct { // Details about the service that are specific to the service type, in JSON format. // For service type SHIELD_ADVANCED, this is an empty string. 
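The ACCOUNT / ORG_UNIT map format described above translates directly into the map[string][]*string field shape shown in this hunk. A minimal sketch; the account and OU IDs are the placeholder values from the doc comment:

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	fmstypes "github.com/aws/aws-sdk-go-v2/service/fms/types"
)

// scopedPolicy returns a partially populated Policy whose scope includes one
// OU but excludes a single account inside it. Other required Policy fields
// (name, resource type, security service data) are omitted from this sketch.
func scopedPolicy() fmstypes.Policy {
	return fmstypes.Policy{
		IncludeMap: map[string][]*string{
			"ORG_UNIT": {aws.String("ouid111")},
		},
		ExcludeMap: map[string][]*string{
			"ACCOUNT": {aws.String("accountID1")},
		},
	}
}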
// - // * Example: + // * Example: // WAFV2"ManagedServiceData": // "{\"type\":\"WAFV2\",\"defaultAction\":{\"type\":\"ALLOW\"},\"preProcessRuleGroups\":[{\"managedRuleGroupIdentifier\":null,\"ruleGroupArn\":\"rulegrouparn\",\"overrideAction\":{\"type\":\"COUNT\"},\"excludeRules\":[{\"name\":\"EntityName\"}],\"ruleGroupType\":\"RuleGroup\"}],\"postProcessRuleGroups\":[{\"managedRuleGroupIdentifier\":{\"managedRuleGroupName\":\"AWSManagedRulesAdminProtectionRuleSet\",\"vendorName\":\"AWS\"},\"ruleGroupArn\":\"rulegrouparn\",\"overrideAction\":{\"type\":\"NONE\"},\"excludeRules\":[],\"ruleGroupType\":\"ManagedRuleGroup\"}],\"overrideCustomerWebACLAssociation\":false}" // - // - // * Example: WAF Classic"ManagedServiceData": "{\"type\": \"WAF\", \"ruleGroups\": + // * + // Example: WAF Classic"ManagedServiceData": "{\"type\": \"WAF\", \"ruleGroups\": // [{\"id\": \"12345678-1bcd-9012-efga-0987654321ab\", \"overrideAction\" : // {\"type\": \"COUNT\"}}], \"defaultAction\": {\"type\": \"BLOCK\"}} // - // * - // Example: + // * Example: // SECURITY_GROUPS_COMMON"SecurityServicePolicyData":{"Type":"SECURITY_GROUPS_COMMON","ManagedServiceData":"{\"type\":\"SECURITY_GROUPS_COMMON\",\"revertManualSecurityGroupChanges\":false,\"exclusiveResourceSecurityGroupManagement\":false, // \"applyToAllEC2InstanceENIs\":false,\"securityGroups\":[{\"id\":\" // sg-000e55995d61a06bd\"}]}"},"RemediationEnabled":false,"ResourceType":"AWS::EC2::NetworkInterface"} // - // - // * Example: + // * + // Example: // SECURITY_GROUPS_CONTENT_AUDIT"SecurityServicePolicyData":{"Type":"SECURITY_GROUPS_CONTENT_AUDIT","ManagedServiceData":"{\"type\":\"SECURITY_GROUPS_CONTENT_AUDIT\",\"securityGroups\":[{\"id\":\" // sg-000e55995d61a06bd // \"}],\"securityGroupAction\":{\"type\":\"ALLOW\"}}"},"RemediationEnabled":false,"ResourceType":"AWS::EC2::NetworkInterface"} @@ -522,7 +521,7 @@ type SecurityServicePolicyData struct { // contain a value or a range that matches a rule value or range in the policy // security group. // - // * Example: + // * Example: // SECURITY_GROUPS_USAGE_AUDIT"SecurityServicePolicyData":{"Type":"SECURITY_GROUPS_USAGE_AUDIT","ManagedServiceData":"{\"type\":\"SECURITY_GROUPS_USAGE_AUDIT\",\"deleteUnusedSecurityGroups\":true,\"coalesceRedundantSecurityGroups\":true}"},"RemediationEnabled":false,"Resou // rceType":"AWS::EC2::SecurityGroup"} ManagedServiceData *string diff --git a/service/forecast/api_op_CreateDataset.go b/service/forecast/api_op_CreateDataset.go index fb50353ce61..058fdcb0e6b 100644 --- a/service/forecast/api_op_CreateDataset.go +++ b/service/forecast/api_op_CreateDataset.go @@ -15,24 +15,24 @@ import ( // provide helps Forecast understand how to consume the data for model training. // This includes the following: // -// * DataFrequency - How frequently your -// historical time-series data is collected. +// * DataFrequency - How frequently your historical +// time-series data is collected. // -// * Domain and DatasetType - Each -// dataset has an associated dataset domain and a type within the domain. Amazon -// Forecast provides a list of predefined domains and types within each domain. For -// each unique dataset domain and type within the domain, Amazon Forecast requires -// your data to include a minimum set of predefined fields. +// * Domain and DatasetType - Each dataset has an +// associated dataset domain and a type within the domain. Amazon Forecast provides +// a list of predefined domains and types within each domain. 
For each unique +// dataset domain and type within the domain, Amazon Forecast requires your data to +// include a minimum set of predefined fields. // -// * Schema - A -// schema specifies the fields in the dataset, including the field name and data -// type. +// * Schema - A schema specifies the +// fields in the dataset, including the field name and data type. // -// After creating a dataset, you import your training data into it and add -// the dataset to a dataset group. You use the dataset group to create a predictor. -// For more information, see howitworks-datasets-groups. To get a list of all your -// datasets, use the ListDatasets operation. For example Forecast datasets, see the -// Amazon Forecast Sample GitHub repository +// After creating a +// dataset, you import your training data into it and add the dataset to a dataset +// group. You use the dataset group to create a predictor. For more information, +// see howitworks-datasets-groups. To get a list of all your datasets, use the +// ListDatasets operation. For example Forecast datasets, see the Amazon Forecast +// Sample GitHub repository // (https://github.com/aws-samples/amazon-forecast-samples). The Status of a // dataset must be ACTIVE before you can import training data. Use the // DescribeDataset operation to get the status. @@ -99,33 +99,32 @@ type CreateDatasetInput struct { // organize them. Each tag consists of a key and an optional value, both of which // you define. The following basic restrictions apply to tags: // - // * Maximum - // number of tags per resource - 50. + // * Maximum number of + // tags per resource - 50. // - // * For each resource, each tag key must be - // unique, and each tag key can have only one value. + // * For each resource, each tag key must be unique, and + // each tag key can have only one value. // - // * Maximum key length - - // 128 Unicode characters in UTF-8. - // - // * Maximum value length - 256 Unicode + // * Maximum key length - 128 Unicode // characters in UTF-8. // - // * If your tagging schema is used across multiple - // services and resources, remember that other services may have restrictions on - // allowed characters. Generally allowed characters are: letters, numbers, and - // spaces representable in UTF-8, and the following characters: + - = . _ : / @. + // * Maximum value length - 256 Unicode characters in + // UTF-8. // + // * If your tagging schema is used across multiple services and resources, + // remember that other services may have restrictions on allowed characters. + // Generally allowed characters are: letters, numbers, and spaces representable in + // UTF-8, and the following characters: + - = . _ : / @. // - // * Tag keys and values are case sensitive. + // * Tag keys and values are + // case sensitive. // - // * Do not use aws:, AWS:, or any - // upper or lowercase combination of such as a prefix for keys as it is reserved - // for AWS use. You cannot edit or delete tag keys with this prefix. Values can - // have this prefix. If a tag value has aws as its prefix but the key does not, - // then Forecast considers it to be a user tag and will count against the limit of - // 50 tags. Tags with only the key prefix of aws do not count against your tags per - // resource limit. + // * Do not use aws:, AWS:, or any upper or lowercase combination + // of such as a prefix for keys as it is reserved for AWS use. You cannot edit or + // delete tag keys with this prefix. Values can have this prefix. 
If a tag value + // has aws as its prefix but the key does not, then Forecast considers it to be a + // user tag and will count against the limit of 50 tags. Tags with only the key + // prefix of aws do not count against your tags per resource limit. Tags []*types.Tag } diff --git a/service/forecast/api_op_CreateDatasetGroup.go b/service/forecast/api_op_CreateDatasetGroup.go index 7a4e9ab185a..b4e74b13e90 100644 --- a/service/forecast/api_op_CreateDatasetGroup.go +++ b/service/forecast/api_op_CreateDatasetGroup.go @@ -61,33 +61,32 @@ type CreateDatasetGroupInput struct { // and organize them. Each tag consists of a key and an optional value, both of // which you define. The following basic restrictions apply to tags: // - // * Maximum + // * Maximum // number of tags per resource - 50. // - // * For each resource, each tag key must be + // * For each resource, each tag key must be // unique, and each tag key can have only one value. // - // * Maximum key length - - // 128 Unicode characters in UTF-8. + // * Maximum key length - 128 + // Unicode characters in UTF-8. // - // * Maximum value length - 256 Unicode - // characters in UTF-8. + // * Maximum value length - 256 Unicode characters in + // UTF-8. // - // * If your tagging schema is used across multiple - // services and resources, remember that other services may have restrictions on - // allowed characters. Generally allowed characters are: letters, numbers, and - // spaces representable in UTF-8, and the following characters: + - = . _ : / @. + // * If your tagging schema is used across multiple services and resources, + // remember that other services may have restrictions on allowed characters. + // Generally allowed characters are: letters, numbers, and spaces representable in + // UTF-8, and the following characters: + - = . _ : / @. // + // * Tag keys and values are + // case sensitive. // - // * Tag keys and values are case sensitive. - // - // * Do not use aws:, AWS:, or any - // upper or lowercase combination of such as a prefix for keys as it is reserved - // for AWS use. You cannot edit or delete tag keys with this prefix. Values can - // have this prefix. If a tag value has aws as its prefix but the key does not, - // then Forecast considers it to be a user tag and will count against the limit of - // 50 tags. Tags with only the key prefix of aws do not count against your tags per - // resource limit. + // * Do not use aws:, AWS:, or any upper or lowercase combination + // of such as a prefix for keys as it is reserved for AWS use. You cannot edit or + // delete tag keys with this prefix. Values can have this prefix. If a tag value + // has aws as its prefix but the key does not, then Forecast considers it to be a + // user tag and will count against the limit of 50 tags. Tags with only the key + // prefix of aws do not count against your tags per resource limit. Tags []*types.Tag } diff --git a/service/forecast/api_op_CreateDatasetImportJob.go b/service/forecast/api_op_CreateDatasetImportJob.go index 60c5f2bb663..05cc40bb1ee 100644 --- a/service/forecast/api_op_CreateDatasetImportJob.go +++ b/service/forecast/api_op_CreateDatasetImportJob.go @@ -72,47 +72,47 @@ type CreateDatasetImportJobInput struct { // categorize and organize them. Each tag consists of a key and an optional value, // both of which you define. The following basic restrictions apply to tags: // - // * + // * // Maximum number of tags per resource - 50. 
// - // * For each resource, each tag key + // * For each resource, each tag key // must be unique, and each tag key can have only one value. // - // * Maximum key - // length - 128 Unicode characters in UTF-8. + // * Maximum key length + // - 128 Unicode characters in UTF-8. // - // * Maximum value length - 256 - // Unicode characters in UTF-8. + // * Maximum value length - 256 Unicode + // characters in UTF-8. // - // * If your tagging schema is used across - // multiple services and resources, remember that other services may have - // restrictions on allowed characters. Generally allowed characters are: letters, - // numbers, and spaces representable in UTF-8, and the following characters: + - = - // . _ : / @. + // * If your tagging schema is used across multiple services + // and resources, remember that other services may have restrictions on allowed + // characters. Generally allowed characters are: letters, numbers, and spaces + // representable in UTF-8, and the following characters: + - = . _ : / @. // - // * Tag keys and values are case sensitive. + // * Tag + // keys and values are case sensitive. // - // * Do not use - // aws:, AWS:, or any upper or lowercase combination of such as a prefix for keys - // as it is reserved for AWS use. You cannot edit or delete tag keys with this - // prefix. Values can have this prefix. If a tag value has aws as its prefix but - // the key does not, then Forecast considers it to be a user tag and will count - // against the limit of 50 tags. Tags with only the key prefix of aws do not count - // against your tags per resource limit. + // * Do not use aws:, AWS:, or any upper or + // lowercase combination of such as a prefix for keys as it is reserved for AWS + // use. You cannot edit or delete tag keys with this prefix. Values can have this + // prefix. If a tag value has aws as its prefix but the key does not, then Forecast + // considers it to be a user tag and will count against the limit of 50 tags. Tags + // with only the key prefix of aws do not count against your tags per resource + // limit. Tags []*types.Tag // The format of timestamps in the dataset. The format that you specify depends on // the DataFrequency specified when the dataset was created. The following formats // are supported // - // * "yyyy-MM-dd" For the following data frequencies: Y, M, W, - // and D + // * "yyyy-MM-dd" For the following data frequencies: Y, M, W, and + // D // - // * "yyyy-MM-dd HH:mm:ss" For the following data frequencies: H, 30min, - // 15min, and 1min; and optionally, for: Y, M, W, and D + // * "yyyy-MM-dd HH:mm:ss" For the following data frequencies: H, 30min, 15min, + // and 1min; and optionally, for: Y, M, W, and D // - // If the format isn't - // specified, Amazon Forecast expects the format to be "yyyy-MM-dd HH:mm:ss". + // If the format isn't specified, + // Amazon Forecast expects the format to be "yyyy-MM-dd HH:mm:ss". TimestampFormat *string } diff --git a/service/forecast/api_op_CreateForecast.go b/service/forecast/api_op_CreateForecast.go index 37d7e3f827e..7ad5dd6895b 100644 --- a/service/forecast/api_op_CreateForecast.go +++ b/service/forecast/api_op_CreateForecast.go @@ -62,33 +62,32 @@ type CreateForecastInput struct { // organize them. Each tag consists of a key and an optional value, both of which // you define. The following basic restrictions apply to tags: // - // * Maximum - // number of tags per resource - 50. + // * Maximum number of + // tags per resource - 50. 
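For the TimestampFormat rules above, a sketch of an import job into a daily dataset, which therefore uses the short "yyyy-MM-dd" form. TimestampFormat, DatasetArn, and Tags come from this hunk; the DatasetImportJobName, DataSource, and S3Config field names are assumptions:

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/forecast"
	fctypes "github.com/aws/aws-sdk-go-v2/service/forecast/types"
)

// importDailyCSV loads training data from S3 into an existing daily dataset
// (DataFrequency "D"), so the date-only timestamp format applies.
func importDailyCSV(ctx context.Context, client *forecast.Client, datasetARN, s3Path, roleARN string) error {
	_, err := client.CreateDatasetImportJob(ctx, &forecast.CreateDatasetImportJobInput{
		DatasetImportJobName: aws.String("daily_import_2020_10"), // assumed field name
		DatasetArn:           aws.String(datasetARN),
		DataSource: &fctypes.DataSource{ // field names assumed
			S3Config: &fctypes.S3Config{
				Path:    aws.String(s3Path),
				RoleArn: aws.String(roleARN),
			},
		},
		TimestampFormat: aws.String("yyyy-MM-dd"),
	})
	return err
}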
// - // * For each resource, each tag key must be - // unique, and each tag key can have only one value. + // * For each resource, each tag key must be unique, and + // each tag key can have only one value. // - // * Maximum key length - - // 128 Unicode characters in UTF-8. - // - // * Maximum value length - 256 Unicode + // * Maximum key length - 128 Unicode // characters in UTF-8. // - // * If your tagging schema is used across multiple - // services and resources, remember that other services may have restrictions on - // allowed characters. Generally allowed characters are: letters, numbers, and - // spaces representable in UTF-8, and the following characters: + - = . _ : / @. + // * Maximum value length - 256 Unicode characters in + // UTF-8. // + // * If your tagging schema is used across multiple services and resources, + // remember that other services may have restrictions on allowed characters. + // Generally allowed characters are: letters, numbers, and spaces representable in + // UTF-8, and the following characters: + - = . _ : / @. // - // * Tag keys and values are case sensitive. + // * Tag keys and values are + // case sensitive. // - // * Do not use aws:, AWS:, or any - // upper or lowercase combination of such as a prefix for keys as it is reserved - // for AWS use. You cannot edit or delete tag keys with this prefix. Values can - // have this prefix. If a tag value has aws as its prefix but the key does not, - // then Forecast considers it to be a user tag and will count against the limit of - // 50 tags. Tags with only the key prefix of aws do not count against your tags per - // resource limit. + // * Do not use aws:, AWS:, or any upper or lowercase combination + // of such as a prefix for keys as it is reserved for AWS use. You cannot edit or + // delete tag keys with this prefix. Values can have this prefix. If a tag value + // has aws as its prefix but the key does not, then Forecast considers it to be a + // user tag and will count against the limit of 50 tags. Tags with only the key + // prefix of aws do not count against your tags per resource limit. Tags []*types.Tag } diff --git a/service/forecast/api_op_CreateForecastExportJob.go b/service/forecast/api_op_CreateForecastExportJob.go index eeff4506a76..fcaf937b7c1 100644 --- a/service/forecast/api_op_CreateForecastExportJob.go +++ b/service/forecast/api_op_CreateForecastExportJob.go @@ -62,33 +62,33 @@ type CreateForecastExportJobInput struct { // categorize and organize them. Each tag consists of a key and an optional value, // both of which you define. The following basic restrictions apply to tags: // - // * + // * // Maximum number of tags per resource - 50. // - // * For each resource, each tag key + // * For each resource, each tag key // must be unique, and each tag key can have only one value. // - // * Maximum key - // length - 128 Unicode characters in UTF-8. + // * Maximum key length + // - 128 Unicode characters in UTF-8. // - // * Maximum value length - 256 - // Unicode characters in UTF-8. + // * Maximum value length - 256 Unicode + // characters in UTF-8. // - // * If your tagging schema is used across - // multiple services and resources, remember that other services may have - // restrictions on allowed characters. Generally allowed characters are: letters, - // numbers, and spaces representable in UTF-8, and the following characters: + - = - // . _ : / @. 
+ // * If your tagging schema is used across multiple services + // and resources, remember that other services may have restrictions on allowed + // characters. Generally allowed characters are: letters, numbers, and spaces + // representable in UTF-8, and the following characters: + - = . _ : / @. // - // * Tag keys and values are case sensitive. + // * Tag + // keys and values are case sensitive. // - // * Do not use - // aws:, AWS:, or any upper or lowercase combination of such as a prefix for keys - // as it is reserved for AWS use. You cannot edit or delete tag keys with this - // prefix. Values can have this prefix. If a tag value has aws as its prefix but - // the key does not, then Forecast considers it to be a user tag and will count - // against the limit of 50 tags. Tags with only the key prefix of aws do not count - // against your tags per resource limit. + // * Do not use aws:, AWS:, or any upper or + // lowercase combination of such as a prefix for keys as it is reserved for AWS + // use. You cannot edit or delete tag keys with this prefix. Values can have this + // prefix. If a tag value has aws as its prefix but the key does not, then Forecast + // considers it to be a user tag and will count against the limit of 50 tags. Tags + // with only the key prefix of aws do not count against your tags per resource + // limit. Tags []*types.Tag } diff --git a/service/forecast/api_op_CreatePredictor.go b/service/forecast/api_op_CreatePredictor.go index 8e4b0646b87..613f8debfad 100644 --- a/service/forecast/api_op_CreatePredictor.go +++ b/service/forecast/api_op_CreatePredictor.go @@ -34,19 +34,19 @@ import ( // see EvaluationResult. When AutoML is enabled, the following properties are // disallowed: // -// * AlgorithmArn +// * AlgorithmArn // -// * HPOConfig +// * HPOConfig // -// * PerformHPO +// * PerformHPO // -// * -// TrainingParameters +// * TrainingParameters // -// To get a list of all of your predictors, use the -// ListPredictors operation. Before you can use the predictor to create a forecast, -// the Status of the predictor must be ACTIVE, signifying that training has -// completed. To get the status, use the DescribePredictor operation. +// To +// get a list of all of your predictors, use the ListPredictors operation. Before +// you can use the predictor to create a forecast, the Status of the predictor must +// be ACTIVE, signifying that training has completed. To get the status, use the +// DescribePredictor operation. func (c *Client) CreatePredictor(ctx context.Context, params *CreatePredictorInput, optFns ...func(*Options)) (*CreatePredictorOutput, error) { if params == nil { params = &CreatePredictorInput{} @@ -93,19 +93,19 @@ type CreatePredictorInput struct { // The Amazon Resource Name (ARN) of the algorithm to use for model training. // Required if PerformAutoML is not set to true. 
Supported algorithms: // - // * + // * // arn:aws:forecast:::algorithm/ARIMA // - // * - // arn:aws:forecast:::algorithm/Deep_AR_Plus Supports hyperparameter optimization - // (HPO) + // * arn:aws:forecast:::algorithm/Deep_AR_Plus + // Supports hyperparameter optimization (HPO) // - // * arn:aws:forecast:::algorithm/ETS + // * + // arn:aws:forecast:::algorithm/ETS // - // * - // arn:aws:forecast:::algorithm/NPTS + // * arn:aws:forecast:::algorithm/NPTS // - // * arn:aws:forecast:::algorithm/Prophet + // * + // arn:aws:forecast:::algorithm/Prophet AlgorithmArn *string // An AWS Key Management Service (KMS) key and the AWS Identity and Access @@ -145,40 +145,39 @@ type CreatePredictorInput struct { // to specify an algorithm and PerformAutoML must be false. The following algorithm // supports HPO: // - // * DeepAR+ + // * DeepAR+ PerformHPO *bool // The optional metadata that you apply to the predictor to help you categorize and // organize them. Each tag consists of a key and an optional value, both of which // you define. The following basic restrictions apply to tags: // - // * Maximum - // number of tags per resource - 50. + // * Maximum number of + // tags per resource - 50. // - // * For each resource, each tag key must be - // unique, and each tag key can have only one value. + // * For each resource, each tag key must be unique, and + // each tag key can have only one value. // - // * Maximum key length - - // 128 Unicode characters in UTF-8. - // - // * Maximum value length - 256 Unicode + // * Maximum key length - 128 Unicode // characters in UTF-8. // - // * If your tagging schema is used across multiple - // services and resources, remember that other services may have restrictions on - // allowed characters. Generally allowed characters are: letters, numbers, and - // spaces representable in UTF-8, and the following characters: + - = . _ : / @. + // * Maximum value length - 256 Unicode characters in + // UTF-8. // + // * If your tagging schema is used across multiple services and resources, + // remember that other services may have restrictions on allowed characters. + // Generally allowed characters are: letters, numbers, and spaces representable in + // UTF-8, and the following characters: + - = . _ : / @. // - // * Tag keys and values are case sensitive. + // * Tag keys and values are + // case sensitive. // - // * Do not use aws:, AWS:, or any - // upper or lowercase combination of such as a prefix for keys as it is reserved - // for AWS use. You cannot edit or delete tag keys with this prefix. Values can - // have this prefix. If a tag value has aws as its prefix but the key does not, - // then Forecast considers it to be a user tag and will count against the limit of - // 50 tags. Tags with only the key prefix of aws do not count against your tags per - // resource limit. + // * Do not use aws:, AWS:, or any upper or lowercase combination + // of such as a prefix for keys as it is reserved for AWS use. You cannot edit or + // delete tag keys with this prefix. Values can have this prefix. If a tag value + // has aws as its prefix but the key does not, then Forecast considers it to be a + // user tag and will count against the limit of 50 tags. Tags with only the key + // prefix of aws do not count against your tags per resource limit. Tags []*types.Tag // The hyperparameters to override for model training. 
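As noted above, AlgorithmArn, HPOConfig, PerformHPO, and TrainingParameters are disallowed once AutoML is enabled, so a caller picks one mode or the other. A fragmentary sketch of the manual-algorithm path; required fields not shown in this hunk (predictor name, forecast horizon, input data and featurization configuration) are omitted:

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/forecast"
)

// etsPredictorInput selects the ETS algorithm explicitly instead of AutoML.
// PerformAutoML must stay false (or unset) when AlgorithmArn is provided, and
// HPO is only supported for DeepAR+ (arn:aws:forecast:::algorithm/Deep_AR_Plus).
func etsPredictorInput() *forecast.CreatePredictorInput {
	return &forecast.CreatePredictorInput{
		AlgorithmArn:  aws.String("arn:aws:forecast:::algorithm/ETS"),
		PerformAutoML: aws.Bool(false),
		PerformHPO:    aws.Bool(false),
		// Dataset group, forecast horizon, and featurization settings omitted.
	}
}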
The hyperparameters that you diff --git a/service/forecast/api_op_DescribeDataset.go b/service/forecast/api_op_DescribeDataset.go index 561a3e67368..9b1292191e7 100644 --- a/service/forecast/api_op_DescribeDataset.go +++ b/service/forecast/api_op_DescribeDataset.go @@ -16,12 +16,12 @@ import ( // In addition to listing the parameters specified in the CreateDataset request, // this operation includes the following dataset properties: // -// * CreationTime +// * CreationTime // +// * +// LastModificationTime // -// * LastModificationTime -// -// * Status +// * Status func (c *Client) DescribeDataset(ctx context.Context, params *DescribeDatasetInput, optFns ...func(*Options)) (*DescribeDatasetOutput, error) { if params == nil { params = &DescribeDatasetInput{} @@ -84,18 +84,18 @@ type DescribeDatasetOutput struct { // The status of the dataset. States include: // - // * ACTIVE + // * ACTIVE // - // * CREATE_PENDING, + // * CREATE_PENDING, // CREATE_IN_PROGRESS, CREATE_FAILED // - // * DELETE_PENDING, DELETE_IN_PROGRESS, + // * DELETE_PENDING, DELETE_IN_PROGRESS, // DELETE_FAILED // - // * UPDATE_PENDING, UPDATE_IN_PROGRESS, UPDATE_FAILED + // * UPDATE_PENDING, UPDATE_IN_PROGRESS, UPDATE_FAILED // - // The - // UPDATE states apply while data is imported to the dataset from a call to the + // The UPDATE + // states apply while data is imported to the dataset from a call to the // CreateDatasetImportJob operation and reflect the status of the dataset import // job. For example, when the import job status is CREATE_IN_PROGRESS, the status // of the dataset is UPDATE_IN_PROGRESS. The Status of the dataset must be ACTIVE diff --git a/service/forecast/api_op_DescribeDatasetGroup.go b/service/forecast/api_op_DescribeDatasetGroup.go index 9e953433972..2d895b2dee5 100644 --- a/service/forecast/api_op_DescribeDatasetGroup.go +++ b/service/forecast/api_op_DescribeDatasetGroup.go @@ -16,15 +16,14 @@ import ( // addition to listing the parameters provided in the CreateDatasetGroup request, // this operation includes the following properties: // -// * DatasetArns - The -// datasets belonging to the group. +// * DatasetArns - The datasets +// belonging to the group. // -// * CreationTime +// * CreationTime // -// * -// LastModificationTime +// * LastModificationTime // -// * Status +// * Status func (c *Client) DescribeDatasetGroup(ctx context.Context, params *DescribeDatasetGroupInput, optFns ...func(*Options)) (*DescribeDatasetGroupOutput, error) { if params == nil { params = &DescribeDatasetGroupInput{} @@ -73,20 +72,20 @@ type DescribeDatasetGroupOutput struct { // The status of the dataset group. States include: // - // * ACTIVE + // * ACTIVE // - // * - // CREATE_PENDING, CREATE_IN_PROGRESS, CREATE_FAILED + // * CREATE_PENDING, + // CREATE_IN_PROGRESS, CREATE_FAILED // - // * DELETE_PENDING, - // DELETE_IN_PROGRESS, DELETE_FAILED + // * DELETE_PENDING, DELETE_IN_PROGRESS, + // DELETE_FAILED // - // * UPDATE_PENDING, UPDATE_IN_PROGRESS, - // UPDATE_FAILED + // * UPDATE_PENDING, UPDATE_IN_PROGRESS, UPDATE_FAILED // - // The UPDATE states apply when you call the UpdateDatasetGroup - // operation. The Status of the dataset group must be ACTIVE before you can use the - // dataset group to create a predictor. + // The UPDATE + // states apply when you call the UpdateDatasetGroup operation. The Status of the + // dataset group must be ACTIVE before you can use the dataset group to create a + // predictor. Status *string // Metadata pertaining to the operation's result. 
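The reflowed DescribeDatasetGroup documentation above states that the dataset group's Status must be ACTIVE before it can be used to create a predictor. As a minimal, illustrative sketch (not part of the generated diff), the regenerated client could be used to poll for that state roughly as follows; the ARN, region, poll interval, and config-loading call are assumptions, and the LoadDefaultConfig signature has varied across v2 releases:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/forecast"
)

func main() {
	ctx := context.Background()

	// Load shared AWS configuration (credentials, region). The exact
	// LoadDefaultConfig signature differs between v2 preview and GA releases.
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		panic(err)
	}
	client := forecast.NewFromConfig(cfg)

	// Hypothetical dataset group ARN, used only for illustration.
	arn := "arn:aws:forecast:us-west-2:123456789012:dataset-group/example"

	for {
		out, err := client.DescribeDatasetGroup(ctx, &forecast.DescribeDatasetGroupInput{
			DatasetGroupArn: aws.String(arn),
		})
		if err != nil {
			panic(err)
		}
		status := aws.ToString(out.Status) // Status is *string in this generation
		fmt.Println("dataset group status:", status)
		if status == "ACTIVE" || status == "CREATE_FAILED" {
			break
		}
		time.Sleep(30 * time.Second)
	}
}

In practice the terminal states checked would mirror the CREATE_PENDING/CREATE_IN_PROGRESS/CREATE_FAILED values listed in the doc comment above, and the same polling pattern applies to the other Describe operations whose Status documentation is reflowed in this patch.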
diff --git a/service/forecast/api_op_DescribeDatasetImportJob.go b/service/forecast/api_op_DescribeDatasetImportJob.go index 9ab4c971be6..f08a6e4b835 100644 --- a/service/forecast/api_op_DescribeDatasetImportJob.go +++ b/service/forecast/api_op_DescribeDatasetImportJob.go @@ -17,19 +17,19 @@ import ( // CreateDatasetImportJob request, this operation includes the following // properties: // -// * CreationTime +// * CreationTime // -// * LastModificationTime +// * LastModificationTime // -// * DataSize +// * DataSize // +// * +// FieldStatistics // -// * FieldStatistics +// * Status // -// * Status -// -// * Message - If an error occurred, -// information about the error. +// * Message - If an error occurred, information about +// the error. func (c *Client) DescribeDatasetImportJob(ctx context.Context, params *DescribeDatasetImportJobInput, optFns ...func(*Options)) (*DescribeDatasetImportJobOutput, error) { if params == nil { params = &DescribeDatasetImportJobInput{} @@ -82,13 +82,13 @@ type DescribeDatasetImportJobOutput struct { // The last time that the dataset was modified. The time depends on the status of // the job, as follows: // - // * CREATE_PENDING - The same time as CreationTime. - // + // * CREATE_PENDING - The same time as CreationTime. // - // * CREATE_IN_PROGRESS - The current timestamp. + // * + // CREATE_IN_PROGRESS - The current timestamp. // - // * ACTIVE or CREATE_FAILED - - // When the job finished or failed. + // * ACTIVE or CREATE_FAILED - When + // the job finished or failed. LastModificationTime *time.Time // If an error occurred, an informational message about the error. @@ -98,12 +98,12 @@ type DescribeDatasetImportJobOutput struct { // the dataset. For example, when the import job status is CREATE_IN_PROGRESS, the // status of the dataset is UPDATE_IN_PROGRESS. States include: // - // * ACTIVE - // + // * ACTIVE // - // * CREATE_PENDING, CREATE_IN_PROGRESS, CREATE_FAILED + // * + // CREATE_PENDING, CREATE_IN_PROGRESS, CREATE_FAILED // - // * DELETE_PENDING, + // * DELETE_PENDING, // DELETE_IN_PROGRESS, DELETE_FAILED Status *string @@ -111,11 +111,11 @@ type DescribeDatasetImportJobOutput struct { // the DataFrequency specified when the dataset was created. The following formats // are supported // - // * "yyyy-MM-dd" For the following data frequencies: Y, M, W, - // and D + // * "yyyy-MM-dd" For the following data frequencies: Y, M, W, and + // D // - // * "yyyy-MM-dd HH:mm:ss" For the following data frequencies: H, 30min, - // 15min, and 1min; and optionally, for: Y, M, W, and D + // * "yyyy-MM-dd HH:mm:ss" For the following data frequencies: H, 30min, 15min, + // and 1min; and optionally, for: Y, M, W, and D TimestampFormat *string // Metadata pertaining to the operation's result. diff --git a/service/forecast/api_op_DescribeForecast.go b/service/forecast/api_op_DescribeForecast.go index a442ab2bea2..61bb7607628 100644 --- a/service/forecast/api_op_DescribeForecast.go +++ b/service/forecast/api_op_DescribeForecast.go @@ -15,17 +15,17 @@ import ( // listing the properties provided in the CreateForecast request, this operation // lists the following properties: // -// * DatasetGroupArn - The dataset group that +// * DatasetGroupArn - The dataset group that // provided the training data. // -// * CreationTime -// -// * LastModificationTime +// * CreationTime // +// * LastModificationTime // // * Status // -// * Message - If an error occurred, information about the error. +// * +// Message - If an error occurred, information about the error. 
func (c *Client) DescribeForecast(ctx context.Context, params *DescribeForecastInput, optFns ...func(*Options)) (*DescribeForecastOutput, error) { if params == nil { params = &DescribeForecastInput{} @@ -80,12 +80,12 @@ type DescribeForecastOutput struct { // The status of the forecast. States include: // - // * ACTIVE + // * ACTIVE // - // * CREATE_PENDING, + // * CREATE_PENDING, // CREATE_IN_PROGRESS, CREATE_FAILED // - // * DELETE_PENDING, DELETE_IN_PROGRESS, + // * DELETE_PENDING, DELETE_IN_PROGRESS, // DELETE_FAILED // // The Status of the forecast must be ACTIVE before you can query or diff --git a/service/forecast/api_op_DescribeForecastExportJob.go b/service/forecast/api_op_DescribeForecastExportJob.go index ea780667b9f..b56e0b0a267 100644 --- a/service/forecast/api_op_DescribeForecastExportJob.go +++ b/service/forecast/api_op_DescribeForecastExportJob.go @@ -17,14 +17,14 @@ import ( // CreateForecastExportJob request, this operation lists the following // properties: // -// * CreationTime +// * CreationTime // -// * LastModificationTime +// * LastModificationTime // -// * Status +// * Status // -// * -// Message - If an error occurred, information about the error. +// * Message - If an +// error occurred, information about the error. func (c *Client) DescribeForecastExportJob(ctx context.Context, params *DescribeForecastExportJobInput, optFns ...func(*Options)) (*DescribeForecastExportJobOutput, error) { if params == nil { params = &DescribeForecastExportJobInput{} @@ -74,12 +74,12 @@ type DescribeForecastExportJobOutput struct { // The status of the forecast export job. States include: // - // * ACTIVE + // * ACTIVE // - // * + // * // CREATE_PENDING, CREATE_IN_PROGRESS, CREATE_FAILED // - // * DELETE_PENDING, + // * DELETE_PENDING, // DELETE_IN_PROGRESS, DELETE_FAILED // // The Status of the forecast export job must be diff --git a/service/forecast/api_op_DescribePredictor.go b/service/forecast/api_op_DescribePredictor.go index 581ba13f6ea..0abfac3d5c4 100644 --- a/service/forecast/api_op_DescribePredictor.go +++ b/service/forecast/api_op_DescribePredictor.go @@ -16,21 +16,21 @@ import ( // to listing the properties provided in the CreatePredictor request, this // operation lists the following properties: // -// * DatasetImportJobArns - The -// dataset import jobs used to import training data. +// * DatasetImportJobArns - The dataset +// import jobs used to import training data. // -// * AutoMLAlgorithmArns - -// If AutoML is performed, the algorithms that were evaluated. +// * AutoMLAlgorithmArns - If AutoML is +// performed, the algorithms that were evaluated. // -// * -// CreationTime +// * CreationTime // -// * LastModificationTime +// * +// LastModificationTime // -// * Status +// * Status // -// * Message - If an -// error occurred, information about the error. +// * Message - If an error occurred, information +// about the error. func (c *Client) DescribePredictor(ctx context.Context, params *DescribePredictorInput, optFns ...func(*Options)) (*DescribePredictorOutput, error) { if params == nil { params = &DescribePredictorInput{} @@ -121,19 +121,19 @@ type DescribePredictorOutput struct { // The status of the predictor. 
States include: // - // * ACTIVE + // * ACTIVE // - // * - // CREATE_PENDING, CREATE_IN_PROGRESS, CREATE_FAILED + // * CREATE_PENDING, + // CREATE_IN_PROGRESS, CREATE_FAILED // - // * DELETE_PENDING, - // DELETE_IN_PROGRESS, DELETE_FAILED + // * DELETE_PENDING, DELETE_IN_PROGRESS, + // DELETE_FAILED // - // * UPDATE_PENDING, UPDATE_IN_PROGRESS, - // UPDATE_FAILED + // * UPDATE_PENDING, UPDATE_IN_PROGRESS, UPDATE_FAILED // - // The Status of the predictor must be ACTIVE before you can use the - // predictor to create a forecast. + // The Status + // of the predictor must be ACTIVE before you can use the predictor to create a + // forecast. Status *string // The default training parameters or overrides selected during model training. If diff --git a/service/forecast/api_op_ListDatasetImportJobs.go b/service/forecast/api_op_ListDatasetImportJobs.go index dec35d6dc9d..c223e6bd76a 100644 --- a/service/forecast/api_op_ListDatasetImportJobs.go +++ b/service/forecast/api_op_ListDatasetImportJobs.go @@ -39,19 +39,19 @@ type ListDatasetImportJobsInput struct { // respectively. The match statement consists of a key and a value. Filter // properties // - // * Condition - The condition to apply. Valid values are IS and + // * Condition - The condition to apply. Valid values are IS and // IS_NOT. To include the datasets that match the statement, specify IS. To exclude // matching datasets, specify IS_NOT. // - // * Key - The name of the parameter to - // filter on. Valid values are DatasetArn and Status. + // * Key - The name of the parameter to filter + // on. Valid values are DatasetArn and Status. // - // * Value - The value to - // match. + // * Value - The value to match. // - // For example, to list all dataset import jobs whose status is ACTIVE, you - // specify the following filter: "Filters": [ { "Condition": "IS", "Key": "Status", - // "Value": "ACTIVE" } ] + // For + // example, to list all dataset import jobs whose status is ACTIVE, you specify the + // following filter: "Filters": [ { "Condition": "IS", "Key": "Status", "Value": + // "ACTIVE" } ] Filters []*types.Filter // The number of items to return in the response. diff --git a/service/forecast/api_op_ListForecastExportJobs.go b/service/forecast/api_op_ListForecastExportJobs.go index 030bc9bf4ef..d392b2c3350 100644 --- a/service/forecast/api_op_ListForecastExportJobs.go +++ b/service/forecast/api_op_ListForecastExportJobs.go @@ -39,18 +39,18 @@ type ListForecastExportJobsInput struct { // list, respectively. The match statement consists of a key and a value. Filter // properties // - // * Condition - The condition to apply. Valid values are IS and + // * Condition - The condition to apply. Valid values are IS and // IS_NOT. To include the forecast export jobs that match the statement, specify // IS. To exclude matching forecast export jobs, specify IS_NOT. // - // * Key - The - // name of the parameter to filter on. Valid values are ForecastArn and Status. + // * Key - The name + // of the parameter to filter on. Valid values are ForecastArn and Status. // + // * Value + // - The value to match. // - // * Value - The value to match. 
- // - // For example, to list all jobs that export a - // forecast named electricityforecast, specify the following filter: "Filters": [ { + // For example, to list all jobs that export a forecast + // named electricityforecast, specify the following filter: "Filters": [ { // "Condition": "IS", "Key": "ForecastArn", "Value": // "arn:aws:forecast:us-west-2::forecast/electricityforecast" } ] Filters []*types.Filter diff --git a/service/forecast/api_op_ListForecasts.go b/service/forecast/api_op_ListForecasts.go index ba0091621a5..3afd8aef7e0 100644 --- a/service/forecast/api_op_ListForecasts.go +++ b/service/forecast/api_op_ListForecasts.go @@ -39,19 +39,19 @@ type ListForecastsInput struct { // respectively. The match statement consists of a key and a value. Filter // properties // - // * Condition - The condition to apply. Valid values are IS and + // * Condition - The condition to apply. Valid values are IS and // IS_NOT. To include the forecasts that match the statement, specify IS. To // exclude matching forecasts, specify IS_NOT. // - // * Key - The name of the - // parameter to filter on. Valid values are DatasetGroupArn, PredictorArn, and - // Status. + // * Key - The name of the parameter + // to filter on. Valid values are DatasetGroupArn, PredictorArn, and Status. // - // * Value - The value to match. + // * + // Value - The value to match. // - // For example, to list all forecasts - // whose status is not ACTIVE, you would specify: "Filters": [ { "Condition": - // "IS_NOT", "Key": "Status", "Value": "ACTIVE" } ] + // For example, to list all forecasts whose status is + // not ACTIVE, you would specify: "Filters": [ { "Condition": "IS_NOT", "Key": + // "Status", "Value": "ACTIVE" } ] Filters []*types.Filter // The number of items to return in the response. diff --git a/service/forecast/api_op_ListPredictors.go b/service/forecast/api_op_ListPredictors.go index fb1db76f962..96487e2de44 100644 --- a/service/forecast/api_op_ListPredictors.go +++ b/service/forecast/api_op_ListPredictors.go @@ -39,19 +39,19 @@ type ListPredictorsInput struct { // respectively. The match statement consists of a key and a value. Filter // properties // - // * Condition - The condition to apply. Valid values are IS and + // * Condition - The condition to apply. Valid values are IS and // IS_NOT. To include the predictors that match the statement, specify IS. To // exclude matching predictors, specify IS_NOT. // - // * Key - The name of the - // parameter to filter on. Valid values are DatasetGroupArn and Status. + // * Key - The name of the parameter + // to filter on. Valid values are DatasetGroupArn and Status. // - // * - // Value - The value to match. + // * Value - The value + // to match. // - // For example, to list all predictors whose status is - // ACTIVE, you would specify: "Filters": [ { "Condition": "IS", "Key": "Status", - // "Value": "ACTIVE" } ] + // For example, to list all predictors whose status is ACTIVE, you would + // specify: "Filters": [ { "Condition": "IS", "Key": "Status", "Value": "ACTIVE" } + // ] Filters []*types.Filter // The number of items to return in the response. diff --git a/service/forecast/api_op_TagResource.go b/service/forecast/api_op_TagResource.go index 697f00ac407..fc615a6398a 100644 --- a/service/forecast/api_op_TagResource.go +++ b/service/forecast/api_op_TagResource.go @@ -42,33 +42,32 @@ type TagResourceInput struct { // The tags to add to the resource. A tag is an array of key-value pairs. 
The // following basic restrictions apply to tags: // - // * Maximum number of tags per + // * Maximum number of tags per // resource - 50. // - // * For each resource, each tag key must be unique, and each - // tag key can have only one value. + // * For each resource, each tag key must be unique, and each tag + // key can have only one value. // - // * Maximum key length - 128 Unicode - // characters in UTF-8. - // - // * Maximum value length - 256 Unicode characters in + // * Maximum key length - 128 Unicode characters in // UTF-8. // - // * If your tagging schema is used across multiple services and - // resources, remember that other services may have restrictions on allowed - // characters. Generally allowed characters are: letters, numbers, and spaces - // representable in UTF-8, and the following characters: + - = . _ : / @. + // * Maximum value length - 256 Unicode characters in UTF-8. + // + // * If your + // tagging schema is used across multiple services and resources, remember that + // other services may have restrictions on allowed characters. Generally allowed + // characters are: letters, numbers, and spaces representable in UTF-8, and the + // following characters: + - = . _ : / @. // - // * - // Tag keys and values are case sensitive. + // * Tag keys and values are case + // sensitive. // - // * Do not use aws:, AWS:, or any - // upper or lowercase combination of such as a prefix for keys as it is reserved - // for AWS use. You cannot edit or delete tag keys with this prefix. Values can - // have this prefix. If a tag value has aws as its prefix but the key does not, - // then Forecast considers it to be a user tag and will count against the limit of - // 50 tags. Tags with only the key prefix of aws do not count against your tags per - // resource limit. + // * Do not use aws:, AWS:, or any upper or lowercase combination of + // such as a prefix for keys as it is reserved for AWS use. You cannot edit or + // delete tag keys with this prefix. Values can have this prefix. If a tag value + // has aws as its prefix but the key does not, then Forecast considers it to be a + // user tag and will count against the limit of 50 tags. Tags with only the key + // prefix of aws do not count against your tags per resource limit. // // This member is required. Tags []*types.Tag diff --git a/service/forecast/types/enums.go b/service/forecast/types/enums.go index c8238d64ba5..19f789d88f0 100644 --- a/service/forecast/types/enums.go +++ b/service/forecast/types/enums.go @@ -28,9 +28,9 @@ type DatasetType string // Enum values for DatasetType const ( - DatasetTypeTarget_time_series DatasetType = "TARGET_TIME_SERIES" - DatasetTypeRelated_time_series DatasetType = "RELATED_TIME_SERIES" - DatasetTypeItem_metadata DatasetType = "ITEM_METADATA" + DatasetTypeTargetTimeSeries DatasetType = "TARGET_TIME_SERIES" + DatasetTypeRelatedTimeSeries DatasetType = "RELATED_TIME_SERIES" + DatasetTypeItemMetadata DatasetType = "ITEM_METADATA" ) // Values returns all known values for DatasetType. 
Note that this can be expanded @@ -48,13 +48,13 @@ type Domain string // Enum values for Domain const ( - DomainRetail Domain = "RETAIL" - DomainCustom Domain = "CUSTOM" - DomainInventory_planning Domain = "INVENTORY_PLANNING" - DomainEc2_capacity Domain = "EC2_CAPACITY" - DomainWork_force Domain = "WORK_FORCE" - DomainWeb_traffic Domain = "WEB_TRAFFIC" - DomainMetrics Domain = "METRICS" + DomainRetail Domain = "RETAIL" + DomainCustom Domain = "CUSTOM" + DomainInventoryPlanning Domain = "INVENTORY_PLANNING" + DomainEc2Capacity Domain = "EC2_CAPACITY" + DomainWorkForce Domain = "WORK_FORCE" + DomainWebTraffic Domain = "WEB_TRAFFIC" + DomainMetrics Domain = "METRICS" ) // Values returns all known values for Domain. Note that this can be expanded in @@ -110,8 +110,8 @@ type FilterConditionString string // Enum values for FilterConditionString const ( - FilterConditionStringIs FilterConditionString = "IS" - FilterConditionStringIs_not FilterConditionString = "IS_NOT" + FilterConditionStringIs FilterConditionString = "IS" + FilterConditionStringIsNot FilterConditionString = "IS_NOT" ) // Values returns all known values for FilterConditionString. Note that this can be diff --git a/service/forecast/types/types.go b/service/forecast/types/types.go index 4ab7aaa5bec..4c7efe1ec75 100644 --- a/service/forecast/types/types.go +++ b/service/forecast/types/types.go @@ -112,13 +112,13 @@ type DatasetImportJobSummary struct { // The last time that the dataset was modified. The time depends on the status of // the job, as follows: // - // * CREATE_PENDING - The same time as CreationTime. + // * CREATE_PENDING - The same time as CreationTime. // + // * + // CREATE_IN_PROGRESS - The current timestamp. // - // * CREATE_IN_PROGRESS - The current timestamp. - // - // * ACTIVE or CREATE_FAILED - - // When the job finished or failed. + // * ACTIVE or CREATE_FAILED - When + // the job finished or failed. LastModificationTime *time.Time // If an error occurred, an informational message about the error. @@ -128,12 +128,12 @@ type DatasetImportJobSummary struct { // the dataset. For example, when the import job status is CREATE_IN_PROGRESS, the // status of the dataset is UPDATE_IN_PROGRESS. States include: // - // * ACTIVE - // + // * ACTIVE // - // * CREATE_PENDING, CREATE_IN_PROGRESS, CREATE_FAILED + // * + // CREATE_PENDING, CREATE_IN_PROGRESS, CREATE_FAILED // - // * DELETE_PENDING, + // * DELETE_PENDING, // DELETE_IN_PROGRESS, DELETE_FAILED Status *string } @@ -324,28 +324,27 @@ type FeaturizationMethod struct { // parameters and their valid values for the "filling" featurization method for a // Target Time Series dataset. Bold signifies the default value. 
// - // * - // aggregation: sum, avg, first, min, max - // - // * frontfill: none + // * aggregation: + // sum, avg, first, min, max // - // * middlefill: - // zero, nan (not a number), value, median, mean, min, max + // * frontfill: none // - // * backfill: zero, - // nan, value, median, mean, min, max + // * middlefill: zero, nan (not a + // number), value, median, mean, min, max // - // The following list shows the parameters and - // their valid values for a Related Time Series featurization method (there are no - // defaults): + // * backfill: zero, nan, value, median, + // mean, min, max // - // * middlefill: zero, value, median, mean, min, max + // The following list shows the parameters and their valid values + // for a Related Time Series featurization method (there are no defaults): // - // * - // backfill: zero, value, median, mean, min, max + // * + // middlefill: zero, value, median, mean, min, max // - // * futurefill: zero, value, + // * backfill: zero, value, // median, mean, min, max + // + // * futurefill: zero, value, median, mean, min, max FeaturizationMethodParameters map[string]*string } @@ -399,12 +398,12 @@ type ForecastExportJobSummary struct { // The status of the forecast export job. States include: // - // * ACTIVE + // * ACTIVE // - // * + // * // CREATE_PENDING, CREATE_IN_PROGRESS, CREATE_FAILED // - // * DELETE_PENDING, + // * DELETE_PENDING, // DELETE_IN_PROGRESS, DELETE_FAILED // // The Status of the forecast export job must be @@ -444,12 +443,12 @@ type ForecastSummary struct { // The status of the forecast. States include: // - // * ACTIVE + // * ACTIVE // - // * CREATE_PENDING, + // * CREATE_PENDING, // CREATE_IN_PROGRESS, CREATE_FAILED // - // * DELETE_PENDING, DELETE_IN_PROGRESS, + // * DELETE_PENDING, DELETE_IN_PROGRESS, // DELETE_FAILED // // The Status of the forecast must be ACTIVE before you can query or @@ -602,19 +601,19 @@ type PredictorSummary struct { // The status of the predictor. States include: // - // * ACTIVE + // * ACTIVE // - // * - // CREATE_PENDING, CREATE_IN_PROGRESS, CREATE_FAILED + // * CREATE_PENDING, + // CREATE_IN_PROGRESS, CREATE_FAILED // - // * DELETE_PENDING, - // DELETE_IN_PROGRESS, DELETE_FAILED + // * DELETE_PENDING, DELETE_IN_PROGRESS, + // DELETE_FAILED // - // * UPDATE_PENDING, UPDATE_IN_PROGRESS, - // UPDATE_FAILED + // * UPDATE_PENDING, UPDATE_IN_PROGRESS, UPDATE_FAILED // - // The Status of the predictor must be ACTIVE before you can use the - // predictor to create a forecast. + // The Status + // of the predictor must be ACTIVE before you can use the predictor to create a + // forecast. Status *string } @@ -702,34 +701,34 @@ type Statistics struct { // are not included in the Jollyday library, but both are supported by Amazon // Forecast. 
Their holidays are: "IN" - INDIA // -// * JANUARY 26 - REPUBLIC DAY -// +// * JANUARY 26 - REPUBLIC DAY // -// * AUGUST 15 - INDEPENDENCE DAY +// * +// AUGUST 15 - INDEPENDENCE DAY // -// * OCTOBER 2 GANDHI'S BIRTHDAY +// * OCTOBER 2 GANDHI'S BIRTHDAY // -// "KR" - -// KOREA +// "KR" - KOREA // -// * JANUARY 1 - NEW YEAR +// * +// JANUARY 1 - NEW YEAR // -// * MARCH 1 - INDEPENDENCE MOVEMENT DAY +// * MARCH 1 - INDEPENDENCE MOVEMENT DAY // +// * MAY 5 - +// CHILDREN'S DAY // -// * MAY 5 - CHILDREN'S DAY +// * JUNE 6 - MEMORIAL DAY // -// * JUNE 6 - MEMORIAL DAY +// * AUGUST 15 - LIBERATION DAY // -// * AUGUST 15 - -// LIBERATION DAY +// * OCTOBER +// 3 - NATIONAL FOUNDATION DAY // -// * OCTOBER 3 - NATIONAL FOUNDATION DAY +// * OCTOBER 9 - HANGEUL DAY // -// * OCTOBER 9 - -// HANGEUL DAY -// -// * DECEMBER 25 - CHRISTMAS DAY +// * DECEMBER 25 - +// CHRISTMAS DAY type SupplementaryFeature struct { // The name of the feature. This must be "holiday". @@ -739,78 +738,77 @@ type SupplementaryFeature struct { // One of the following 2 letter country codes: // - // * "AR" - ARGENTINA - // - // * "AT" - // - AUSTRIA + // * "AR" - ARGENTINA // - // * "AU" - AUSTRALIA + // * "AT" - + // AUSTRIA // - // * "BE" - BELGIUM + // * "AU" - AUSTRALIA // - // * "BR" - BRAZIL + // * "BE" - BELGIUM // + // * "BR" - BRAZIL // - // * "CA" - CANADA + // * "CA" - + // CANADA // - // * "CN" - CHINA + // * "CN" - CHINA // - // * "CZ" - CZECH REPUBLIC + // * "CZ" - CZECH REPUBLIC // - // * "DK" - - // DENMARK + // * "DK" - DENMARK // - // * "EC" - ECUADOR + // * "EC" - + // ECUADOR // - // * "FI" - FINLAND + // * "FI" - FINLAND // - // * "FR" - FRANCE + // * "FR" - FRANCE // - // * - // "DE" - GERMANY + // * "DE" - GERMANY // - // * "HU" - HUNGARY + // * "HU" - + // HUNGARY // - // * "IE" - IRELAND - // - // * "IN" - INDIA + // * "IE" - IRELAND // + // * "IN" - INDIA // // * "IT" - ITALY // - // * "JP" - JAPAN - // - // * "KR" - KOREA + // * "JP" - JAPAN // - // * "LU" - - // LUXEMBOURG + // * + // "KR" - KOREA // - // * "MX" - MEXICO + // * "LU" - LUXEMBOURG // - // * "NL" - NETHERLANDS + // * "MX" - MEXICO // - // * "NO" - NORWAY + // * "NL" - NETHERLANDS // + // * "NO" + // - NORWAY // // * "PL" - POLAND // - // * "PT" - PORTUGAL + // * "PT" - PORTUGAL // - // * "RU" - RUSSIA + // * "RU" - RUSSIA // - // * "ZA" - SOUTH + // * "ZA" - SOUTH // AFRICA // - // * "ES" - SPAIN + // * "ES" - SPAIN // - // * "SE" - SWEDEN + // * "SE" - SWEDEN // - // * "CH" - SWITZERLAND + // * "CH" - SWITZERLAND // - // * - // "US" - UNITED STATES + // * "US" - UNITED + // STATES // - // * "UK" - UNITED KINGDOM + // * "UK" - UNITED KINGDOM // // This member is required. Value *string @@ -820,33 +818,32 @@ type SupplementaryFeature struct { // organize them. Each tag consists of a key and an optional value, both of which // you define. The following basic restrictions apply to tags: // -// * Maximum -// number of tags per resource - 50. -// -// * For each resource, each tag key must be -// unique, and each tag key can have only one value. +// * Maximum number of +// tags per resource - 50. // -// * Maximum key length - -// 128 Unicode characters in UTF-8. +// * For each resource, each tag key must be unique, and +// each tag key can have only one value. // -// * Maximum value length - 256 Unicode +// * Maximum key length - 128 Unicode // characters in UTF-8. // -// * If your tagging schema is used across multiple -// services and resources, remember that other services may have restrictions on -// allowed characters. 
Generally allowed characters are: letters, numbers, and -// spaces representable in UTF-8, and the following characters: + - = . _ : / @. +// * Maximum value length - 256 Unicode characters in +// UTF-8. // +// * If your tagging schema is used across multiple services and resources, +// remember that other services may have restrictions on allowed characters. +// Generally allowed characters are: letters, numbers, and spaces representable in +// UTF-8, and the following characters: + - = . _ : / @. // -// * Tag keys and values are case sensitive. +// * Tag keys and values are +// case sensitive. // -// * Do not use aws:, AWS:, or any -// upper or lowercase combination of such as a prefix for keys as it is reserved -// for AWS use. You cannot edit or delete tag keys with this prefix. Values can -// have this prefix. If a tag value has aws as its prefix but the key does not, -// then Forecast considers it to be a user tag and will count against the limit of -// 50 tags. Tags with only the key prefix of aws do not count against your tags per -// resource limit. +// * Do not use aws:, AWS:, or any upper or lowercase combination +// of such as a prefix for keys as it is reserved for AWS use. You cannot edit or +// delete tag keys with this prefix. Values can have this prefix. If a tag value +// has aws as its prefix but the key does not, then Forecast considers it to be a +// user tag and will count against the limit of 50 tags. Tags with only the key +// prefix of aws do not count against your tags per resource limit. type Tag struct { // One part of a key-value pair that makes up a tag. A key is a general label that @@ -871,12 +868,12 @@ type TestWindowSummary struct { // The status of the test. Possible status values are: // - // * ACTIVE + // * ACTIVE // - // * + // * // CREATE_IN_PROGRESS // - // * CREATE_FAILED + // * CREATE_FAILED Status *string // The time at which the test ended. @@ -908,10 +905,10 @@ type WindowSummary struct { // The type of evaluation. // - // * SUMMARY - The average metrics across all - // windows. + // * SUMMARY - The average metrics across all windows. // - // * COMPUTED - The metrics for the specified window. + // * + // COMPUTED - The metrics for the specified window. EvaluationType EvaluationType // The number of data points within the window. diff --git a/service/forecastquery/types/types.go b/service/forecastquery/types/types.go index 1c535a474ed..52f9db516e8 100644 --- a/service/forecastquery/types/types.go +++ b/service/forecastquery/types/types.go @@ -19,14 +19,14 @@ type Forecast struct { // The forecast. The string of the string-to-array map is one of the following // values: // - // * p10 + // * p10 // - // * p50 + // * p50 // - // * p90 + // * p90 // - // The default setting is ["0.1", "0.5", - // "0.9"]. Use the optional ForecastTypes parameter of the CreateForecast + // The default setting is ["0.1", "0.5", "0.9"]. Use + // the optional ForecastTypes parameter of the CreateForecast // (https://docs.aws.amazon.com/forecast/latest/dg/API_CreateForecast.html) // operation to change the values. The values will vary depending on how this is // set, with a minimum of 1 and a maximum of 5. diff --git a/service/frauddetector/api_op_UpdateModelVersionStatus.go b/service/frauddetector/api_op_UpdateModelVersionStatus.go index e223c6c45cf..a3741b3cb18 100644 --- a/service/frauddetector/api_op_UpdateModelVersionStatus.go +++ b/service/frauddetector/api_op_UpdateModelVersionStatus.go @@ -14,10 +14,10 @@ import ( // Updates the status of a model version. 
You can perform the following status // updates: // -// * Change the TRAINING_COMPLETE status to ACTIVE. +// * Change the TRAINING_COMPLETE status to ACTIVE. // -// * Change -// ACTIVEto INACTIVE. +// * Change ACTIVEto +// INACTIVE. func (c *Client) UpdateModelVersionStatus(ctx context.Context, params *UpdateModelVersionStatusInput, optFns ...func(*Options)) (*UpdateModelVersionStatusOutput, error) { if params == nil { params = &UpdateModelVersionStatusInput{} diff --git a/service/frauddetector/types/enums.go b/service/frauddetector/types/enums.go index 3ff978bc4e5..145e8087616 100644 --- a/service/frauddetector/types/enums.go +++ b/service/frauddetector/types/enums.go @@ -6,9 +6,9 @@ type DataSource string // Enum values for DataSource const ( - DataSourceEvent DataSource = "EVENT" - DataSourceModel_score DataSource = "MODEL_SCORE" - DataSourceExternal_model_score DataSource = "EXTERNAL_MODEL_SCORE" + DataSourceEvent DataSource = "EVENT" + DataSourceModelScore DataSource = "MODEL_SCORE" + DataSourceExternalModelScore DataSource = "EXTERNAL_MODEL_SCORE" ) // Values returns all known values for DataSource. Note that this can be expanded @@ -154,7 +154,7 @@ type ModelTypeEnum string // Enum values for ModelTypeEnum const ( - ModelTypeEnumOnline_fraud_insights ModelTypeEnum = "ONLINE_FRAUD_INSIGHTS" + ModelTypeEnumOnlineFraudInsights ModelTypeEnum = "ONLINE_FRAUD_INSIGHTS" ) // Values returns all known values for ModelTypeEnum. Note that this can be @@ -188,8 +188,8 @@ type RuleExecutionMode string // Enum values for RuleExecutionMode const ( - RuleExecutionModeAll_matched RuleExecutionMode = "ALL_MATCHED" - RuleExecutionModeFirst_matched RuleExecutionMode = "FIRST_MATCHED" + RuleExecutionModeAllMatched RuleExecutionMode = "ALL_MATCHED" + RuleExecutionModeFirstMatched RuleExecutionMode = "FIRST_MATCHED" ) // Values returns all known values for RuleExecutionMode. Note that this can be @@ -206,7 +206,7 @@ type TrainingDataSourceEnum string // Enum values for TrainingDataSourceEnum const ( - TrainingDataSourceEnumExternal_events TrainingDataSourceEnum = "EXTERNAL_EVENTS" + TrainingDataSourceEnumExternalEvents TrainingDataSourceEnum = "EXTERNAL_EVENTS" ) // Values returns all known values for TrainingDataSourceEnum. Note that this can diff --git a/service/frauddetector/types/errors.go b/service/frauddetector/types/errors.go index 5b04721a93a..c56304b0f97 100644 --- a/service/frauddetector/types/errors.go +++ b/service/frauddetector/types/errors.go @@ -29,16 +29,16 @@ func (e *AccessDeniedException) ErrorFault() smithy.ErrorFault { return smithy.F // An exception indicating there was a conflict during a delete operation. The // following delete operations can cause a conflict exception: // -// * -// DeleteDetector: A conflict exception will occur if the detector has associated -// Rules or DetectorVersions. You can only delete a detector if it has no Rules or +// * DeleteDetector: A +// conflict exception will occur if the detector has associated Rules or +// DetectorVersions. You can only delete a detector if it has no Rules or // DetectorVersions. // -// * DeleteDetectorVersion: A conflict exception will occur -// if the DetectorVersion status is ACTIVE. +// * DeleteDetectorVersion: A conflict exception will occur if +// the DetectorVersion status is ACTIVE. 
// -// * DeleteRule: A conflict exception -// will occur if the RuleVersion is in use by an associated ACTIVE or INACTIVE +// * DeleteRule: A conflict exception will +// occur if the RuleVersion is in use by an associated ACTIVE or INACTIVE // DetectorVersion. type ConflictException struct { Message *string diff --git a/service/fsx/api_op_CancelDataRepositoryTask.go b/service/fsx/api_op_CancelDataRepositoryTask.go index 5c3e8eb704e..c6e4ea94f17 100644 --- a/service/fsx/api_op_CancelDataRepositoryTask.go +++ b/service/fsx/api_op_CancelDataRepositoryTask.go @@ -15,14 +15,14 @@ import ( // in either the PENDING or EXECUTING state. When you cancel a task, Amazon FSx // does the following. // -// * Any files that FSx has already exported are not +// * Any files that FSx has already exported are not // reverted. // -// * FSx continues to export any files that are "in-flight" when the +// * FSx continues to export any files that are "in-flight" when the // cancel operation is received. // -// * FSx does not export any files that have not -// yet been exported. +// * FSx does not export any files that have not yet +// been exported. func (c *Client) CancelDataRepositoryTask(ctx context.Context, params *CancelDataRepositoryTaskInput, optFns ...func(*Options)) (*CancelDataRepositoryTaskOutput, error) { if params == nil { params = &CancelDataRepositoryTaskInput{} @@ -51,24 +51,24 @@ type CancelDataRepositoryTaskOutput struct { // The lifecycle status of the data repository task, as follows: // - // * PENDING - + // * PENDING - // Amazon FSx has not started the task. // - // * EXECUTING - Amazon FSx is processing - // the task. + // * EXECUTING - Amazon FSx is processing the + // task. // - // * FAILED - Amazon FSx was not able to complete the task. For - // example, there may be files the task failed to process. The + // * FAILED - Amazon FSx was not able to complete the task. For example, + // there may be files the task failed to process. The // DataRepositoryTaskFailureDetails property provides more information about task // failures. // - // * SUCCEEDED - FSx completed the task successfully. + // * SUCCEEDED - FSx completed the task successfully. // - // * - // CANCELED - Amazon FSx canceled the task and it did not complete. + // * CANCELED - + // Amazon FSx canceled the task and it did not complete. // - // * - // CANCELING - FSx is in process of canceling the task. + // * CANCELING - FSx is in + // process of canceling the task. Lifecycle types.DataRepositoryTaskLifecycle // The ID of the task being canceled. diff --git a/service/fsx/api_op_CreateBackup.go b/service/fsx/api_op_CreateBackup.go index 210a7418952..05727c18e4b 100644 --- a/service/fsx/api_op_CreateBackup.go +++ b/service/fsx/api_op_CreateBackup.go @@ -18,13 +18,13 @@ import ( // for Lustre file systems, you can create a backup only for file systems with the // following configuration: // -// * a Persistent deployment type +// * a Persistent deployment type // -// * is not -// linked to a data respository. +// * is not linked to a +// data respository. // -// For more information about backing up Amazon FSx -// for Lustre file systems, see Working with FSx for Lustre backups +// For more information about backing up Amazon FSx for Lustre +// file systems, see Working with FSx for Lustre backups // (https://docs.aws.amazon.com/fsx/latest/LustreGuide/using-backups-fsx.html). 
For // more information about backing up Amazon FSx for Lustre file systems, see // Working with FSx for Windows backups @@ -35,22 +35,21 @@ import ( // operation returns IncompatibleParameterError. If a backup with the specified // client request token doesn't exist, CreateBackup does the following: // -// * -// Creates a new Amazon FSx backup with an assigned ID, and an initial lifecycle -// state of CREATING. +// * Creates +// a new Amazon FSx backup with an assigned ID, and an initial lifecycle state of +// CREATING. // -// * Returns the description of the backup. +// * Returns the description of the backup. // -// By using the -// idempotent operation, you can retry a CreateBackup operation without the risk of -// creating an extra backup. This approach can be useful when an initial call fails -// in a way that makes it unclear whether a backup was created. If you use the same -// client request token and the initial call created a backup, the operation -// returns a successful result because all the parameters are the same. The -// CreateBackup operation returns while the backup's lifecycle state is still -// CREATING. You can check the backup creation status by calling the -// DescribeBackups operation, which returns the backup state along with other -// information. +// By using the idempotent +// operation, you can retry a CreateBackup operation without the risk of creating +// an extra backup. This approach can be useful when an initial call fails in a way +// that makes it unclear whether a backup was created. If you use the same client +// request token and the initial call created a backup, the operation returns a +// successful result because all the parameters are the same. The CreateBackup +// operation returns while the backup's lifecycle state is still CREATING. You can +// check the backup creation status by calling the DescribeBackups operation, which +// returns the backup state along with other information. func (c *Client) CreateBackup(ctx context.Context, params *CreateBackupInput, optFns ...func(*Options)) (*CreateBackupOutput, error) { if params == nil { params = &CreateBackupInput{} diff --git a/service/fsx/api_op_CreateFileSystem.go b/service/fsx/api_op_CreateFileSystem.go index 4307462738e..8d943f8b8f1 100644 --- a/service/fsx/api_op_CreateFileSystem.go +++ b/service/fsx/api_op_CreateFileSystem.go @@ -19,11 +19,11 @@ import ( // IncompatibleParameterError. If a file system with the specified client request // token doesn't exist, CreateFileSystem does the following: // -// * Creates a new, +// * Creates a new, // empty Amazon FSx file system with an assigned ID, and an initial lifecycle state // of CREATING. // -// * Returns the description of the file system. +// * Returns the description of the file system. // // This operation // requires a client request token in the request that Amazon FSx uses to ensure @@ -64,24 +64,24 @@ type CreateFileSystemInput struct { // Sets the storage capacity of the file system that you're creating. For Lustre // file systems: // - // * For SCRATCH_2 and PERSISTENT_1 SSD deployment types, valid + // * For SCRATCH_2 and PERSISTENT_1 SSD deployment types, valid // values are 1200 GiB, 2400 GiB, and increments of 2400 GiB. // - // * For PERSISTENT - // HDD file systems, valid values are increments of 6000 GiB for 12 MB/s/TiB file + // * For PERSISTENT HDD + // file systems, valid values are increments of 6000 GiB for 12 MB/s/TiB file // systems and increments of 1800 GiB for 40 MB/s/TiB file systems. 
// - // * For + // * For // SCRATCH_1 deployment type, valid values are 1200 GiB, 2400 GiB, and increments // of 3600 GiB. // // For Windows file systems: // - // * If StorageType=SSD, valid values - // are 32 GiB - 65,536 GiB (64 TiB). + // * If StorageType=SSD, valid values are + // 32 GiB - 65,536 GiB (64 TiB). // - // * If StorageType=HDD, valid values are - // 2000 GiB - 65,536 GiB (64 TiB). + // * If StorageType=HDD, valid values are 2000 GiB - + // 65,536 GiB (64 TiB). // // This member is required. StorageCapacity *int32 @@ -124,12 +124,12 @@ type CreateFileSystemInput struct { // Sets the storage type for the file system you're creating. Valid values are SSD // and HDD. // - // * Set to SSD to use solid state drive storage. SSD is supported on - // all Windows and Lustre deployment types. + // * Set to SSD to use solid state drive storage. SSD is supported on all + // Windows and Lustre deployment types. // - // * Set to HDD to use hard disk - // drive storage. HDD is supported on SINGLE_AZ_2 and MULTI_AZ_1 Windows file - // system deployment types, and on PERSISTENT Lustre file system deployment + // * Set to HDD to use hard disk drive + // storage. HDD is supported on SINGLE_AZ_2 and MULTI_AZ_1 Windows file system + // deployment types, and on PERSISTENT Lustre file system deployment // types. // // Default value is SSD. For more information, see Storage Type Options diff --git a/service/fsx/api_op_CreateFileSystemFromBackup.go b/service/fsx/api_op_CreateFileSystemFromBackup.go index 402593c1870..ddcf0463b12 100644 --- a/service/fsx/api_op_CreateFileSystemFromBackup.go +++ b/service/fsx/api_op_CreateFileSystemFromBackup.go @@ -20,16 +20,16 @@ import ( // specified client request token doesn't exist, this operation does the // following: // -// * Creates a new Amazon FSx file system from backup with an -// assigned ID, and an initial lifecycle state of CREATING. +// * Creates a new Amazon FSx file system from backup with an assigned +// ID, and an initial lifecycle state of CREATING. // -// * Returns the -// description of the file system. +// * Returns the description of +// the file system. // -// Parameters like Active Directory, default share -// name, automatic backup, and backup settings default to the parameters of the -// file system that was backed up, unless overridden. You can explicitly supply -// other settings. By using the idempotent operation, you can retry a +// Parameters like Active Directory, default share name, +// automatic backup, and backup settings default to the parameters of the file +// system that was backed up, unless overridden. You can explicitly supply other +// settings. By using the idempotent operation, you can retry a // CreateFileSystemFromBackup call without the risk of creating an extra file // system. This approach can be useful when an initial call fails in a way that // makes it unclear whether a file system was created. Examples are if a transport @@ -91,19 +91,18 @@ type CreateFileSystemFromBackupInput struct { // Sets the storage type for the Windows file system you're creating from a backup. // Valid values are SSD and HDD. // - // * Set to SSD to use solid state drive - // storage. Supported on all Windows deployment types. + // * Set to SSD to use solid state drive storage. + // Supported on all Windows deployment types. // - // * Set to HDD to use - // hard disk drive storage. Supported on SINGLE_AZ_2 and MULTI_AZ_1 Windows file - // system deployment types. + // * Set to HDD to use hard disk drive + // storage. 
Supported on SINGLE_AZ_2 and MULTI_AZ_1 Windows file system deployment + // types. // - // Default value is SSD. HDD and SSD storage types have - // different minimum storage capacity requirements. A restored file system's - // storage capacity is tied to the file system that was backed up. You can create a - // file system that uses HDD storage from a backup of a file system that used SSD - // storage only if the original SSD file system had a storage capacity of at least - // 2000 GiB. + // Default value is SSD. HDD and SSD storage types have different minimum + // storage capacity requirements. A restored file system's storage capacity is tied + // to the file system that was backed up. You can create a file system that uses + // HDD storage from a backup of a file system that used SSD storage only if the + // original SSD file system had a storage capacity of at least 2000 GiB. StorageType types.StorageType // The tags to be applied to the file system at file system creation. The key value diff --git a/service/fsx/api_op_DescribeBackups.go b/service/fsx/api_op_DescribeBackups.go index 8b408618f0f..edeb75be696 100644 --- a/service/fsx/api_op_DescribeBackups.go +++ b/service/fsx/api_op_DescribeBackups.go @@ -24,13 +24,13 @@ import ( // the last NextToken value until a response has no NextToken. When using this // action, keep the following in mind: // -// * The implementation might return fewer +// * The implementation might return fewer // than MaxResults file system descriptions while still including a NextToken // value. // -// * The order of backups returned in the response of one -// DescribeBackups call and the order of backups returned across the responses of a -// multi-call iteration is unspecified. +// * The order of backups returned in the response of one DescribeBackups +// call and the order of backups returned across the responses of a multi-call +// iteration is unspecified. func (c *Client) DescribeBackups(ctx context.Context, params *DescribeBackupsInput, optFns ...func(*Options)) (*DescribeBackupsOutput, error) { if params == nil { params = &DescribeBackupsInput{} diff --git a/service/fsx/api_op_DescribeFileSystems.go b/service/fsx/api_op_DescribeFileSystems.go index 9055364e91c..6dfb3ed0229 100644 --- a/service/fsx/api_op_DescribeFileSystems.go +++ b/service/fsx/api_op_DescribeFileSystems.go @@ -25,13 +25,13 @@ import ( // parameter set to the value of the last NextToken value until a response has no // NextToken. When using this action, keep the following in mind: // -// * The +// * The // implementation might return fewer than MaxResults file system descriptions while // still including a NextToken value. // -// * The order of file systems returned in -// the response of one DescribeFileSystems call and the order of file systems -// returned across the responses of a multicall iteration is unspecified. +// * The order of file systems returned in the +// response of one DescribeFileSystems call and the order of file systems returned +// across the responses of a multicall iteration is unspecified. 
func (c *Client) DescribeFileSystems(ctx context.Context, params *DescribeFileSystemsInput, optFns ...func(*Options)) (*DescribeFileSystemsOutput, error) { if params == nil { params = &DescribeFileSystemsInput{} diff --git a/service/fsx/api_op_ListTagsForResource.go b/service/fsx/api_op_ListTagsForResource.go index 9f7d859cd88..70a0712449c 100644 --- a/service/fsx/api_op_ListTagsForResource.go +++ b/service/fsx/api_op_ListTagsForResource.go @@ -22,13 +22,13 @@ import ( // parameter set to the value of the last NextToken value until a response has no // NextToken. When using this action, keep the following in mind: // -// * The +// * The // implementation might return fewer than MaxResults file system descriptions while // still including a NextToken value. // -// * The order of tags returned in the -// response of one ListTagsForResource call and the order of tags returned across -// the responses of a multi-call iteration is unspecified. +// * The order of tags returned in the response +// of one ListTagsForResource call and the order of tags returned across the +// responses of a multi-call iteration is unspecified. func (c *Client) ListTagsForResource(ctx context.Context, params *ListTagsForResourceInput, optFns ...func(*Options)) (*ListTagsForResourceOutput, error) { if params == nil { params = &ListTagsForResourceInput{} diff --git a/service/fsx/api_op_UpdateFileSystem.go b/service/fsx/api_op_UpdateFileSystem.go index 24aee9666ef..9d945439912 100644 --- a/service/fsx/api_op_UpdateFileSystem.go +++ b/service/fsx/api_op_UpdateFileSystem.go @@ -14,34 +14,34 @@ import ( // Use this operation to update the configuration of an existing Amazon FSx file // system. You can update multiple properties in a single request. For Amazon FSx -// for Windows File Server file systems, you can update the following properties: -// +// for Windows File Server file systems, you can update the following +// properties: // // * AutomaticBackupRetentionDays // -// * DailyAutomaticBackupStartTime +// * DailyAutomaticBackupStartTime // -// * +// * // SelfManagedActiveDirectoryConfiguration // -// * StorageCapacity +// * StorageCapacity // -// * +// * // ThroughputCapacity // -// * WeeklyMaintenanceStartTime -// -// For Amazon FSx for Lustre -// file systems, you can update the following properties: +// * WeeklyMaintenanceStartTime // -// * AutoImportPolicy +// For Amazon FSx for Lustre file +// systems, you can update the following properties: // +// * AutoImportPolicy // -// * AutomaticBackupRetentionDays +// * +// AutomaticBackupRetentionDays // -// * DailyAutomaticBackupStartTime +// * DailyAutomaticBackupStartTime // -// * +// * // WeeklyMaintenanceStartTime func (c *Client) UpdateFileSystem(ctx context.Context, params *UpdateFileSystemInput, optFns ...func(*Options)) (*UpdateFileSystemOutput, error) { if params == nil { diff --git a/service/fsx/types/enums.go b/service/fsx/types/enums.go index 4c544485fa4..0a60d502484 100644 --- a/service/fsx/types/enums.go +++ b/service/fsx/types/enums.go @@ -6,10 +6,10 @@ type ActiveDirectoryErrorType string // Enum values for ActiveDirectoryErrorType const ( - ActiveDirectoryErrorTypeDomain_not_found ActiveDirectoryErrorType = "DOMAIN_NOT_FOUND" - ActiveDirectoryErrorTypeIncompatible_domain_mode ActiveDirectoryErrorType = "INCOMPATIBLE_DOMAIN_MODE" - ActiveDirectoryErrorTypeWrong_vpc ActiveDirectoryErrorType = "WRONG_VPC" - ActiveDirectoryErrorTypeInvalid_domain_stage ActiveDirectoryErrorType = "INVALID_DOMAIN_STAGE" + ActiveDirectoryErrorTypeDomainNotFound 
ActiveDirectoryErrorType = "DOMAIN_NOT_FOUND" + ActiveDirectoryErrorTypeIncompatibleDomainMode ActiveDirectoryErrorType = "INCOMPATIBLE_DOMAIN_MODE" + ActiveDirectoryErrorTypeWrongVpc ActiveDirectoryErrorType = "WRONG_VPC" + ActiveDirectoryErrorTypeInvalidDomainStage ActiveDirectoryErrorType = "INVALID_DOMAIN_STAGE" ) // Values returns all known values for ActiveDirectoryErrorType. Note that this can @@ -28,8 +28,8 @@ type AdministrativeActionType string // Enum values for AdministrativeActionType const ( - AdministrativeActionTypeFile_system_update AdministrativeActionType = "FILE_SYSTEM_UPDATE" - AdministrativeActionTypeStorage_optimization AdministrativeActionType = "STORAGE_OPTIMIZATION" + AdministrativeActionTypeFileSystemUpdate AdministrativeActionType = "FILE_SYSTEM_UPDATE" + AdministrativeActionTypeStorageOptimization AdministrativeActionType = "STORAGE_OPTIMIZATION" ) // Values returns all known values for AdministrativeActionType. Note that this can @@ -46,9 +46,9 @@ type AutoImportPolicyType string // Enum values for AutoImportPolicyType const ( - AutoImportPolicyTypeNone AutoImportPolicyType = "NONE" - AutoImportPolicyTypeNew AutoImportPolicyType = "NEW" - AutoImportPolicyTypeNew_changed AutoImportPolicyType = "NEW_CHANGED" + AutoImportPolicyTypeNone AutoImportPolicyType = "NONE" + AutoImportPolicyTypeNew AutoImportPolicyType = "NEW" + AutoImportPolicyTypeNewChanged AutoImportPolicyType = "NEW_CHANGED" ) // Values returns all known values for AutoImportPolicyType. Note that this can be @@ -90,8 +90,8 @@ type BackupType string // Enum values for BackupType const ( - BackupTypeAutomatic BackupType = "AUTOMATIC" - BackupTypeUser_initiated BackupType = "USER_INITIATED" + BackupTypeAutomatic BackupType = "AUTOMATIC" + BackupTypeUserInitiated BackupType = "USER_INITIATED" ) // Values returns all known values for BackupType. Note that this can be expanded @@ -132,8 +132,8 @@ type DataRepositoryTaskFilterName string // Enum values for DataRepositoryTaskFilterName const ( - DataRepositoryTaskFilterNameFile_system_id DataRepositoryTaskFilterName = "file-system-id" - DataRepositoryTaskFilterNameTask_lifecycle DataRepositoryTaskFilterName = "task-lifecycle" + DataRepositoryTaskFilterNameFileSystemId DataRepositoryTaskFilterName = "file-system-id" + DataRepositoryTaskFilterNameTaskLifecycle DataRepositoryTaskFilterName = "task-lifecycle" ) // Values returns all known values for DataRepositoryTaskFilterName. Note that this @@ -236,8 +236,8 @@ type FileSystemMaintenanceOperation string // Enum values for FileSystemMaintenanceOperation const ( - FileSystemMaintenanceOperationPatching FileSystemMaintenanceOperation = "PATCHING" - FileSystemMaintenanceOperationBacking_up FileSystemMaintenanceOperation = "BACKING_UP" + FileSystemMaintenanceOperationPatching FileSystemMaintenanceOperation = "PATCHING" + FileSystemMaintenanceOperationBackingUp FileSystemMaintenanceOperation = "BACKING_UP" ) // Values returns all known values for FileSystemMaintenanceOperation. Note that @@ -273,9 +273,9 @@ type FilterName string // Enum values for FilterName const ( - FilterNameFile_system_id FilterName = "file-system-id" - FilterNameBackup_type FilterName = "backup-type" - FilterNameFile_system_type FilterName = "file-system-type" + FilterNameFileSystemId FilterName = "file-system-id" + FilterNameBackupType FilterName = "backup-type" + FilterNameFileSystemType FilterName = "file-system-type" ) // Values returns all known values for FilterName. 
Note that this can be expanded @@ -293,9 +293,9 @@ type LustreDeploymentType string // Enum values for LustreDeploymentType const ( - LustreDeploymentTypeScratch_1 LustreDeploymentType = "SCRATCH_1" - LustreDeploymentTypeScratch_2 LustreDeploymentType = "SCRATCH_2" - LustreDeploymentTypePersistent_1 LustreDeploymentType = "PERSISTENT_1" + LustreDeploymentTypeScratch1 LustreDeploymentType = "SCRATCH_1" + LustreDeploymentTypeScratch2 LustreDeploymentType = "SCRATCH_2" + LustreDeploymentTypePersistent1 LustreDeploymentType = "PERSISTENT_1" ) // Values returns all known values for LustreDeploymentType. Note that this can be @@ -313,7 +313,7 @@ type ReportFormat string // Enum values for ReportFormat const ( - ReportFormatReport_csv_20191124 ReportFormat = "REPORT_CSV_20191124" + ReportFormatReportCsv20191124 ReportFormat = "REPORT_CSV_20191124" ) // Values returns all known values for ReportFormat. Note that this can be expanded @@ -329,7 +329,7 @@ type ReportScope string // Enum values for ReportScope const ( - ReportScopeFailed_files_only ReportScope = "FAILED_FILES_ONLY" + ReportScopeFailedFilesOnly ReportScope = "FAILED_FILES_ONLY" ) // Values returns all known values for ReportScope. Note that this can be expanded @@ -345,10 +345,10 @@ type ServiceLimit string // Enum values for ServiceLimit const ( - ServiceLimitFile_system_count ServiceLimit = "FILE_SYSTEM_COUNT" - ServiceLimitTotal_throughput_capacity ServiceLimit = "TOTAL_THROUGHPUT_CAPACITY" - ServiceLimitTotal_storage ServiceLimit = "TOTAL_STORAGE" - ServiceLimitTotal_user_initiated_backups ServiceLimit = "TOTAL_USER_INITIATED_BACKUPS" + ServiceLimitFileSystemCount ServiceLimit = "FILE_SYSTEM_COUNT" + ServiceLimitTotalThroughputCapacity ServiceLimit = "TOTAL_THROUGHPUT_CAPACITY" + ServiceLimitTotalStorage ServiceLimit = "TOTAL_STORAGE" + ServiceLimitTotalUserInitiatedBackups ServiceLimit = "TOTAL_USER_INITIATED_BACKUPS" ) // Values returns all known values for ServiceLimit. Note that this can be expanded @@ -367,11 +367,11 @@ type Status string // Enum values for Status const ( - StatusFailed Status = "FAILED" - StatusIn_progress Status = "IN_PROGRESS" - StatusPending Status = "PENDING" - StatusCompleted Status = "COMPLETED" - StatusUpdated_optimizing Status = "UPDATED_OPTIMIZING" + StatusFailed Status = "FAILED" + StatusInProgress Status = "IN_PROGRESS" + StatusPending Status = "PENDING" + StatusCompleted Status = "COMPLETED" + StatusUpdatedOptimizing Status = "UPDATED_OPTIMIZING" ) // Values returns all known values for Status. Note that this can be expanded in @@ -409,9 +409,9 @@ type WindowsDeploymentType string // Enum values for WindowsDeploymentType const ( - WindowsDeploymentTypeMulti_az_1 WindowsDeploymentType = "MULTI_AZ_1" - WindowsDeploymentTypeSingle_az_1 WindowsDeploymentType = "SINGLE_AZ_1" - WindowsDeploymentTypeSingle_az_2 WindowsDeploymentType = "SINGLE_AZ_2" + WindowsDeploymentTypeMultiAz1 WindowsDeploymentType = "MULTI_AZ_1" + WindowsDeploymentTypeSingleAz1 WindowsDeploymentType = "SINGLE_AZ_1" + WindowsDeploymentTypeSingleAz2 WindowsDeploymentType = "SINGLE_AZ_2" ) // Values returns all known values for WindowsDeploymentType. 
Note that this can be diff --git a/service/fsx/types/types.go b/service/fsx/types/types.go index 979ac1613ad..7d505962182 100644 --- a/service/fsx/types/types.go +++ b/service/fsx/types/types.go @@ -24,19 +24,18 @@ type AdministrativeAction struct { // Describes the type of administrative action, as follows: // - // * - // FILE_SYSTEM_UPDATE - A file system update administrative action initiated by the - // user from the Amazon FSx console, API (UpdateFileSystem), or CLI - // (update-file-system). A - // - // * STORAGE_OPTIMIZATION - Once the - // FILE_SYSTEM_UPDATE task to increase a file system's storage capacity completes - // successfully, a STORAGE_OPTIMIZATION task starts. Storage optimization is the - // process of migrating the file system data to the new, larger disks. You can - // track the storage migration progress using the ProgressPercent property. When - // STORAGE_OPTIMIZATION completes successfully, the parent FILE_SYSTEM_UPDATE - // action status changes to COMPLETED. For more information, see Managing Storage - // Capacity + // * FILE_SYSTEM_UPDATE - + // A file system update administrative action initiated by the user from the Amazon + // FSx console, API (UpdateFileSystem), or CLI (update-file-system). A + // + // * + // STORAGE_OPTIMIZATION - Once the FILE_SYSTEM_UPDATE task to increase a file + // system's storage capacity completes successfully, a STORAGE_OPTIMIZATION task + // starts. Storage optimization is the process of migrating the file system data to + // the new, larger disks. You can track the storage migration progress using the + // ProgressPercent property. When STORAGE_OPTIMIZATION completes successfully, the + // parent FILE_SYSTEM_UPDATE action status changes to COMPLETED. For more + // information, see Managing Storage Capacity // (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-storage-capacity.html). AdministrativeActionType AdministrativeActionType @@ -51,19 +50,19 @@ type AdministrativeAction struct { // Describes the status of the administrative action, as follows: // - // * FAILED - + // * FAILED - // Amazon FSx failed to process the administrative action successfully. // - // * + // * // IN_PROGRESS - Amazon FSx is processing the administrative action. // - // * PENDING - // - Amazon FSx is waiting to process the administrative action. + // * PENDING - + // Amazon FSx is waiting to process the administrative action. // - // * COMPLETED - + // * COMPLETED - // Amazon FSx has finished processing the administrative task. // - // * + // * // UPDATED_OPTIMIZING - For a storage capacity increase update, Amazon FSx has // updated the file system with the new storage capacity, and is now performing the // storage optimization process. For more information, see Managing Storage @@ -105,19 +104,19 @@ type Backup struct { // The lifecycle status of the backup. // - // * AVAILABLE - The backup is fully + // * AVAILABLE - The backup is fully // available. // - // * CREATING - FSx is creating the backup. + // * CREATING - FSx is creating the backup. // - // * TRANSFERRING - - // For Lustre file systems only; FSx is transferring the backup to S3. + // * TRANSFERRING - For + // Lustre file systems only; FSx is transferring the backup to S3. // - // * - // DELETED - The backup was deleted is no longer available. + // * DELETED - The + // backup was deleted is no longer available. // - // * FAILED - Amazon - // FSx could not complete the backup. + // * FAILED - Amazon FSx could not + // complete the backup. // // This member is required. 
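The AdministrativeAction documentation above distinguishes FILE_SYSTEM_UPDATE from the STORAGE_OPTIMIZATION task that follows a storage-capacity increase. The following is a small hypothetical helper, added for illustration and not part of the patch, switching on the renamed constants; only the AdministrativeActionType field is taken from this diff.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/fsx/types"
)

// describeAction is an illustrative helper, not an SDK API.
func describeAction(a types.AdministrativeAction) string {
	switch a.AdministrativeActionType {
	case types.AdministrativeActionTypeFileSystemUpdate:
		return "file system update requested by the user"
	case types.AdministrativeActionTypeStorageOptimization:
		return "storage optimization: migrating data to the new, larger disks"
	default:
		return fmt.Sprintf("unrecognized administrative action %q", a.AdministrativeActionType)
	}
}

func main() {
	fmt.Println(describeAction(types.AdministrativeAction{
		AdministrativeActionType: types.AdministrativeActionTypeStorageOptimization,
	}))
}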
Lifecycle BackupLifecycle @@ -200,23 +199,23 @@ type CreateFileSystemLustreConfiguration struct { // your file and directory listings up to date as you add or modify objects in your // linked S3 bucket. AutoImportPolicy can have the following values: // - // * NONE - + // * NONE - // (Default) AutoImport is off. Amazon FSx only updates file and directory listings // from the linked S3 bucket when the file system is created. FSx does not update // file and directory listings for any new or changed objects after choosing this // option. // - // * NEW - AutoImport is on. Amazon FSx automatically imports - // directory listings of any new objects added to the linked S3 bucket that do not - // currently exist in the FSx file system. + // * NEW - AutoImport is on. Amazon FSx automatically imports directory + // listings of any new objects added to the linked S3 bucket that do not currently + // exist in the FSx file system. // - // * NEW_CHANGED - AutoImport is on. - // Amazon FSx automatically imports file and directory listings of any new objects - // added to the S3 bucket and any existing objects that are changed in the S3 - // bucket after you choose this option. + // * NEW_CHANGED - AutoImport is on. Amazon FSx + // automatically imports file and directory listings of any new objects added to + // the S3 bucket and any existing objects that are changed in the S3 bucket after + // you choose this option. // - // For more information, see Automatically - // import updates from your S3 bucket + // For more information, see Automatically import updates + // from your S3 bucket // (https://docs.aws.amazon.com/fsx/latest/LustreGuide/autoimport-data-repo.html). AutoImportPolicy AutoImportPolicyType @@ -348,17 +347,17 @@ type CreateFileSystemWindowsConfiguration struct { // Specifies the file system deployment type, valid values are the following: // - // - // * MULTI_AZ_1 - Deploys a high availability file system that is configured for + // * + // MULTI_AZ_1 - Deploys a high availability file system that is configured for // Multi-AZ redundancy to tolerate temporary Availability Zone (AZ) unavailability. // You can only deploy a Multi-AZ file system in AWS Regions that have a minimum of // three Availability Zones. Also supports HDD storage type // - // * SINGLE_AZ_1 - + // * SINGLE_AZ_1 - // (Default) Choose to deploy a file system that is configured for single AZ // redundancy. // - // * SINGLE_AZ_2 - The latest generation Single AZ file system. + // * SINGLE_AZ_2 - The latest generation Single AZ file system. // Specifies a file system that is configured for single AZ redundancy and supports // HDD storage type. // @@ -394,19 +393,19 @@ type DataRepositoryConfiguration struct { // listings up to date as you add or modify objects in your linked S3 bucket. // AutoImportPolicy can have the following values: // - // * NONE - (Default) - // AutoImport is off. Amazon FSx only updates file and directory listings from the - // linked S3 bucket when the file system is created. FSx does not update file and - // directory listings for any new or changed objects after choosing this option. + // * NONE - (Default) AutoImport + // is off. Amazon FSx only updates file and directory listings from the linked S3 + // bucket when the file system is created. FSx does not update file and directory + // listings for any new or changed objects after choosing this option. // + // * NEW - + // AutoImport is on. 
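The AutoImportPolicy field shown above for CreateFileSystemLustreConfiguration accepts the renamed constants directly. A minimal editorial sketch follows, using only that field; a real CreateFileSystem request needs additional fields that are omitted here.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/fsx/types"
)

func main() {
	// Import listings for both new and changed objects in the linked S3 bucket.
	lustre := types.CreateFileSystemLustreConfiguration{
		AutoImportPolicy: types.AutoImportPolicyTypeNewChanged,
	}
	fmt.Println(lustre.AutoImportPolicy) // NEW_CHANGED
}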
Amazon FSx automatically imports directory listings of any new + // objects added to the linked S3 bucket that do not currently exist in the FSx + // file system. // - // * NEW - AutoImport is on. Amazon FSx automatically imports directory listings of - // any new objects added to the linked S3 bucket that do not currently exist in the - // FSx file system. - // - // * NEW_CHANGED - AutoImport is on. Amazon FSx automatically - // imports file and directory listings of any new objects added to the S3 bucket - // and any existing objects that are changed in the S3 bucket after you choose this + // * NEW_CHANGED - AutoImport is on. Amazon FSx automatically imports + // file and directory listings of any new objects added to the S3 bucket and any + // existing objects that are changed in the S3 bucket after you choose this // option. // // For more information, see Automatically import updates from your S3 @@ -437,23 +436,23 @@ type DataRepositoryConfiguration struct { ImportedFileChunkSize *int32 // Describes the state of the file system's S3 durable data repository, if it is - // configured with an S3 repository. The lifecycle can have the following values: + // configured with an S3 repository. The lifecycle can have the following + // values: // + // * CREATING - The data repository configuration between the FSx file + // system and the linked S3 data repository is being created. The data repository + // is unavailable. // - // * CREATING - The data repository configuration between the FSx file system and - // the linked S3 data repository is being created. The data repository is - // unavailable. + // * AVAILABLE - The data repository is available for use. // - // * AVAILABLE - The data repository is available for use. - // - // * + // * // MISCONFIGURED - Amazon FSx cannot automatically import updates from the S3 // bucket until the data repository configuration is corrected. For more // information, see Troubleshooting a Misconfigured linked S3 bucket // (https://docs.aws.amazon.com/fsx/latest/LustreGuide/troubleshooting.html#troubleshooting-misconfigured-data-repository). // - // - // * UPDATING - The data repository is undergoing a customer initiated update and + // * + // UPDATING - The data repository is undergoing a customer initiated update and // availability may be impacted. Lifecycle DataRepositoryLifecycle } @@ -484,29 +483,29 @@ type DataRepositoryTask struct { // The lifecycle status of the data repository task, as follows: // - // * PENDING - + // * PENDING - // Amazon FSx has not started the task. // - // * EXECUTING - Amazon FSx is processing - // the task. + // * EXECUTING - Amazon FSx is processing the + // task. // - // * FAILED - Amazon FSx was not able to complete the task. For - // example, there may be files the task failed to process. The + // * FAILED - Amazon FSx was not able to complete the task. For example, + // there may be files the task failed to process. The // DataRepositoryTaskFailureDetails property provides more information about task // failures. // - // * SUCCEEDED - FSx completed the task successfully. + // * SUCCEEDED - FSx completed the task successfully. // - // * - // CANCELED - Amazon FSx canceled the task and it did not complete. + // * CANCELED - + // Amazon FSx canceled the task and it did not complete. // - // * - // CANCELING - FSx is in process of canceling the task. + // * CANCELING - FSx is in + // process of canceling the task. 
// - // You cannot delete an FSx - // for Lustre file system if there are data repository tasks for the file system in - // the PENDING or EXECUTING states. Please retry when the data repository task is - // finished (with a status of CANCELED, SUCCEEDED, or FAILED). You can use the + // You cannot delete an FSx for Lustre file system + // if there are data repository tasks for the file system in the PENDING or + // EXECUTING states. Please retry when the data repository task is finished (with a + // status of CANCELED, SUCCEEDED, or FAILED). You can use the // DescribeDataRepositoryTask action to monitor the task status. Contact the FSx // team if you need to delete your file system immediately. // @@ -583,12 +582,12 @@ type DataRepositoryTaskFilter struct { // Name of the task property to use in filtering the tasks returned in the // response. // - // * Use file-system-id to retrieve data repository tasks for - // specific file systems. + // * Use file-system-id to retrieve data repository tasks for specific + // file systems. // - // * Use task-lifecycle to retrieve data repository - // tasks with one or more specific lifecycle states, as follows: CANCELED, - // EXECUTING, FAILED, PENDING, and SUCCEEDED. + // * Use task-lifecycle to retrieve data repository tasks with one + // or more specific lifecycle states, as follows: CANCELED, EXECUTING, FAILED, + // PENDING, and SUCCEEDED. Name DataRepositoryTaskFilterName // Use Values to include the specific file system IDs and task lifecycle states for @@ -706,24 +705,24 @@ type FileSystem struct { // The lifecycle status of the file system, following are the possible values and // what they mean: // - // * AVAILABLE - The file system is in a healthy state, and is + // * AVAILABLE - The file system is in a healthy state, and is // reachable and available for use. // - // * CREATING - Amazon FSx is creating the - // new file system. + // * CREATING - Amazon FSx is creating the new + // file system. // - // * DELETING - Amazon FSx is deleting an existing file - // system. + // * DELETING - Amazon FSx is deleting an existing file system. // - // * FAILED - An existing file system has experienced an unrecoverable - // failure. When creating a new file system, Amazon FSx was unable to create the - // file system. + // * + // FAILED - An existing file system has experienced an unrecoverable failure. When + // creating a new file system, Amazon FSx was unable to create the file system. // - // * MISCONFIGURED indicates that the file system is in a failed - // but recoverable state. + // * + // MISCONFIGURED indicates that the file system is in a failed but recoverable + // state. // - // * UPDATING indicates that the file system is - // undergoing a customer initiated update. + // * UPDATING indicates that the file system is undergoing a customer + // initiated update. Lifecycle FileSystemLifecycle // The configuration for the Amazon FSx for Lustre file system. @@ -896,13 +895,13 @@ type SelfManagedActiveDirectoryConfiguration struct { // in the private IP version 4 (IPv4) address ranges, as specified in RFC 1918 // (http://www.faqs.org/rfcs/rfc1918.html): // - // * 10.0.0.0 - 10.255.255.255 (10/8 + // * 10.0.0.0 - 10.255.255.255 (10/8 // prefix) // - // * 172.16.0.0 - 172.31.255.255 (172.16/12 prefix) + // * 172.16.0.0 - 172.31.255.255 (172.16/12 prefix) // - // * 192.168.0.0 - // - 192.168.255.255 (192.168/16 prefix) + // * 192.168.0.0 - + // 192.168.255.255 (192.168/16 prefix) // // This member is required. 
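DataRepositoryTaskFilter, documented above, pairs a renamed filter-name constant with a list of values. Here is an illustrative sketch of filtering for tasks that are still PENDING or EXECUTING; the Name field comes from this diff, while the Values field shape ([]*string) follows the pointer-slice convention visible elsewhere in the file and is an assumption.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/fsx/types"
)

func main() {
	// Match only tasks whose lifecycle is still PENDING or EXECUTING.
	filter := types.DataRepositoryTaskFilter{
		Name:   types.DataRepositoryTaskFilterNameTaskLifecycle,
		Values: []*string{aws.String("PENDING"), aws.String("EXECUTING")}, // assumed field shape
	}
	fmt.Println(filter.Name, len(filter.Values))
}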
DnsIps []*string @@ -993,17 +992,17 @@ type UpdateFileSystemLustreConfiguration struct { // your file and directory listing up to date as you add or modify objects in your // linked S3 bucket. AutoImportPolicy can have the following values: // - // * NONE - + // * NONE - // (Default) AutoImport is off. Amazon FSx only updates file and directory listings // from the linked S3 bucket when the file system is created. FSx does not update // the file and directory listing for any new or changed objects after choosing // this option. // - // * NEW - AutoImport is on. Amazon FSx automatically imports + // * NEW - AutoImport is on. Amazon FSx automatically imports // directory listings of any new objects added to the linked S3 bucket that do not // currently exist in the FSx file system. // - // * NEW_CHANGED - AutoImport is on. + // * NEW_CHANGED - AutoImport is on. // Amazon FSx automatically imports file and directory listings of any new objects // added to the S3 bucket and any existing objects that are changed in the S3 // bucket after you choose this option. @@ -1090,18 +1089,18 @@ type WindowsFileSystemConfiguration struct { // Specifies the file system deployment type, valid values are the following: // - // - // * MULTI_AZ_1 - Specifies a high availability file system that is configured for + // * + // MULTI_AZ_1 - Specifies a high availability file system that is configured for // Multi-AZ redundancy to tolerate temporary Availability Zone (AZ) unavailability, // and supports SSD and HDD storage. // - // * SINGLE_AZ_1 - (Default) Specifies a - // file system that is configured for single AZ redundancy, only supports SSD + // * SINGLE_AZ_1 - (Default) Specifies a file + // system that is configured for single AZ redundancy, only supports SSD // storage. // - // * SINGLE_AZ_2 - Latest generation Single AZ file system. Specifies - // a file system that is configured for single AZ redundancy and supports SSD and - // HDD storage. + // * SINGLE_AZ_2 - Latest generation Single AZ file system. Specifies a + // file system that is configured for single AZ redundancy and supports SSD and HDD + // storage. // // For more information, see Single-AZ and Multi-AZ File Systems // (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/high-availability-multiAZ.html). diff --git a/service/gamelift/api_op_AcceptMatch.go b/service/gamelift/api_op_AcceptMatch.go index 07ae0b57f07..a06c3d8a614 100644 --- a/service/gamelift/api_op_AcceptMatch.go +++ b/service/gamelift/api_op_AcceptMatch.go @@ -34,16 +34,16 @@ import ( // (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-events.html) // Related operations // -// * StartMatchmaking +// * StartMatchmaking // -// * DescribeMatchmaking +// * DescribeMatchmaking // -// * +// * // StopMatchmaking // -// * AcceptMatch +// * AcceptMatch // -// * StartMatchBackfill +// * StartMatchBackfill func (c *Client) AcceptMatch(ctx context.Context, params *AcceptMatchInput, optFns ...func(*Options)) (*AcceptMatchOutput, error) { if params == nil { params = &AcceptMatchInput{} diff --git a/service/gamelift/api_op_ClaimGameServer.go b/service/gamelift/api_op_ClaimGameServer.go index 8fc58f9f945..e2aa256ead7 100644 --- a/service/gamelift/api_op_ClaimGameServer.go +++ b/service/gamelift/api_op_ClaimGameServer.go @@ -31,10 +31,10 @@ import ( // value and is not configurable. If you try to claim a specific game server, this // request will fail in the following cases: // -// * If the game server utilization +// * If the game server utilization // status is UTILIZED. 
// -// * If the game server claim status is CLAIMED. +// * If the game server claim status is CLAIMED. // // When // claiming a specific game server, this request will succeed even if the game @@ -44,18 +44,18 @@ import ( // (https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/gsg-intro.html) // Related operations // -// * RegisterGameServer +// * RegisterGameServer // -// * ListGameServers +// * ListGameServers // -// * +// * // ClaimGameServer // -// * DescribeGameServer +// * DescribeGameServer // -// * UpdateGameServer +// * UpdateGameServer // -// * +// * // DeregisterGameServer func (c *Client) ClaimGameServer(ctx context.Context, params *ClaimGameServerInput, optFns ...func(*Options)) (*ClaimGameServerOutput, error) { if params == nil { diff --git a/service/gamelift/api_op_CreateAlias.go b/service/gamelift/api_op_CreateAlias.go index eb7eaf1187c..72b3cf5347a 100644 --- a/service/gamelift/api_op_CreateAlias.go +++ b/service/gamelift/api_op_CreateAlias.go @@ -25,18 +25,18 @@ import ( // including an alias ID and an ARN. You can reassign an alias to another fleet by // calling UpdateAlias. // -// * CreateAlias +// * CreateAlias // -// * ListAliases +// * ListAliases // -// * -// DescribeAlias +// * DescribeAlias // -// * UpdateAlias +// * +// UpdateAlias // -// * DeleteAlias +// * DeleteAlias // -// * ResolveAlias +// * ResolveAlias func (c *Client) CreateAlias(ctx context.Context, params *CreateAliasInput, optFns ...func(*Options)) (*CreateAliasOutput, error) { if params == nil { params = &CreateAliasInput{} diff --git a/service/gamelift/api_op_CreateBuild.go b/service/gamelift/api_op_CreateBuild.go index a8d85de03c1..9e8147b3db2 100644 --- a/service/gamelift/api_op_CreateBuild.go +++ b/service/gamelift/api_op_CreateBuild.go @@ -20,19 +20,19 @@ import ( // file directory to a GameLift Amazon S3 location, and (2) it creates a new build // resource. The CreateBuild operation can used in the following scenarios: // -// * -// To create a new game build with build files that are in an S3 location under an -// AWS account that you control. To use this option, you must first give Amazon +// * To +// create a new game build with build files that are in an S3 location under an AWS +// account that you control. To use this option, you must first give Amazon // GameLift access to the S3 bucket. With permissions in place, call CreateBuild // and specify a build name, operating system, and the S3 storage location of your // game build. // -// * To directly upload your build files to a GameLift S3 -// location. To use this option, first call CreateBuild and specify a build name -// and operating system. This operation creates a new build resource and also -// returns an S3 location with temporary access credentials. Use the credentials to -// manually upload your build files to the specified S3 location. For more -// information, see Uploading Objects +// * To directly upload your build files to a GameLift S3 location. To +// use this option, first call CreateBuild and specify a build name and operating +// system. This operation creates a new build resource and also returns an S3 +// location with temporary access credentials. Use the credentials to manually +// upload your build files to the specified S3 location. For more information, see +// Uploading Objects // (https://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html) in the // Amazon S3 Developer Guide. Build files can be uploaded to the GameLift S3 // location once only; that can't be updated. 
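The CreateBuild scenarios above describe registering a build record and then uploading files with the temporary credentials GameLift returns. A hedged sketch of that second scenario follows, using the operation signature from this diff; the Name and OperatingSystem input fields and the OperatingSystemAmazonLinux2 constant are assumptions for illustration, and handling of the returned upload credentials is left out.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/gamelift"
	"github.com/aws/aws-sdk-go-v2/service/gamelift/types"
)

// createEmptyBuild registers a build record only; the response is expected to
// carry the temporary S3 upload location for the build files.
func createEmptyBuild(ctx context.Context, c *gamelift.Client) (*gamelift.CreateBuildOutput, error) {
	return c.CreateBuild(ctx, &gamelift.CreateBuildInput{
		Name:            aws.String("my-game-build"),      // assumed field name
		OperatingSystem: types.OperatingSystemAmazonLinux2, // assumed field and constant
	})
}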
@@ -46,16 +46,16 @@ import ( // (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-build-cli-uploading.html#gamelift-build-cli-uploading-create-build) // Related operations // -// * CreateBuild +// * CreateBuild // -// * ListBuilds +// * ListBuilds // -// * DescribeBuild +// * DescribeBuild // +// * +// UpdateBuild // -// * UpdateBuild -// -// * DeleteBuild +// * DeleteBuild func (c *Client) CreateBuild(ctx context.Context, params *CreateBuildInput, optFns ...func(*Options)) (*CreateBuildOutput, error) { if params == nil { params = &CreateBuildInput{} diff --git a/service/gamelift/api_op_CreateFleet.go b/service/gamelift/api_op_CreateFleet.go index 2c235325d5f..d7721f7adc6 100644 --- a/service/gamelift/api_op_CreateFleet.go +++ b/service/gamelift/api_op_CreateFleet.go @@ -24,47 +24,47 @@ import ( // following tasks. You can track the process of a fleet by checking the fleet // status or by monitoring fleet creation events: // -// * Creates a fleet resource. +// * Creates a fleet resource. // Status: NEW. // -// * Begins writing events to the fleet event log, which can be +// * Begins writing events to the fleet event log, which can be // accessed in the Amazon GameLift console. // -// * Sets the fleet's target capacity -// to 1 (desired instances), which triggers Amazon GameLift to start one new EC2 +// * Sets the fleet's target capacity to +// 1 (desired instances), which triggers Amazon GameLift to start one new EC2 // instance. // -// * Downloads the game build or Realtime script to the new instance -// and installs it. Statuses: DOWNLOADING, VALIDATING, BUILDING. +// * Downloads the game build or Realtime script to the new instance and +// installs it. Statuses: DOWNLOADING, VALIDATING, BUILDING. // -// * Starts -// launching server processes on the instance. If the fleet is configured to run -// multiple server processes per instance, Amazon GameLift staggers each process -// launch by a few seconds. Status: ACTIVATING. +// * Starts launching +// server processes on the instance. If the fleet is configured to run multiple +// server processes per instance, Amazon GameLift staggers each process launch by a +// few seconds. Status: ACTIVATING. // -// * Sets the fleet's status to -// ACTIVE as soon as one server process is ready to host a game session. +// * Sets the fleet's status to ACTIVE as soon as +// one server process is ready to host a game session. // -// Learn -// more Setting Up Fleets +// Learn more Setting Up +// Fleets // (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html)Debug // Fleet Creation Issues // (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-creating-debug.html#fleets-creating-debug-creation) // Related operations // -// * CreateFleet +// * CreateFleet // -// * ListFleets +// * ListFleets // -// * DeleteFleet +// * DeleteFleet // +// * +// DescribeFleetAttributes // -// * DescribeFleetAttributes +// * UpdateFleetAttributes // -// * UpdateFleetAttributes -// -// * StartFleetActions -// or StopFleetActions +// * StartFleetActions or +// StopFleetActions func (c *Client) CreateFleet(ctx context.Context, params *CreateFleetInput, optFns ...func(*Options)) (*CreateFleetOutput, error) { if params == nil { params = &CreateFleetInput{} @@ -116,11 +116,11 @@ type CreateFleetInput struct { // fleet with certificate generation results fails with a 4xx unsupported Region // error. Valid values include: // - // * GENERATED - Generate a TLS/SSL certificate - // for this fleet. 
+ // * GENERATED - Generate a TLS/SSL certificate for + // this fleet. // - // * DISABLED - (default) Do not generate a TLS/SSL - // certificate for this fleet. + // * DISABLED - (default) Do not generate a TLS/SSL certificate for + // this fleet. CertificateConfiguration *types.CertificateConfiguration // A human-readable description of a fleet. @@ -170,11 +170,11 @@ type CreateFleetInput struct { // change will only affect sessions created after the policy change. You can also // set protection for individual instances using UpdateGameSession. // - // * - // NoProtection - The game session can be terminated during a scale-down event. - // + // * NoProtection + // - The game session can be terminated during a scale-down event. // - // * FullProtection - If the game session is in an ACTIVE status, it cannot be + // * + // FullProtection - If the game session is in an ACTIVE status, it cannot be // terminated during a scale-down event. NewGameSessionProtectionPolicy types.ProtectionPolicy diff --git a/service/gamelift/api_op_CreateGameServerGroup.go b/service/gamelift/api_op_CreateGameServerGroup.go index c529edbe625..77e573087ca 100644 --- a/service/gamelift/api_op_CreateGameServerGroup.go +++ b/service/gamelift/api_op_CreateGameServerGroup.go @@ -20,14 +20,14 @@ import ( // emitted to Amazon CloudWatch. Before creating a new game server group, you must // have the following: // -// * An Amazon EC2 launch template that specifies how to +// * An Amazon EC2 launch template that specifies how to // launch Amazon EC2 instances with your game server build. For more information, // see Launching an Instance from a Launch Template // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html) // in the Amazon EC2 User Guide. // -// * An IAM role that extends limited access to -// your AWS account to allow GameLift FleetIQ to create and interact with the Auto +// * An IAM role that extends limited access to your +// AWS account to allow GameLift FleetIQ to create and interact with the Auto // Scaling group. For more information, see Create IAM roles for cross-service // interaction // (https://docs.aws.amazon.com/gamelift/latest/developerguide/gsg-iam-permissions-roles.html) @@ -49,24 +49,23 @@ import ( // (https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/gsg-intro.html) // Related operations // -// * CreateGameServerGroup +// * CreateGameServerGroup // -// * ListGameServerGroups +// * ListGameServerGroups // +// * +// DescribeGameServerGroup // -// * DescribeGameServerGroup +// * UpdateGameServerGroup // -// * UpdateGameServerGroup +// * DeleteGameServerGroup // -// * -// DeleteGameServerGroup +// * +// ResumeGameServerGroup // -// * ResumeGameServerGroup +// * SuspendGameServerGroup // -// * -// SuspendGameServerGroup -// -// * DescribeGameServerInstances +// * DescribeGameServerInstances func (c *Client) CreateGameServerGroup(ctx context.Context, params *CreateGameServerGroupInput, optFns ...func(*Options)) (*CreateGameServerGroupOutput, error) { if params == nil { params = &CreateGameServerGroupInput{} @@ -153,23 +152,23 @@ type CreateGameServerGroupInput struct { // Indicates how GameLift FleetIQ balances the use of Spot Instances and On-Demand // Instances in the game server group. Method options include the following: // - // * + // * // SPOT_ONLY - Only Spot Instances are used in the game server group. If Spot // Instances are unavailable or not viable for game hosting, the game server group // provides no hosting capacity until Spot Instances can again be used. 
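The CreateFleetInput fields shown earlier in this diff (CertificateConfiguration and NewGameSessionProtectionPolicy) take enum types from the gamelift types package. The sketch below, an editorial illustration rather than generated code, configures a fleet that generates a TLS certificate and fully protects active game sessions; the CertificateType field and the CertificateTypeGenerated and ProtectionPolicyFullProtection constant names follow the new naming convention but are assumptions, and the other required fleet fields are omitted.

package example

import (
	"github.com/aws/aws-sdk-go-v2/service/gamelift"
	"github.com/aws/aws-sdk-go-v2/service/gamelift/types"
)

// tlsProtectedFleetInput sketches only the two fields discussed above; a real
// request also needs a name, a build or script reference, EC2 settings, etc.
func tlsProtectedFleetInput() *gamelift.CreateFleetInput {
	return &gamelift.CreateFleetInput{
		CertificateConfiguration: &types.CertificateConfiguration{
			CertificateType: types.CertificateTypeGenerated, // assumed field and constant names
		},
		NewGameSessionProtectionPolicy: types.ProtectionPolicyFullProtection, // assumed constant name
	}
}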
Until then, // no new instances are started, and the existing nonviable Spot Instances are // terminated (after current gameplay ends) and are not replaced. // - // * - // SPOT_PREFERRED - (default value) Spot Instances are used whenever available in - // the game server group. If Spot Instances are unavailable, the game server group - // continues to provide hosting capacity by falling back to On-Demand Instances. - // Existing nonviable Spot Instances are terminated (after current gameplay ends) - // and are replaced with new On-Demand Instances. + // * SPOT_PREFERRED + // - (default value) Spot Instances are used whenever available in the game server + // group. If Spot Instances are unavailable, the game server group continues to + // provide hosting capacity by falling back to On-Demand Instances. Existing + // nonviable Spot Instances are terminated (after current gameplay ends) and are + // replaced with new On-Demand Instances. // - // * ON_DEMAND_ONLY - Only - // On-Demand Instances are used in the game server group. No Spot Instances are - // used, even when available, while this balancing strategy is in force. + // * ON_DEMAND_ONLY - Only On-Demand + // Instances are used in the game server group. No Spot Instances are used, even + // when available, while this balancing strategy is in force. BalancingStrategy types.BalancingStrategy // A flag that indicates whether instances in the game server group are protected diff --git a/service/gamelift/api_op_CreateGameSession.go b/service/gamelift/api_op_CreateGameSession.go index 2bb7d58fe97..f2e08468ff8 100644 --- a/service/gamelift/api_op_CreateGameSession.go +++ b/service/gamelift/api_op_CreateGameSession.go @@ -34,29 +34,28 @@ import ( // GetGameSessionLogUrl to download the log files. Available in Amazon GameLift // Local. 
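CreateGameServerGroupInput.BalancingStrategy, shown above, selects how GameLift FleetIQ mixes Spot and On-Demand Instances. Below is a sketch that chooses the SPOT_PREFERRED default explicitly; the BalancingStrategySpotPreferred constant name is inferred from the naming convention introduced in this change, and the launch template, IAM role, and instance definitions a real request needs are omitted.

package example

import (
	"github.com/aws/aws-sdk-go-v2/service/gamelift"
	"github.com/aws/aws-sdk-go-v2/service/gamelift/types"
)

// spotPreferredGroupInput prefers Spot capacity and falls back to On-Demand
// Instances when Spot is unavailable, as described in the diff above.
func spotPreferredGroupInput() *gamelift.CreateGameServerGroupInput {
	return &gamelift.CreateGameServerGroupInput{
		BalancingStrategy: types.BalancingStrategySpotPreferred, // assumed constant name
	}
}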
// -// * CreateGameSession +// * CreateGameSession // -// * DescribeGameSessions +// * DescribeGameSessions // -// * +// * // DescribeGameSessionDetails // -// * SearchGameSessions +// * SearchGameSessions // -// * UpdateGameSession +// * UpdateGameSession // +// * +// GetGameSessionLogUrl // -// * GetGameSessionLogUrl +// * Game session placements // -// * Game session placements +// * StartGameSessionPlacement // -// * -// StartGameSessionPlacement +// * +// DescribeGameSessionPlacement // -// * DescribeGameSessionPlacement -// -// * -// StopGameSessionPlacement +// * StopGameSessionPlacement func (c *Client) CreateGameSession(ctx context.Context, params *CreateGameSessionInput, optFns ...func(*Options)) (*CreateGameSessionOutput, error) { if params == nil { params = &CreateGameSessionInput{} diff --git a/service/gamelift/api_op_CreateGameSessionQueue.go b/service/gamelift/api_op_CreateGameSessionQueue.go index 317762c2f52..fef07f2468c 100644 --- a/service/gamelift/api_op_CreateGameSessionQueue.go +++ b/service/gamelift/api_op_CreateGameSessionQueue.go @@ -42,15 +42,14 @@ import ( // (https://docs.aws.amazon.com/gamelift/latest/developerguide/queues-creating.html) // Related operations // -// * CreateGameSessionQueue +// * CreateGameSessionQueue // -// * -// DescribeGameSessionQueues +// * DescribeGameSessionQueues // -// * UpdateGameSessionQueue +// * +// UpdateGameSessionQueue // -// * -// DeleteGameSessionQueue +// * DeleteGameSessionQueue func (c *Client) CreateGameSessionQueue(ctx context.Context, params *CreateGameSessionQueueInput, optFns ...func(*Options)) (*CreateGameSessionQueueOutput, error) { if params == nil { params = &CreateGameSessionQueueInput{} diff --git a/service/gamelift/api_op_CreateMatchmakingConfiguration.go b/service/gamelift/api_op_CreateMatchmakingConfiguration.go index 81f637d8b07..2530283f1ea 100644 --- a/service/gamelift/api_op_CreateMatchmakingConfiguration.go +++ b/service/gamelift/api_op_CreateMatchmakingConfiguration.go @@ -31,24 +31,24 @@ import ( // (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-notification.html) // Related operations // -// * CreateMatchmakingConfiguration +// * CreateMatchmakingConfiguration // -// * +// * // DescribeMatchmakingConfigurations // -// * UpdateMatchmakingConfiguration +// * UpdateMatchmakingConfiguration // -// * +// * // DeleteMatchmakingConfiguration // -// * CreateMatchmakingRuleSet +// * CreateMatchmakingRuleSet // -// * +// * // DescribeMatchmakingRuleSets // -// * ValidateMatchmakingRuleSet +// * ValidateMatchmakingRuleSet // -// * +// * // DeleteMatchmakingRuleSet func (c *Client) CreateMatchmakingConfiguration(ctx context.Context, params *CreateMatchmakingConfigurationInput, optFns ...func(*Options)) (*CreateMatchmakingConfigurationOutput, error) { if params == nil { diff --git a/service/gamelift/api_op_CreateMatchmakingRuleSet.go b/service/gamelift/api_op_CreateMatchmakingRuleSet.go index 5f38d32c2d7..df2717d1e30 100644 --- a/service/gamelift/api_op_CreateMatchmakingRuleSet.go +++ b/service/gamelift/api_op_CreateMatchmakingRuleSet.go @@ -21,38 +21,38 @@ import ( // it is a good idea to check the rule set syntax using ValidateMatchmakingRuleSet // before creating a new rule set. 
Learn more // -// * Build a Rule Set +// * Build a Rule Set // (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-rulesets.html) // -// -// * Design a Matchmaker +// * +// Design a Matchmaker // (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-configuration.html) // -// -// * Matchmaking with FlexMatch +// * +// Matchmaking with FlexMatch // (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-intro.html) // // Related // operations // -// * CreateMatchmakingConfiguration +// * CreateMatchmakingConfiguration // -// * +// * // DescribeMatchmakingConfigurations // -// * UpdateMatchmakingConfiguration +// * UpdateMatchmakingConfiguration // -// * +// * // DeleteMatchmakingConfiguration // -// * CreateMatchmakingRuleSet +// * CreateMatchmakingRuleSet // -// * +// * // DescribeMatchmakingRuleSets // -// * ValidateMatchmakingRuleSet +// * ValidateMatchmakingRuleSet // -// * +// * // DeleteMatchmakingRuleSet func (c *Client) CreateMatchmakingRuleSet(ctx context.Context, params *CreateMatchmakingRuleSetInput, optFns ...func(*Options)) (*CreateMatchmakingRuleSetOutput, error) { if params == nil { diff --git a/service/gamelift/api_op_CreatePlayerSession.go b/service/gamelift/api_op_CreatePlayerSession.go index d7a37f01f66..c454dc9f9a0 100644 --- a/service/gamelift/api_op_CreatePlayerSession.go +++ b/service/gamelift/api_op_CreatePlayerSession.go @@ -22,22 +22,22 @@ import ( // player and a new PlayerSession object is returned. Player sessions cannot be // updated. Available in Amazon GameLift Local. // -// * CreatePlayerSession +// * CreatePlayerSession // -// * +// * // CreatePlayerSessions // -// * DescribePlayerSessions +// * DescribePlayerSessions // -// * Game session -// placements +// * Game session placements // -// * StartGameSessionPlacement +// * +// StartGameSessionPlacement // -// * -// DescribeGameSessionPlacement +// * DescribeGameSessionPlacement // -// * StopGameSessionPlacement +// * +// StopGameSessionPlacement func (c *Client) CreatePlayerSession(ctx context.Context, params *CreatePlayerSessionInput, optFns ...func(*Options)) (*CreatePlayerSessionOutput, error) { if params == nil { params = &CreatePlayerSessionInput{} diff --git a/service/gamelift/api_op_CreatePlayerSessions.go b/service/gamelift/api_op_CreatePlayerSessions.go index af760ba63ea..e007e931c8c 100644 --- a/service/gamelift/api_op_CreatePlayerSessions.go +++ b/service/gamelift/api_op_CreatePlayerSessions.go @@ -22,22 +22,22 @@ import ( // session for each player and a set of new PlayerSession objects is returned. // Player sessions cannot be updated. Available in Amazon GameLift Local. // -// * +// * // CreatePlayerSession // -// * CreatePlayerSessions +// * CreatePlayerSessions // -// * DescribePlayerSessions +// * DescribePlayerSessions // +// * Game +// session placements // -// * Game session placements +// * StartGameSessionPlacement // -// * StartGameSessionPlacement -// -// * +// * // DescribeGameSessionPlacement // -// * StopGameSessionPlacement +// * StopGameSessionPlacement func (c *Client) CreatePlayerSessions(ctx context.Context, params *CreatePlayerSessionsInput, optFns ...func(*Options)) (*CreatePlayerSessionsOutput, error) { if params == nil { params = &CreatePlayerSessionsInput{} diff --git a/service/gamelift/api_op_CreateScript.go b/service/gamelift/api_op_CreateScript.go index dca70a81010..77fefadddb0 100644 --- a/service/gamelift/api_op_CreateScript.go +++ b/service/gamelift/api_op_CreateScript.go @@ -19,12 +19,12 @@ import ( // script file(s). 
The script files and all dependencies must be zipped into a // single file. You can pull the zip file from either of these locations: // -// * A +// * A // locally available directory. Use the ZipFile parameter for this option. // -// * -// An Amazon Simple Storage Service (Amazon S3) bucket under your AWS account. Use -// the StorageLocation parameter for this option. You'll need to have an Identity +// * An +// Amazon Simple Storage Service (Amazon S3) bucket under your AWS account. Use the +// StorageLocation parameter for this option. You'll need to have an Identity // Access Management (IAM) role that allows the Amazon GameLift service to access // your S3 bucket. // @@ -39,16 +39,16 @@ import ( // (https://docs.aws.amazon.com/gamelift/latest/developerguide/setting-up-role.html) // Related operations // -// * CreateScript +// * CreateScript // -// * ListScripts +// * ListScripts // -// * -// DescribeScript +// * DescribeScript // -// * UpdateScript +// * +// UpdateScript // -// * DeleteScript +// * DeleteScript func (c *Client) CreateScript(ctx context.Context, params *CreateScriptInput, optFns ...func(*Options)) (*CreateScriptOutput, error) { if params == nil { params = &CreateScriptInput{} diff --git a/service/gamelift/api_op_CreateVpcPeeringAuthorization.go b/service/gamelift/api_op_CreateVpcPeeringAuthorization.go index 7bd37c3b688..7fa3145bcfe 100644 --- a/service/gamelift/api_op_CreateVpcPeeringAuthorization.go +++ b/service/gamelift/api_op_CreateVpcPeeringAuthorization.go @@ -35,20 +35,20 @@ import ( // is canceled by a call to DeleteVpcPeeringAuthorization. You must create or // delete the peering connection while the authorization is valid. // -// * +// * // CreateVpcPeeringAuthorization // -// * DescribeVpcPeeringAuthorizations +// * DescribeVpcPeeringAuthorizations // -// * +// * // DeleteVpcPeeringAuthorization // -// * CreateVpcPeeringConnection +// * CreateVpcPeeringConnection // -// * +// * // DescribeVpcPeeringConnections // -// * DeleteVpcPeeringConnection +// * DeleteVpcPeeringConnection func (c *Client) CreateVpcPeeringAuthorization(ctx context.Context, params *CreateVpcPeeringAuthorizationInput, optFns ...func(*Options)) (*CreateVpcPeeringAuthorizationOutput, error) { if params == nil { params = &CreateVpcPeeringAuthorizationInput{} diff --git a/service/gamelift/api_op_CreateVpcPeeringConnection.go b/service/gamelift/api_op_CreateVpcPeeringConnection.go index 7c2cd6ca6b4..0fa0041db42 100644 --- a/service/gamelift/api_op_CreateVpcPeeringConnection.go +++ b/service/gamelift/api_op_CreateVpcPeeringConnection.go @@ -32,20 +32,20 @@ import ( // the request's status using DescribeVpcPeeringConnections, or by monitoring fleet // events for success or failure using DescribeFleetEvents. 
// -// * +// * // CreateVpcPeeringAuthorization // -// * DescribeVpcPeeringAuthorizations +// * DescribeVpcPeeringAuthorizations // -// * +// * // DeleteVpcPeeringAuthorization // -// * CreateVpcPeeringConnection +// * CreateVpcPeeringConnection // -// * +// * // DescribeVpcPeeringConnections // -// * DeleteVpcPeeringConnection +// * DeleteVpcPeeringConnection func (c *Client) CreateVpcPeeringConnection(ctx context.Context, params *CreateVpcPeeringConnectionInput, optFns ...func(*Options)) (*CreateVpcPeeringConnectionOutput, error) { if params == nil { params = &CreateVpcPeeringConnectionInput{} diff --git a/service/gamelift/api_op_DeleteAlias.go b/service/gamelift/api_op_DeleteAlias.go index f537c204cd1..a6211d76d43 100644 --- a/service/gamelift/api_op_DeleteAlias.go +++ b/service/gamelift/api_op_DeleteAlias.go @@ -14,19 +14,18 @@ import ( // attempting to access a server process using the deleted alias receive an error. // To delete an alias, specify the alias ID to be deleted. // -// * CreateAlias +// * CreateAlias // +// * +// ListAliases // -// * ListAliases +// * DescribeAlias // -// * DescribeAlias +// * UpdateAlias // -// * UpdateAlias +// * DeleteAlias // -// * DeleteAlias -// -// * -// ResolveAlias +// * ResolveAlias func (c *Client) DeleteAlias(ctx context.Context, params *DeleteAliasInput, optFns ...func(*Options)) (*DeleteAliasOutput, error) { if params == nil { params = &DeleteAliasInput{} diff --git a/service/gamelift/api_op_DeleteBuild.go b/service/gamelift/api_op_DeleteBuild.go index 496002ea0e7..039277953be 100644 --- a/service/gamelift/api_op_DeleteBuild.go +++ b/service/gamelift/api_op_DeleteBuild.go @@ -18,16 +18,16 @@ import ( // (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-build-intro.html) // Related operations // -// * CreateBuild +// * CreateBuild // -// * ListBuilds +// * ListBuilds // -// * DescribeBuild +// * DescribeBuild // +// * +// UpdateBuild // -// * UpdateBuild -// -// * DeleteBuild +// * DeleteBuild func (c *Client) DeleteBuild(ctx context.Context, params *DeleteBuildInput, optFns ...func(*Options)) (*DeleteBuildOutput, error) { if params == nil { params = &DeleteBuildInput{} diff --git a/service/gamelift/api_op_DeleteFleet.go b/service/gamelift/api_op_DeleteFleet.go index 4c919424510..d4062413a59 100644 --- a/service/gamelift/api_op_DeleteFleet.go +++ b/service/gamelift/api_op_DeleteFleet.go @@ -21,19 +21,19 @@ import ( // (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html) // Related operations // -// * CreateFleet +// * CreateFleet // -// * ListFleets +// * ListFleets // -// * DeleteFleet +// * DeleteFleet // +// * +// DescribeFleetAttributes // -// * DescribeFleetAttributes +// * UpdateFleetAttributes // -// * UpdateFleetAttributes -// -// * StartFleetActions -// or StopFleetActions +// * StartFleetActions or +// StopFleetActions func (c *Client) DeleteFleet(ctx context.Context, params *DeleteFleetInput, optFns ...func(*Options)) (*DeleteFleetOutput, error) { if params == nil { params = &DeleteFleetInput{} diff --git a/service/gamelift/api_op_DeleteGameServerGroup.go b/service/gamelift/api_op_DeleteGameServerGroup.go index a5b8e15453a..7327d2eada2 100644 --- a/service/gamelift/api_op_DeleteGameServerGroup.go +++ b/service/gamelift/api_op_DeleteGameServerGroup.go @@ -17,45 +17,44 @@ import ( // deleting the game server group. 
Depending on the type of delete operation // selected, this operation might affect these resources: // -// * The game server +// * The game server // group // -// * The corresponding Auto Scaling group +// * The corresponding Auto Scaling group // -// * All game servers that -// are currently running in the group +// * All game servers that are +// currently running in the group // -// To delete a game server group, identify the -// game server group to delete and specify the type of delete operation to -// initiate. Game server groups can only be deleted if they are in ACTIVE or ERROR -// status. If the delete request is successful, a series of operations are kicked -// off. The game server group status is changed to DELETE_SCHEDULED, which prevents -// new game servers from being registered and stops automatic scaling activity. -// Once all game servers in the game server group are deregistered, GameLift -// FleetIQ can begin deleting resources. If any of the delete operations fail, the -// game server group is placed in ERROR status. GameLift FleetIQ emits delete -// events to Amazon CloudWatch. Learn more GameLift FleetIQ Guide +// To delete a game server group, identify the game +// server group to delete and specify the type of delete operation to initiate. +// Game server groups can only be deleted if they are in ACTIVE or ERROR status. If +// the delete request is successful, a series of operations are kicked off. The +// game server group status is changed to DELETE_SCHEDULED, which prevents new game +// servers from being registered and stops automatic scaling activity. Once all +// game servers in the game server group are deregistered, GameLift FleetIQ can +// begin deleting resources. If any of the delete operations fail, the game server +// group is placed in ERROR status. GameLift FleetIQ emits delete events to Amazon +// CloudWatch. Learn more GameLift FleetIQ Guide // (https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/gsg-intro.html) // Related operations // -// * CreateGameServerGroup +// * CreateGameServerGroup // -// * ListGameServerGroups +// * ListGameServerGroups // +// * +// DescribeGameServerGroup // -// * DescribeGameServerGroup +// * UpdateGameServerGroup // -// * UpdateGameServerGroup +// * DeleteGameServerGroup // -// * -// DeleteGameServerGroup +// * +// ResumeGameServerGroup // -// * ResumeGameServerGroup +// * SuspendGameServerGroup // -// * -// SuspendGameServerGroup -// -// * DescribeGameServerInstances +// * DescribeGameServerInstances func (c *Client) DeleteGameServerGroup(ctx context.Context, params *DeleteGameServerGroupInput, optFns ...func(*Options)) (*DeleteGameServerGroupOutput, error) { if params == nil { params = &DeleteGameServerGroupInput{} @@ -81,16 +80,16 @@ type DeleteGameServerGroupInput struct { // The type of delete to perform. Options include the following: // - // * SAFE_DELETE - // – Terminates the game server group and EC2 Auto Scaling group only when it has - // no game servers that are in UTILIZED status. + // * SAFE_DELETE – + // Terminates the game server group and EC2 Auto Scaling group only when it has no + // game servers that are in UTILIZED status. // - // * FORCE_DELETE – Terminates - // the game server group, including all active game servers regardless of their - // utilization status, and the EC2 Auto Scaling group. + // * FORCE_DELETE – Terminates the game + // server group, including all active game servers regardless of their utilization + // status, and the EC2 Auto Scaling group. 
// - // * RETAIN – Does a safe - // delete of the game server group but retains the EC2 Auto Scaling group as is. + // * RETAIN – Does a safe delete of the + // game server group but retains the EC2 Auto Scaling group as is. DeleteOption types.GameServerGroupDeleteOption } diff --git a/service/gamelift/api_op_DeleteGameSessionQueue.go b/service/gamelift/api_op_DeleteGameSessionQueue.go index 63b209467b4..aa47d288e21 100644 --- a/service/gamelift/api_op_DeleteGameSessionQueue.go +++ b/service/gamelift/api_op_DeleteGameSessionQueue.go @@ -16,15 +16,14 @@ import ( // (https://docs.aws.amazon.com/gamelift/latest/developerguide/queues-intro.html) // Related operations // -// * CreateGameSessionQueue +// * CreateGameSessionQueue // -// * -// DescribeGameSessionQueues +// * DescribeGameSessionQueues // -// * UpdateGameSessionQueue +// * +// UpdateGameSessionQueue // -// * -// DeleteGameSessionQueue +// * DeleteGameSessionQueue func (c *Client) DeleteGameSessionQueue(ctx context.Context, params *DeleteGameSessionQueueInput, optFns ...func(*Options)) (*DeleteGameSessionQueueOutput, error) { if params == nil { params = &DeleteGameSessionQueueInput{} diff --git a/service/gamelift/api_op_DeleteMatchmakingConfiguration.go b/service/gamelift/api_op_DeleteMatchmakingConfiguration.go index 49271e69cd6..56252b244d9 100644 --- a/service/gamelift/api_op_DeleteMatchmakingConfiguration.go +++ b/service/gamelift/api_op_DeleteMatchmakingConfiguration.go @@ -14,25 +14,25 @@ import ( // the configuration name. A matchmaking configuration cannot be deleted if it is // being used in any active matchmaking tickets. Related operations // -// * +// * // CreateMatchmakingConfiguration // -// * DescribeMatchmakingConfigurations +// * DescribeMatchmakingConfigurations // -// * +// * // UpdateMatchmakingConfiguration // -// * DeleteMatchmakingConfiguration +// * DeleteMatchmakingConfiguration // -// * +// * // CreateMatchmakingRuleSet // -// * DescribeMatchmakingRuleSets +// * DescribeMatchmakingRuleSets // -// * +// * // ValidateMatchmakingRuleSet // -// * DeleteMatchmakingRuleSet +// * DeleteMatchmakingRuleSet func (c *Client) DeleteMatchmakingConfiguration(ctx context.Context, params *DeleteMatchmakingConfigurationInput, optFns ...func(*Options)) (*DeleteMatchmakingConfigurationOutput, error) { if params == nil { params = &DeleteMatchmakingConfigurationInput{} diff --git a/service/gamelift/api_op_DeleteMatchmakingRuleSet.go b/service/gamelift/api_op_DeleteMatchmakingRuleSet.go index ccaf7b81ef1..9a87c9adb07 100644 --- a/service/gamelift/api_op_DeleteMatchmakingRuleSet.go +++ b/service/gamelift/api_op_DeleteMatchmakingRuleSet.go @@ -14,30 +14,30 @@ import ( // rule set name. Rule sets cannot be deleted if they are currently being used by a // matchmaking configuration. 
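DeleteGameServerGroupInput.DeleteOption, documented earlier in this diff, controls whether the group is torn down safely, forcibly, or with the Auto Scaling group retained. A hedged sketch of a SAFE_DELETE request follows, built on the operation signature from this diff; the GameServerGroupName field and the GameServerGroupDeleteOptionSafeDelete constant name are assumptions.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/gamelift"
	"github.com/aws/aws-sdk-go-v2/service/gamelift/types"
)

// safeDeleteGroup asks GameLift FleetIQ to delete the group only once none of
// its game servers are in UTILIZED status.
func safeDeleteGroup(ctx context.Context, c *gamelift.Client, group string) error {
	_, err := c.DeleteGameServerGroup(ctx, &gamelift.DeleteGameServerGroupInput{
		GameServerGroupName: aws.String(group),                           // assumed field name
		DeleteOption:        types.GameServerGroupDeleteOptionSafeDelete, // assumed constant name
	})
	return err
}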
Learn more // -// * Build a Rule Set +// * Build a Rule Set // (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-rulesets.html) // // Related // operations // -// * CreateMatchmakingConfiguration +// * CreateMatchmakingConfiguration // -// * +// * // DescribeMatchmakingConfigurations // -// * UpdateMatchmakingConfiguration +// * UpdateMatchmakingConfiguration // -// * +// * // DeleteMatchmakingConfiguration // -// * CreateMatchmakingRuleSet +// * CreateMatchmakingRuleSet // -// * +// * // DescribeMatchmakingRuleSets // -// * ValidateMatchmakingRuleSet +// * ValidateMatchmakingRuleSet // -// * +// * // DeleteMatchmakingRuleSet func (c *Client) DeleteMatchmakingRuleSet(ctx context.Context, params *DeleteMatchmakingRuleSetInput, optFns ...func(*Options)) (*DeleteMatchmakingRuleSetOutput, error) { if params == nil { diff --git a/service/gamelift/api_op_DeleteScalingPolicy.go b/service/gamelift/api_op_DeleteScalingPolicy.go index ec038345291..7ac44bbe1fb 100644 --- a/service/gamelift/api_op_DeleteScalingPolicy.go +++ b/service/gamelift/api_op_DeleteScalingPolicy.go @@ -16,28 +16,26 @@ import ( // suspend scaling policies, call StopFleetActions. This operation suspends all // policies for the fleet. // -// * DescribeFleetCapacity +// * DescribeFleetCapacity // -// * -// UpdateFleetCapacity +// * UpdateFleetCapacity // -// * DescribeEC2InstanceLimits +// * +// DescribeEC2InstanceLimits // -// * Manage scaling -// policies: +// * Manage scaling policies: // -// * PutScalingPolicy (auto-scaling) +// * PutScalingPolicy +// (auto-scaling) // -// * -// DescribeScalingPolicies (auto-scaling) +// * DescribeScalingPolicies (auto-scaling) // -// * DeleteScalingPolicy +// * DeleteScalingPolicy // (auto-scaling) // -// * Manage fleet actions: -// -// * StartFleetActions +// * Manage fleet actions: // +// * StartFleetActions // // * StopFleetActions func (c *Client) DeleteScalingPolicy(ctx context.Context, params *DeleteScalingPolicyInput, optFns ...func(*Options)) (*DeleteScalingPolicyOutput, error) { diff --git a/service/gamelift/api_op_DeleteScript.go b/service/gamelift/api_op_DeleteScript.go index d10e3d0adf9..89c3e2f9253 100644 --- a/service/gamelift/api_op_DeleteScript.go +++ b/service/gamelift/api_op_DeleteScript.go @@ -20,16 +20,16 @@ import ( // (https://docs.aws.amazon.com/gamelift/latest/developerguide/realtime-intro.html) // Related operations // -// * CreateScript +// * CreateScript // -// * ListScripts +// * ListScripts // -// * -// DescribeScript +// * DescribeScript // -// * UpdateScript +// * +// UpdateScript // -// * DeleteScript +// * DeleteScript func (c *Client) DeleteScript(ctx context.Context, params *DeleteScriptInput, optFns ...func(*Options)) (*DeleteScriptOutput, error) { if params == nil { params = &DeleteScriptInput{} diff --git a/service/gamelift/api_op_DeleteVpcPeeringAuthorization.go b/service/gamelift/api_op_DeleteVpcPeeringAuthorization.go index 3e185b980a9..bc3d3407f97 100644 --- a/service/gamelift/api_op_DeleteVpcPeeringAuthorization.go +++ b/service/gamelift/api_op_DeleteVpcPeeringAuthorization.go @@ -11,22 +11,23 @@ import ( ) // Cancels a pending VPC peering authorization for the specified VPC. If you need -// to delete an existing VPC peering connection, call DeleteVpcPeeringConnection. -// +// to delete an existing VPC peering connection, call +// DeleteVpcPeeringConnection. 
// // * CreateVpcPeeringAuthorization // -// * DescribeVpcPeeringAuthorizations +// * +// DescribeVpcPeeringAuthorizations // -// * -// DeleteVpcPeeringAuthorization +// * DeleteVpcPeeringAuthorization // -// * CreateVpcPeeringConnection +// * +// CreateVpcPeeringConnection // -// * -// DescribeVpcPeeringConnections +// * DescribeVpcPeeringConnections // -// * DeleteVpcPeeringConnection +// * +// DeleteVpcPeeringConnection func (c *Client) DeleteVpcPeeringAuthorization(ctx context.Context, params *DeleteVpcPeeringAuthorizationInput, optFns ...func(*Options)) (*DeleteVpcPeeringAuthorizationOutput, error) { if params == nil { params = &DeleteVpcPeeringAuthorizationInput{} diff --git a/service/gamelift/api_op_DeleteVpcPeeringConnection.go b/service/gamelift/api_op_DeleteVpcPeeringConnection.go index 7cd15f5b44a..e172f3777eb 100644 --- a/service/gamelift/api_op_DeleteVpcPeeringConnection.go +++ b/service/gamelift/api_op_DeleteVpcPeeringConnection.go @@ -18,20 +18,20 @@ import ( // manage the Amazon GameLift fleets. Identify the connection to delete by the // connection ID and fleet ID. If successful, the connection is removed. // -// * +// * // CreateVpcPeeringAuthorization // -// * DescribeVpcPeeringAuthorizations +// * DescribeVpcPeeringAuthorizations // -// * +// * // DeleteVpcPeeringAuthorization // -// * CreateVpcPeeringConnection +// * CreateVpcPeeringConnection // -// * +// * // DescribeVpcPeeringConnections // -// * DeleteVpcPeeringConnection +// * DeleteVpcPeeringConnection func (c *Client) DeleteVpcPeeringConnection(ctx context.Context, params *DeleteVpcPeeringConnectionInput, optFns ...func(*Options)) (*DeleteVpcPeeringConnectionOutput, error) { if params == nil { params = &DeleteVpcPeeringConnectionInput{} diff --git a/service/gamelift/api_op_DeregisterGameServer.go b/service/gamelift/api_op_DeregisterGameServer.go index 0115877e5d8..0742ef4d692 100644 --- a/service/gamelift/api_op_DeregisterGameServer.go +++ b/service/gamelift/api_op_DeregisterGameServer.go @@ -20,18 +20,18 @@ import ( // (https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/gsg-intro.html) // Related operations // -// * RegisterGameServer +// * RegisterGameServer // -// * ListGameServers +// * ListGameServers // -// * +// * // ClaimGameServer // -// * DescribeGameServer +// * DescribeGameServer // -// * UpdateGameServer +// * UpdateGameServer // -// * +// * // DeregisterGameServer func (c *Client) DeregisterGameServer(ctx context.Context, params *DeregisterGameServerInput, optFns ...func(*Options)) (*DeregisterGameServerOutput, error) { if params == nil { diff --git a/service/gamelift/api_op_DescribeAlias.go b/service/gamelift/api_op_DescribeAlias.go index f5e6aec433c..669c3c3b3eb 100644 --- a/service/gamelift/api_op_DescribeAlias.go +++ b/service/gamelift/api_op_DescribeAlias.go @@ -16,18 +16,18 @@ import ( // properties, specify the alias ID. If successful, the requested alias record is // returned. 
// -// * CreateAlias +// * CreateAlias // -// * ListAliases +// * ListAliases // -// * DescribeAlias +// * DescribeAlias // -// * -// UpdateAlias +// * UpdateAlias // -// * DeleteAlias +// * +// DeleteAlias // -// * ResolveAlias +// * ResolveAlias func (c *Client) DescribeAlias(ctx context.Context, params *DescribeAliasInput, optFns ...func(*Options)) (*DescribeAliasOutput, error) { if params == nil { params = &DescribeAliasInput{} diff --git a/service/gamelift/api_op_DescribeBuild.go b/service/gamelift/api_op_DescribeBuild.go index 76868819af7..b4b59b869e5 100644 --- a/service/gamelift/api_op_DescribeBuild.go +++ b/service/gamelift/api_op_DescribeBuild.go @@ -17,16 +17,16 @@ import ( // (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-build-intro.html) // Related operations // -// * CreateBuild +// * CreateBuild // -// * ListBuilds +// * ListBuilds // -// * DescribeBuild +// * DescribeBuild // +// * +// UpdateBuild // -// * UpdateBuild -// -// * DeleteBuild +// * DeleteBuild func (c *Client) DescribeBuild(ctx context.Context, params *DescribeBuildInput, optFns ...func(*Options)) (*DescribeBuildOutput, error) { if params == nil { params = &DescribeBuildInput{} diff --git a/service/gamelift/api_op_DescribeEC2InstanceLimits.go b/service/gamelift/api_op_DescribeEC2InstanceLimits.go index f3aaa5a10f0..143747edadf 100644 --- a/service/gamelift/api_op_DescribeEC2InstanceLimits.go +++ b/service/gamelift/api_op_DescribeEC2InstanceLimits.go @@ -13,11 +13,11 @@ import ( // Retrieves the following information for the specified EC2 instance type: // -// * +// * // Maximum number of instances allowed per AWS account (service limit). // -// * -// Current usage for the AWS account. +// * Current +// usage for the AWS account. // // To learn more about the capabilities of each // instance type, see Amazon EC2 Instance Types @@ -26,19 +26,19 @@ import ( // (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html) // Related operations // -// * CreateFleet +// * CreateFleet // -// * ListFleets +// * ListFleets // -// * DeleteFleet +// * DeleteFleet // +// * +// DescribeFleetAttributes // -// * DescribeFleetAttributes +// * UpdateFleetAttributes // -// * UpdateFleetAttributes -// -// * StartFleetActions -// or StopFleetActions +// * StartFleetActions or +// StopFleetActions func (c *Client) DescribeEC2InstanceLimits(ctx context.Context, params *DescribeEC2InstanceLimitsInput, optFns ...func(*Options)) (*DescribeEC2InstanceLimitsOutput, error) { if params == nil { params = &DescribeEC2InstanceLimitsInput{} diff --git a/service/gamelift/api_op_DescribeFleetAttributes.go b/service/gamelift/api_op_DescribeFleetAttributes.go index 14b62faaff6..8a655375834 100644 --- a/service/gamelift/api_op_DescribeFleetAttributes.go +++ b/service/gamelift/api_op_DescribeFleetAttributes.go @@ -24,36 +24,36 @@ import ( // (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html) // Related operations // -// * CreateFleet +// * CreateFleet // -// * ListFleets +// * ListFleets // -// * DeleteFleet +// * DeleteFleet // +// * Describe +// fleets: // -// * Describe fleets: +// * DescribeFleetAttributes // -// * DescribeFleetAttributes +// * DescribeFleetCapacity // -// * -// DescribeFleetCapacity +// * +// DescribeFleetPortSettings // -// * DescribeFleetPortSettings +// * DescribeFleetUtilization // -// * -// DescribeFleetUtilization +// * +// DescribeRuntimeConfiguration // -// * DescribeRuntimeConfiguration +// * DescribeEC2InstanceLimits // -// * -// 
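DescribeEC2InstanceLimits, shown above, reports the per-instance-type service limit alongside the account's current usage. Here is a minimal call sketch using the signature from this diff; leaving the input empty (to cover all instance types) and the shape of the output are assumptions, so treat this as illustrative only.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/service/gamelift"
)

// ec2InstanceLimits fetches the account's EC2 instance limits and usage as
// reported by GameLift; callers inspect the returned output for the details.
func ec2InstanceLimits(ctx context.Context, c *gamelift.Client) (*gamelift.DescribeEC2InstanceLimitsOutput, error) {
	return c.DescribeEC2InstanceLimits(ctx, &gamelift.DescribeEC2InstanceLimitsInput{})
}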
DescribeEC2InstanceLimits +// * +// DescribeFleetEvents // -// * DescribeFleetEvents +// * UpdateFleetAttributes // -// * -// UpdateFleetAttributes -// -// * StartFleetActions or StopFleetActions +// * StartFleetActions or +// StopFleetActions func (c *Client) DescribeFleetAttributes(ctx context.Context, params *DescribeFleetAttributesInput, optFns ...func(*Options)) (*DescribeFleetAttributesOutput, error) { if params == nil { params = &DescribeFleetAttributesInput{} diff --git a/service/gamelift/api_op_DescribeFleetCapacity.go b/service/gamelift/api_op_DescribeFleetCapacity.go index 513a2227e08..23f329cddcd 100644 --- a/service/gamelift/api_op_DescribeFleetCapacity.go +++ b/service/gamelift/api_op_DescribeFleetCapacity.go @@ -28,36 +28,36 @@ import ( // (https://docs.aws.amazon.com/gamelift/latest/developerguide/monitoring-cloudwatch.html#gamelift-metrics-fleet) // Related operations // -// * CreateFleet +// * CreateFleet // -// * ListFleets +// * ListFleets // -// * DeleteFleet +// * DeleteFleet // +// * Describe +// fleets: // -// * Describe fleets: +// * DescribeFleetAttributes // -// * DescribeFleetAttributes +// * DescribeFleetCapacity // -// * -// DescribeFleetCapacity +// * +// DescribeFleetPortSettings // -// * DescribeFleetPortSettings +// * DescribeFleetUtilization // -// * -// DescribeFleetUtilization +// * +// DescribeRuntimeConfiguration // -// * DescribeRuntimeConfiguration +// * DescribeEC2InstanceLimits // -// * -// DescribeEC2InstanceLimits +// * +// DescribeFleetEvents // -// * DescribeFleetEvents +// * UpdateFleetAttributes // -// * -// UpdateFleetAttributes -// -// * StartFleetActions or StopFleetActions +// * StartFleetActions or +// StopFleetActions func (c *Client) DescribeFleetCapacity(ctx context.Context, params *DescribeFleetCapacityInput, optFns ...func(*Options)) (*DescribeFleetCapacityOutput, error) { if params == nil { params = &DescribeFleetCapacityInput{} diff --git a/service/gamelift/api_op_DescribeFleetEvents.go b/service/gamelift/api_op_DescribeFleetEvents.go index e7057280e64..ad4b2164542 100644 --- a/service/gamelift/api_op_DescribeFleetEvents.go +++ b/service/gamelift/api_op_DescribeFleetEvents.go @@ -19,36 +19,36 @@ import ( // (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html) // Related operations // -// * CreateFleet +// * CreateFleet // -// * ListFleets +// * ListFleets // -// * DeleteFleet +// * DeleteFleet // +// * Describe +// fleets: // -// * Describe fleets: +// * DescribeFleetAttributes // -// * DescribeFleetAttributes +// * DescribeFleetCapacity // -// * -// DescribeFleetCapacity +// * +// DescribeFleetPortSettings // -// * DescribeFleetPortSettings +// * DescribeFleetUtilization // -// * -// DescribeFleetUtilization +// * +// DescribeRuntimeConfiguration // -// * DescribeRuntimeConfiguration +// * DescribeEC2InstanceLimits // -// * -// DescribeEC2InstanceLimits +// * +// DescribeFleetEvents // -// * DescribeFleetEvents +// * UpdateFleetAttributes // -// * -// UpdateFleetAttributes -// -// * StartFleetActions or StopFleetActions +// * StartFleetActions or +// StopFleetActions func (c *Client) DescribeFleetEvents(ctx context.Context, params *DescribeFleetEventsInput, optFns ...func(*Options)) (*DescribeFleetEventsOutput, error) { if params == nil { params = &DescribeFleetEventsInput{} diff --git a/service/gamelift/api_op_DescribeFleetPortSettings.go b/service/gamelift/api_op_DescribeFleetPortSettings.go index 86451b1951e..4cef5e8f48b 100644 --- a/service/gamelift/api_op_DescribeFleetPortSettings.go +++ 
b/service/gamelift/api_op_DescribeFleetPortSettings.go @@ -22,36 +22,36 @@ import ( // (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html) // Related operations // -// * CreateFleet +// * CreateFleet // -// * ListFleets +// * ListFleets // -// * DeleteFleet +// * DeleteFleet // +// * Describe +// fleets: // -// * Describe fleets: +// * DescribeFleetAttributes // -// * DescribeFleetAttributes +// * DescribeFleetCapacity // -// * -// DescribeFleetCapacity +// * +// DescribeFleetPortSettings // -// * DescribeFleetPortSettings +// * DescribeFleetUtilization // -// * -// DescribeFleetUtilization +// * +// DescribeRuntimeConfiguration // -// * DescribeRuntimeConfiguration +// * DescribeEC2InstanceLimits // -// * -// DescribeEC2InstanceLimits +// * +// DescribeFleetEvents // -// * DescribeFleetEvents +// * UpdateFleetAttributes // -// * -// UpdateFleetAttributes -// -// * StartFleetActions or StopFleetActions +// * StartFleetActions or +// StopFleetActions func (c *Client) DescribeFleetPortSettings(ctx context.Context, params *DescribeFleetPortSettingsInput, optFns ...func(*Options)) (*DescribeFleetPortSettingsOutput, error) { if params == nil { params = &DescribeFleetPortSettingsInput{} diff --git a/service/gamelift/api_op_DescribeFleetUtilization.go b/service/gamelift/api_op_DescribeFleetUtilization.go index 9d178c08b41..c21457aa408 100644 --- a/service/gamelift/api_op_DescribeFleetUtilization.go +++ b/service/gamelift/api_op_DescribeFleetUtilization.go @@ -26,36 +26,36 @@ import ( // (https://docs.aws.amazon.com/gamelift/latest/developerguide/monitoring-cloudwatch.html#gamelift-metrics-fleet) // Related operations // -// * CreateFleet +// * CreateFleet // -// * ListFleets +// * ListFleets // -// * DeleteFleet +// * DeleteFleet // +// * Describe +// fleets: // -// * Describe fleets: +// * DescribeFleetAttributes // -// * DescribeFleetAttributes +// * DescribeFleetCapacity // -// * -// DescribeFleetCapacity +// * +// DescribeFleetPortSettings // -// * DescribeFleetPortSettings +// * DescribeFleetUtilization // -// * -// DescribeFleetUtilization +// * +// DescribeRuntimeConfiguration // -// * DescribeRuntimeConfiguration +// * DescribeEC2InstanceLimits // -// * -// DescribeEC2InstanceLimits +// * +// DescribeFleetEvents // -// * DescribeFleetEvents +// * UpdateFleetAttributes // -// * -// UpdateFleetAttributes -// -// * StartFleetActions or StopFleetActions +// * StartFleetActions or +// StopFleetActions func (c *Client) DescribeFleetUtilization(ctx context.Context, params *DescribeFleetUtilizationInput, optFns ...func(*Options)) (*DescribeFleetUtilizationOutput, error) { if params == nil { params = &DescribeFleetUtilizationInput{} diff --git a/service/gamelift/api_op_DescribeGameServer.go b/service/gamelift/api_op_DescribeGameServer.go index c4c379f0263..9495082df6e 100644 --- a/service/gamelift/api_op_DescribeGameServer.go +++ b/service/gamelift/api_op_DescribeGameServer.go @@ -20,18 +20,18 @@ import ( // (https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/gsg-intro.html) // Related operations // -// * RegisterGameServer +// * RegisterGameServer // -// * ListGameServers +// * ListGameServers // -// * +// * // ClaimGameServer // -// * DescribeGameServer +// * DescribeGameServer // -// * UpdateGameServer +// * UpdateGameServer // -// * +// * // DeregisterGameServer func (c *Client) DescribeGameServer(ctx context.Context, params *DescribeGameServerInput, optFns ...func(*Options)) (*DescribeGameServerOutput, error) { if params == nil { diff --git 
a/service/gamelift/api_op_DescribeGameServerGroup.go b/service/gamelift/api_op_DescribeGameServerGroup.go index fabb74ce858..6a34ab98876 100644 --- a/service/gamelift/api_op_DescribeGameServerGroup.go +++ b/service/gamelift/api_op_DescribeGameServerGroup.go @@ -22,24 +22,23 @@ import ( // (https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/gsg-intro.html) // Related operations // -// * CreateGameServerGroup +// * CreateGameServerGroup // -// * ListGameServerGroups +// * ListGameServerGroups // +// * +// DescribeGameServerGroup // -// * DescribeGameServerGroup +// * UpdateGameServerGroup // -// * UpdateGameServerGroup +// * DeleteGameServerGroup // -// * -// DeleteGameServerGroup +// * +// ResumeGameServerGroup // -// * ResumeGameServerGroup +// * SuspendGameServerGroup // -// * -// SuspendGameServerGroup -// -// * DescribeGameServerInstances +// * DescribeGameServerInstances func (c *Client) DescribeGameServerGroup(ctx context.Context, params *DescribeGameServerGroupInput, optFns ...func(*Options)) (*DescribeGameServerGroupOutput, error) { if params == nil { params = &DescribeGameServerGroupInput{} diff --git a/service/gamelift/api_op_DescribeGameServerInstances.go b/service/gamelift/api_op_DescribeGameServerInstances.go index ae33620ec16..e9513d962aa 100644 --- a/service/gamelift/api_op_DescribeGameServerInstances.go +++ b/service/gamelift/api_op_DescribeGameServerInstances.go @@ -28,24 +28,23 @@ import ( // (https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/gsg-intro.html) // Related operations // -// * CreateGameServerGroup +// * CreateGameServerGroup // -// * ListGameServerGroups +// * ListGameServerGroups // +// * +// DescribeGameServerGroup // -// * DescribeGameServerGroup +// * UpdateGameServerGroup // -// * UpdateGameServerGroup +// * DeleteGameServerGroup // -// * -// DeleteGameServerGroup +// * +// ResumeGameServerGroup // -// * ResumeGameServerGroup +// * SuspendGameServerGroup // -// * -// SuspendGameServerGroup -// -// * DescribeGameServerInstances +// * DescribeGameServerInstances func (c *Client) DescribeGameServerInstances(ctx context.Context, params *DescribeGameServerInstancesInput, optFns ...func(*Options)) (*DescribeGameServerInstancesOutput, error) { if params == nil { params = &DescribeGameServerInstancesInput{} diff --git a/service/gamelift/api_op_DescribeGameSessionDetails.go b/service/gamelift/api_op_DescribeGameSessionDetails.go index 45d1f9ae871..e7ed7d6e719 100644 --- a/service/gamelift/api_op_DescribeGameSessionDetails.go +++ b/service/gamelift/api_op_DescribeGameSessionDetails.go @@ -21,29 +21,29 @@ import ( // results as a set of sequential pages. If successful, a GameSessionDetail object // is returned for each session matching the request. 
// -// * CreateGameSession +// * CreateGameSession // +// * +// DescribeGameSessions // -// * DescribeGameSessions +// * DescribeGameSessionDetails // -// * DescribeGameSessionDetails +// * SearchGameSessions // -// * -// SearchGameSessions +// * +// UpdateGameSession // -// * UpdateGameSession +// * GetGameSessionLogUrl // -// * GetGameSessionLogUrl +// * Game session placements // -// * -// Game session placements +// * +// StartGameSessionPlacement // -// * StartGameSessionPlacement +// * DescribeGameSessionPlacement // -// * -// DescribeGameSessionPlacement -// -// * StopGameSessionPlacement +// * +// StopGameSessionPlacement func (c *Client) DescribeGameSessionDetails(ctx context.Context, params *DescribeGameSessionDetailsInput, optFns ...func(*Options)) (*DescribeGameSessionDetailsOutput, error) { if params == nil { params = &DescribeGameSessionDetailsInput{} diff --git a/service/gamelift/api_op_DescribeGameSessionPlacement.go b/service/gamelift/api_op_DescribeGameSessionPlacement.go index ddd1ff9533b..c27deb8fb4c 100644 --- a/service/gamelift/api_op_DescribeGameSessionPlacement.go +++ b/service/gamelift/api_op_DescribeGameSessionPlacement.go @@ -15,29 +15,29 @@ import ( // get game session placement details, specify the placement ID. If successful, a // GameSessionPlacement object is returned. // -// * CreateGameSession +// * CreateGameSession // -// * +// * // DescribeGameSessions // -// * DescribeGameSessionDetails +// * DescribeGameSessionDetails // -// * -// SearchGameSessions +// * SearchGameSessions // -// * UpdateGameSession +// * +// UpdateGameSession // -// * GetGameSessionLogUrl +// * GetGameSessionLogUrl // -// * -// Game session placements +// * Game session placements // -// * StartGameSessionPlacement +// * +// StartGameSessionPlacement // -// * -// DescribeGameSessionPlacement +// * DescribeGameSessionPlacement // -// * StopGameSessionPlacement +// * +// StopGameSessionPlacement func (c *Client) DescribeGameSessionPlacement(ctx context.Context, params *DescribeGameSessionPlacementInput, optFns ...func(*Options)) (*DescribeGameSessionPlacementOutput, error) { if params == nil { params = &DescribeGameSessionPlacementInput{} diff --git a/service/gamelift/api_op_DescribeGameSessionQueues.go b/service/gamelift/api_op_DescribeGameSessionQueues.go index 7e648df7b84..e2c593fdacd 100644 --- a/service/gamelift/api_op_DescribeGameSessionQueues.go +++ b/service/gamelift/api_op_DescribeGameSessionQueues.go @@ -19,15 +19,14 @@ import ( // (https://docs.aws.amazon.com/gamelift/latest/developerguide/queues-console.html) // Related operations // -// * CreateGameSessionQueue +// * CreateGameSessionQueue // -// * -// DescribeGameSessionQueues +// * DescribeGameSessionQueues // -// * UpdateGameSessionQueue +// * +// UpdateGameSessionQueue // -// * -// DeleteGameSessionQueue +// * DeleteGameSessionQueue func (c *Client) DescribeGameSessionQueues(ctx context.Context, params *DescribeGameSessionQueuesInput, optFns ...func(*Options)) (*DescribeGameSessionQueuesOutput, error) { if params == nil { params = &DescribeGameSessionQueuesInput{} diff --git a/service/gamelift/api_op_DescribeGameSessions.go b/service/gamelift/api_op_DescribeGameSessions.go index 0354c98acbd..f52c46874e8 100644 --- a/service/gamelift/api_op_DescribeGameSessions.go +++ b/service/gamelift/api_op_DescribeGameSessions.go @@ -21,29 +21,28 @@ import ( // sequential pages. If successful, a GameSession object is returned for each game // session matching the request. Available in Amazon GameLift Local. 
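The DescribeGameSessions doc comment rewrapped above returns a GameSession object per match; a minimal sketch of that call, assuming a configured gamelift.Client, with the fleet ID and status filter as placeholder values:

package examples

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/gamelift"
)

// listActiveGameSessions prints the IDs and statuses of ACTIVE game sessions
// on a single fleet. The fleet ID is a placeholder.
func listActiveGameSessions(ctx context.Context, client *gamelift.Client, fleetID string) error {
	out, err := client.DescribeGameSessions(ctx, &gamelift.DescribeGameSessionsInput{
		FleetId:      aws.String(fleetID),
		StatusFilter: aws.String("ACTIVE"), // e.g. ACTIVE or TERMINATED
		Limit:        aws.Int32(20),
	})
	if err != nil {
		return err
	}
	for _, gs := range out.GameSessions {
		if gs.GameSessionId != nil {
			fmt.Printf("%s (%s)\n", *gs.GameSessionId, gs.Status)
		}
	}
	return nil
}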
// -// * +// * // CreateGameSession // -// * DescribeGameSessions +// * DescribeGameSessions // -// * -// DescribeGameSessionDetails +// * DescribeGameSessionDetails // -// * SearchGameSessions -// -// * UpdateGameSession +// * +// SearchGameSessions // +// * UpdateGameSession // // * GetGameSessionLogUrl // -// * Game session placements +// * Game session +// placements // -// * -// StartGameSessionPlacement +// * StartGameSessionPlacement // -// * DescribeGameSessionPlacement +// * DescribeGameSessionPlacement // -// * +// * // StopGameSessionPlacement func (c *Client) DescribeGameSessions(ctx context.Context, params *DescribeGameSessionsInput, optFns ...func(*Options)) (*DescribeGameSessionsOutput, error) { if params == nil { diff --git a/service/gamelift/api_op_DescribeInstances.go b/service/gamelift/api_op_DescribeInstances.go index f81f57554fb..0346d91665a 100644 --- a/service/gamelift/api_op_DescribeInstances.go +++ b/service/gamelift/api_op_DescribeInstances.go @@ -23,9 +23,9 @@ import ( // (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-creating-debug.html) // Related operations // -// * DescribeInstances +// * DescribeInstances // -// * GetInstanceAccess +// * GetInstanceAccess func (c *Client) DescribeInstances(ctx context.Context, params *DescribeInstancesInput, optFns ...func(*Options)) (*DescribeInstancesOutput, error) { if params == nil { params = &DescribeInstancesInput{} diff --git a/service/gamelift/api_op_DescribeMatchmaking.go b/service/gamelift/api_op_DescribeMatchmaking.go index 0a9c75c7988..a1bc0519e81 100644 --- a/service/gamelift/api_op_DescribeMatchmaking.go +++ b/service/gamelift/api_op_DescribeMatchmaking.go @@ -28,16 +28,16 @@ import ( // (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-notification.html) // Related operations // -// * StartMatchmaking +// * StartMatchmaking // -// * DescribeMatchmaking +// * DescribeMatchmaking // -// * +// * // StopMatchmaking // -// * AcceptMatch +// * AcceptMatch // -// * StartMatchBackfill +// * StartMatchBackfill func (c *Client) DescribeMatchmaking(ctx context.Context, params *DescribeMatchmakingInput, optFns ...func(*Options)) (*DescribeMatchmakingOutput, error) { if params == nil { params = &DescribeMatchmakingInput{} diff --git a/service/gamelift/api_op_DescribeMatchmakingConfigurations.go b/service/gamelift/api_op_DescribeMatchmakingConfigurations.go index e7246c51f08..e9adc499162 100644 --- a/service/gamelift/api_op_DescribeMatchmakingConfigurations.go +++ b/service/gamelift/api_op_DescribeMatchmakingConfigurations.go @@ -22,24 +22,24 @@ import ( // (https://docs.aws.amazon.com/gamelift/latest/developerguide/matchmaker-build.html) // Related operations // -// * CreateMatchmakingConfiguration +// * CreateMatchmakingConfiguration // -// * +// * // DescribeMatchmakingConfigurations // -// * UpdateMatchmakingConfiguration +// * UpdateMatchmakingConfiguration // -// * +// * // DeleteMatchmakingConfiguration // -// * CreateMatchmakingRuleSet +// * CreateMatchmakingRuleSet // -// * +// * // DescribeMatchmakingRuleSets // -// * ValidateMatchmakingRuleSet +// * ValidateMatchmakingRuleSet // -// * +// * // DeleteMatchmakingRuleSet func (c *Client) DescribeMatchmakingConfigurations(ctx context.Context, params *DescribeMatchmakingConfigurationsInput, optFns ...func(*Options)) (*DescribeMatchmakingConfigurationsOutput, error) { if params == nil { diff --git a/service/gamelift/api_op_DescribeMatchmakingRuleSets.go b/service/gamelift/api_op_DescribeMatchmakingRuleSets.go index 
233a1fcc372..f2e6ad5632b 100644 --- a/service/gamelift/api_op_DescribeMatchmakingRuleSets.go +++ b/service/gamelift/api_op_DescribeMatchmakingRuleSets.go @@ -17,30 +17,30 @@ import ( // results as a set of sequential pages. If successful, a rule set is returned for // each requested name. Learn more // -// * Build a Rule Set +// * Build a Rule Set // (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-rulesets.html) // // Related // operations // -// * CreateMatchmakingConfiguration +// * CreateMatchmakingConfiguration // -// * +// * // DescribeMatchmakingConfigurations // -// * UpdateMatchmakingConfiguration +// * UpdateMatchmakingConfiguration // -// * +// * // DeleteMatchmakingConfiguration // -// * CreateMatchmakingRuleSet +// * CreateMatchmakingRuleSet // -// * +// * // DescribeMatchmakingRuleSets // -// * ValidateMatchmakingRuleSet +// * ValidateMatchmakingRuleSet // -// * +// * // DeleteMatchmakingRuleSet func (c *Client) DescribeMatchmakingRuleSets(ctx context.Context, params *DescribeMatchmakingRuleSetsInput, optFns ...func(*Options)) (*DescribeMatchmakingRuleSetsOutput, error) { if params == nil { diff --git a/service/gamelift/api_op_DescribePlayerSessions.go b/service/gamelift/api_op_DescribePlayerSessions.go index 1d1cd2d42b0..be60840d8db 100644 --- a/service/gamelift/api_op_DescribePlayerSessions.go +++ b/service/gamelift/api_op_DescribePlayerSessions.go @@ -22,22 +22,22 @@ import ( // pages. If successful, a PlayerSession object is returned for each session // matching the request. Available in Amazon GameLift Local. // -// * +// * // CreatePlayerSession // -// * CreatePlayerSessions +// * CreatePlayerSessions // -// * DescribePlayerSessions +// * DescribePlayerSessions // +// * Game +// session placements // -// * Game session placements +// * StartGameSessionPlacement // -// * StartGameSessionPlacement -// -// * +// * // DescribeGameSessionPlacement // -// * StopGameSessionPlacement +// * StopGameSessionPlacement func (c *Client) DescribePlayerSessions(ctx context.Context, params *DescribePlayerSessionsInput, optFns ...func(*Options)) (*DescribePlayerSessionsOutput, error) { if params == nil { params = &DescribePlayerSessionsInput{} @@ -79,19 +79,18 @@ type DescribePlayerSessionsInput struct { // Player session status to filter results on. Possible player session statuses // include the following: // - // * RESERVED -- The player session request has been + // * RESERVED -- The player session request has been // received, but the player has not yet connected to the server process and/or been // validated. // - // * ACTIVE -- The player has been validated by the server process - // and is currently connected. + // * ACTIVE -- The player has been validated by the server process and + // is currently connected. // - // * COMPLETED -- The player connection has been + // * COMPLETED -- The player connection has been // dropped. // - // * TIMEDOUT -- A player session request was received, but the - // player did not connect and/or was not validated within the timeout limit (60 - // seconds). + // * TIMEDOUT -- A player session request was received, but the player + // did not connect and/or was not validated within the timeout limit (60 seconds). 
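The status values listed above (RESERVED, ACTIVE, COMPLETED, TIMEDOUT) feed the PlayerSessionStatusFilter field shown next; a brief sketch of that filter in use, assuming a configured gamelift.Client and a placeholder game session ID:

package examples

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/gamelift"
)

// listTimedOutPlayers prints the players whose session requests timed out for
// one game session. The game session ID is a placeholder.
func listTimedOutPlayers(ctx context.Context, client *gamelift.Client, gameSessionID string) error {
	out, err := client.DescribePlayerSessions(ctx, &gamelift.DescribePlayerSessionsInput{
		GameSessionId:             aws.String(gameSessionID),
		PlayerSessionStatusFilter: aws.String("TIMEDOUT"), // RESERVED, ACTIVE, COMPLETED, or TIMEDOUT
	})
	if err != nil {
		return err
	}
	for _, ps := range out.PlayerSessions {
		if ps.PlayerId != nil {
			fmt.Println("timed out:", *ps.PlayerId)
		}
	}
	return nil
}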
PlayerSessionStatusFilter *string } diff --git a/service/gamelift/api_op_DescribeRuntimeConfiguration.go b/service/gamelift/api_op_DescribeRuntimeConfiguration.go index 27b1f9cb647..f52dcb1baf2 100644 --- a/service/gamelift/api_op_DescribeRuntimeConfiguration.go +++ b/service/gamelift/api_op_DescribeRuntimeConfiguration.go @@ -22,36 +22,36 @@ import ( // (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-multiprocess.html) // Related operations // -// * CreateFleet +// * CreateFleet // -// * ListFleets +// * ListFleets // -// * DeleteFleet +// * DeleteFleet // +// * Describe +// fleets: // -// * Describe fleets: +// * DescribeFleetAttributes // -// * DescribeFleetAttributes +// * DescribeFleetCapacity // -// * -// DescribeFleetCapacity +// * +// DescribeFleetPortSettings // -// * DescribeFleetPortSettings +// * DescribeFleetUtilization // -// * -// DescribeFleetUtilization +// * +// DescribeRuntimeConfiguration // -// * DescribeRuntimeConfiguration +// * DescribeEC2InstanceLimits // -// * -// DescribeEC2InstanceLimits +// * +// DescribeFleetEvents // -// * DescribeFleetEvents +// * UpdateFleetAttributes // -// * -// UpdateFleetAttributes -// -// * StartFleetActions or StopFleetActions +// * StartFleetActions or +// StopFleetActions func (c *Client) DescribeRuntimeConfiguration(ctx context.Context, params *DescribeRuntimeConfigurationInput, optFns ...func(*Options)) (*DescribeRuntimeConfigurationOutput, error) { if params == nil { params = &DescribeRuntimeConfigurationInput{} diff --git a/service/gamelift/api_op_DescribeScalingPolicies.go b/service/gamelift/api_op_DescribeScalingPolicies.go index 22b96fd2f28..68b06fedc7d 100644 --- a/service/gamelift/api_op_DescribeScalingPolicies.go +++ b/service/gamelift/api_op_DescribeScalingPolicies.go @@ -21,28 +21,27 @@ import ( // fleet's scaling policies are in force or suspended, call DescribeFleetAttributes // and check the stopped actions. // -// * DescribeFleetCapacity +// * DescribeFleetCapacity // -// * +// * // UpdateFleetCapacity // -// * DescribeEC2InstanceLimits +// * DescribeEC2InstanceLimits // -// * Manage scaling -// policies: +// * Manage scaling policies: // -// * PutScalingPolicy (auto-scaling) +// * +// PutScalingPolicy (auto-scaling) // -// * -// DescribeScalingPolicies (auto-scaling) +// * DescribeScalingPolicies (auto-scaling) // -// * DeleteScalingPolicy -// (auto-scaling) +// * +// DeleteScalingPolicy (auto-scaling) // -// * Manage fleet actions: -// -// * StartFleetActions +// * Manage fleet actions: // +// * +// StartFleetActions // // * StopFleetActions func (c *Client) DescribeScalingPolicies(ctx context.Context, params *DescribeScalingPoliciesInput, optFns ...func(*Options)) (*DescribeScalingPoliciesOutput, error) { @@ -81,25 +80,25 @@ type DescribeScalingPoliciesInput struct { // Scaling policy status to filter results on. A scaling policy is only in force // when in an ACTIVE status. // - // * ACTIVE -- The scaling policy is currently in + // * ACTIVE -- The scaling policy is currently in // force. // - // * UPDATEREQUESTED -- A request to update the scaling policy has been + // * UPDATEREQUESTED -- A request to update the scaling policy has been // received. // - // * UPDATING -- A change is being made to the scaling policy. - // - // - // * DELETEREQUESTED -- A request to delete the scaling policy has been received. + // * UPDATING -- A change is being made to the scaling policy. // + // * + // DELETEREQUESTED -- A request to delete the scaling policy has been received. 
// - // * DELETING -- The scaling policy is being deleted. + // * + // DELETING -- The scaling policy is being deleted. // - // * DELETED -- The scaling + // * DELETED -- The scaling // policy has been deleted. // - // * ERROR -- An error occurred in creating the - // policy. It should be removed and recreated. + // * ERROR -- An error occurred in creating the policy. + // It should be removed and recreated. StatusFilter types.ScalingStatusType } diff --git a/service/gamelift/api_op_DescribeScript.go b/service/gamelift/api_op_DescribeScript.go index ed482909121..7bac72b8747 100644 --- a/service/gamelift/api_op_DescribeScript.go +++ b/service/gamelift/api_op_DescribeScript.go @@ -17,16 +17,16 @@ import ( // (https://docs.aws.amazon.com/gamelift/latest/developerguide/realtime-intro.html) // Related operations // -// * CreateScript +// * CreateScript // -// * ListScripts +// * ListScripts // -// * -// DescribeScript +// * DescribeScript // -// * UpdateScript +// * +// UpdateScript // -// * DeleteScript +// * DeleteScript func (c *Client) DescribeScript(ctx context.Context, params *DescribeScriptInput, optFns ...func(*Options)) (*DescribeScriptOutput, error) { if params == nil { params = &DescribeScriptInput{} diff --git a/service/gamelift/api_op_DescribeVpcPeeringAuthorizations.go b/service/gamelift/api_op_DescribeVpcPeeringAuthorizations.go index 524a24f86bf..4f3442f62c0 100644 --- a/service/gamelift/api_op_DescribeVpcPeeringAuthorizations.go +++ b/service/gamelift/api_op_DescribeVpcPeeringAuthorizations.go @@ -15,20 +15,20 @@ import ( // This operation returns all VPC peering authorizations and requests for peering. // This includes those initiated and received by this account. // -// * +// * // CreateVpcPeeringAuthorization // -// * DescribeVpcPeeringAuthorizations +// * DescribeVpcPeeringAuthorizations // -// * +// * // DeleteVpcPeeringAuthorization // -// * CreateVpcPeeringConnection +// * CreateVpcPeeringConnection // -// * +// * // DescribeVpcPeeringConnections // -// * DeleteVpcPeeringConnection +// * DeleteVpcPeeringConnection func (c *Client) DescribeVpcPeeringAuthorizations(ctx context.Context, params *DescribeVpcPeeringAuthorizationsInput, optFns ...func(*Options)) (*DescribeVpcPeeringAuthorizationsOutput, error) { if params == nil { params = &DescribeVpcPeeringAuthorizationsInput{} diff --git a/service/gamelift/api_op_DescribeVpcPeeringConnections.go b/service/gamelift/api_op_DescribeVpcPeeringConnections.go index 003c00fa255..5c6a3cb31b0 100644 --- a/service/gamelift/api_op_DescribeVpcPeeringConnections.go +++ b/service/gamelift/api_op_DescribeVpcPeeringConnections.go @@ -19,20 +19,20 @@ import ( // information includes both active and pending connections. Active connections // identify the IpV4 CIDR block that the VPC uses to connect. 
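Earlier in this hunk, the scaling-policy status values for DescribeScalingPolicies were rewrapped; they map onto the StatusFilter field of DescribeScalingPoliciesInput. A sketch of that filter, assuming a configured gamelift.Client; the fleet ID is a placeholder and the ACTIVE constant name follows the usual pattern of the generated types package:

package examples

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/gamelift"
	"github.com/aws/aws-sdk-go-v2/service/gamelift/types"
)

// listActiveScalingPolicies prints the scaling policies currently in force on
// a fleet.
func listActiveScalingPolicies(ctx context.Context, client *gamelift.Client, fleetID string) error {
	out, err := client.DescribeScalingPolicies(ctx, &gamelift.DescribeScalingPoliciesInput{
		FleetId:      aws.String(fleetID),
		StatusFilter: types.ScalingStatusTypeActive, // only ACTIVE policies are in force
	})
	if err != nil {
		return err
	}
	for _, p := range out.ScalingPolicies {
		if p.Name != nil {
			fmt.Printf("%s: %s\n", *p.Name, p.Status)
		}
	}
	return nil
}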
// -// * +// * // CreateVpcPeeringAuthorization // -// * DescribeVpcPeeringAuthorizations +// * DescribeVpcPeeringAuthorizations // -// * +// * // DeleteVpcPeeringAuthorization // -// * CreateVpcPeeringConnection +// * CreateVpcPeeringConnection // -// * +// * // DescribeVpcPeeringConnections // -// * DeleteVpcPeeringConnection +// * DeleteVpcPeeringConnection func (c *Client) DescribeVpcPeeringConnections(ctx context.Context, params *DescribeVpcPeeringConnectionsInput, optFns ...func(*Options)) (*DescribeVpcPeeringConnectionsOutput, error) { if params == nil { params = &DescribeVpcPeeringConnectionsInput{} diff --git a/service/gamelift/api_op_GetGameSessionLogUrl.go b/service/gamelift/api_op_GetGameSessionLogUrl.go index 91a609d91a8..f0a88f9f0c7 100644 --- a/service/gamelift/api_op_GetGameSessionLogUrl.go +++ b/service/gamelift/api_op_GetGameSessionLogUrl.go @@ -18,29 +18,28 @@ import ( // page for maximum log file sizes. Log files that exceed this limit are not // saved. // -// * CreateGameSession +// * CreateGameSession // -// * DescribeGameSessions +// * DescribeGameSessions // -// * +// * // DescribeGameSessionDetails // -// * SearchGameSessions +// * SearchGameSessions // -// * UpdateGameSession +// * UpdateGameSession // +// * +// GetGameSessionLogUrl // -// * GetGameSessionLogUrl +// * Game session placements // -// * Game session placements +// * StartGameSessionPlacement // -// * -// StartGameSessionPlacement +// * +// DescribeGameSessionPlacement // -// * DescribeGameSessionPlacement -// -// * -// StopGameSessionPlacement +// * StopGameSessionPlacement func (c *Client) GetGameSessionLogUrl(ctx context.Context, params *GetGameSessionLogUrlInput, optFns ...func(*Options)) (*GetGameSessionLogUrlOutput, error) { if params == nil { params = &GetGameSessionLogUrlInput{} diff --git a/service/gamelift/api_op_GetInstanceAccess.go b/service/gamelift/api_op_GetInstanceAccess.go index 863b026b408..5f7b34ae975 100644 --- a/service/gamelift/api_op_GetInstanceAccess.go +++ b/service/gamelift/api_op_GetInstanceAccess.go @@ -30,9 +30,9 @@ import ( // (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-creating-debug.html) // Related operations // -// * DescribeInstances +// * DescribeInstances // -// * GetInstanceAccess +// * GetInstanceAccess func (c *Client) GetInstanceAccess(ctx context.Context, params *GetInstanceAccessInput, optFns ...func(*Options)) (*GetInstanceAccessOutput, error) { if params == nil { params = &GetInstanceAccessInput{} diff --git a/service/gamelift/api_op_ListAliases.go b/service/gamelift/api_op_ListAliases.go index bd768427f7f..79cce5e908c 100644 --- a/service/gamelift/api_op_ListAliases.go +++ b/service/gamelift/api_op_ListAliases.go @@ -16,18 +16,18 @@ import ( // retrieve results in sequential pages. Returned aliases are not listed in any // particular order. // -// * CreateAlias +// * CreateAlias // -// * ListAliases +// * ListAliases // -// * DescribeAlias +// * DescribeAlias // +// * +// UpdateAlias // -// * UpdateAlias +// * DeleteAlias // -// * DeleteAlias -// -// * ResolveAlias +// * ResolveAlias func (c *Client) ListAliases(ctx context.Context, params *ListAliasesInput, optFns ...func(*Options)) (*ListAliasesOutput, error) { if params == nil { params = &ListAliasesInput{} @@ -63,12 +63,12 @@ type ListAliasesInput struct { // aliases with a certain routing type. To retrieve all aliases, leave this // parameter empty. Possible routing types include the following: // - // * SIMPLE -- - // The alias resolves to one specific fleet. 
Use this type when routing to active + // * SIMPLE -- The + // alias resolves to one specific fleet. Use this type when routing to active // fleets. // - // * TERMINAL -- The alias does not resolve to a fleet but instead can - // be used to display a message to the user. A terminal alias throws a + // * TERMINAL -- The alias does not resolve to a fleet but instead can be + // used to display a message to the user. A terminal alias throws a // TerminalRoutingStrategyException with the RoutingStrategy message embedded. RoutingStrategyType types.RoutingStrategyType } diff --git a/service/gamelift/api_op_ListBuilds.go b/service/gamelift/api_op_ListBuilds.go index 7926b19dcfd..9adb91bc9f8 100644 --- a/service/gamelift/api_op_ListBuilds.go +++ b/service/gamelift/api_op_ListBuilds.go @@ -19,16 +19,16 @@ import ( // (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-build-intro.html) // Related operations // -// * CreateBuild +// * CreateBuild // -// * ListBuilds +// * ListBuilds // -// * DescribeBuild +// * DescribeBuild // +// * +// UpdateBuild // -// * UpdateBuild -// -// * DeleteBuild +// * DeleteBuild func (c *Client) ListBuilds(ctx context.Context, params *ListBuildsInput, optFns ...func(*Options)) (*ListBuildsOutput, error) { if params == nil { params = &ListBuildsInput{} @@ -59,17 +59,16 @@ type ListBuildsInput struct { // Build status to filter results by. To retrieve all builds, leave this parameter // empty. Possible build statuses include the following: // - // * INITIALIZED -- A - // new build has been defined, but no files have been uploaded. You cannot create + // * INITIALIZED -- A new + // build has been defined, but no files have been uploaded. You cannot create // fleets for builds that are in this status. When a build is successfully created, // the build status is set to this value. // - // * READY -- The game build has been + // * READY -- The game build has been // successfully uploaded. You can now create new fleets for this build. // - // * - // FAILED -- The game build upload failed. You cannot create new fleets for this - // build. + // * FAILED + // -- The game build upload failed. You cannot create new fleets for this build. 
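The build statuses just described (INITIALIZED, READY, FAILED) correspond to the BuildStatus enum used by the Status field below; a short sketch of filtering ListBuilds to builds that can back new fleets, assuming a configured gamelift.Client and the generated constant name:

package examples

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/gamelift"
	"github.com/aws/aws-sdk-go-v2/service/gamelift/types"
)

// listReadyBuilds prints the builds that can currently back new fleets.
func listReadyBuilds(ctx context.Context, client *gamelift.Client) error {
	out, err := client.ListBuilds(ctx, &gamelift.ListBuildsInput{
		Status: types.BuildStatusReady, // INITIALIZED and FAILED builds cannot host fleets
	})
	if err != nil {
		return err
	}
	for _, b := range out.Builds {
		if b.BuildId != nil && b.Name != nil {
			fmt.Printf("%s  %s\n", *b.BuildId, *b.Name)
		}
	}
	return nil
}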
Status types.BuildStatus } diff --git a/service/gamelift/api_op_ListFleets.go b/service/gamelift/api_op_ListFleets.go index bd3362e26fb..036a6072f7a 100644 --- a/service/gamelift/api_op_ListFleets.go +++ b/service/gamelift/api_op_ListFleets.go @@ -18,19 +18,19 @@ import ( // (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html) // Related operations // -// * CreateFleet +// * CreateFleet // -// * ListFleets +// * ListFleets // -// * DeleteFleet +// * DeleteFleet // +// * +// DescribeFleetAttributes // -// * DescribeFleetAttributes +// * UpdateFleetAttributes // -// * UpdateFleetAttributes -// -// * StartFleetActions -// or StopFleetActions +// * StartFleetActions or +// StopFleetActions func (c *Client) ListFleets(ctx context.Context, params *ListFleetsInput, optFns ...func(*Options)) (*ListFleetsOutput, error) { if params == nil { params = &ListFleetsInput{} diff --git a/service/gamelift/api_op_ListGameServerGroups.go b/service/gamelift/api_op_ListGameServerGroups.go index 37534048eff..953f9df28d1 100644 --- a/service/gamelift/api_op_ListGameServerGroups.go +++ b/service/gamelift/api_op_ListGameServerGroups.go @@ -18,24 +18,23 @@ import ( // Guide (https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/gsg-intro.html) // Related operations // -// * CreateGameServerGroup +// * CreateGameServerGroup // -// * ListGameServerGroups +// * ListGameServerGroups // +// * +// DescribeGameServerGroup // -// * DescribeGameServerGroup +// * UpdateGameServerGroup // -// * UpdateGameServerGroup +// * DeleteGameServerGroup // -// * -// DeleteGameServerGroup +// * +// ResumeGameServerGroup // -// * ResumeGameServerGroup +// * SuspendGameServerGroup // -// * -// SuspendGameServerGroup -// -// * DescribeGameServerInstances +// * DescribeGameServerInstances func (c *Client) ListGameServerGroups(ctx context.Context, params *ListGameServerGroupsInput, optFns ...func(*Options)) (*ListGameServerGroupsOutput, error) { if params == nil { params = &ListGameServerGroupsInput{} diff --git a/service/gamelift/api_op_ListGameServers.go b/service/gamelift/api_op_ListGameServers.go index 9418a1695d6..cdae931fa5a 100644 --- a/service/gamelift/api_op_ListGameServers.go +++ b/service/gamelift/api_op_ListGameServers.go @@ -19,18 +19,18 @@ import ( // (https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/gsg-intro.html) // Related operations // -// * RegisterGameServer +// * RegisterGameServer // -// * ListGameServers +// * ListGameServers // -// * +// * // ClaimGameServer // -// * DescribeGameServer +// * DescribeGameServer // -// * UpdateGameServer +// * UpdateGameServer // -// * +// * // DeregisterGameServer func (c *Client) ListGameServers(ctx context.Context, params *ListGameServersInput, optFns ...func(*Options)) (*ListGameServersOutput, error) { if params == nil { diff --git a/service/gamelift/api_op_ListScripts.go b/service/gamelift/api_op_ListScripts.go index 027c25d785e..64a105f63f8 100644 --- a/service/gamelift/api_op_ListScripts.go +++ b/service/gamelift/api_op_ListScripts.go @@ -16,16 +16,16 @@ import ( // (https://docs.aws.amazon.com/gamelift/latest/developerguide/realtime-intro.html) // Related operations // -// * CreateScript +// * CreateScript // -// * ListScripts +// * ListScripts // -// * -// DescribeScript +// * DescribeScript // -// * UpdateScript +// * +// UpdateScript // -// * DeleteScript +// * DeleteScript func (c *Client) ListScripts(ctx context.Context, params *ListScriptsInput, optFns ...func(*Options)) (*ListScriptsOutput, error) { if params == nil { params = 
&ListScriptsInput{} diff --git a/service/gamelift/api_op_ListTagsForResource.go b/service/gamelift/api_op_ListTagsForResource.go index fa3a3428d52..dc1b5ad18f1 100644 --- a/service/gamelift/api_op_ListTagsForResource.go +++ b/service/gamelift/api_op_ListTagsForResource.go @@ -16,35 +16,33 @@ import ( // the permissions necessary to manage tags for the following GameLift resource // types: // -// * Build +// * Build // -// * Script +// * Script // -// * Fleet +// * Fleet // -// * Alias +// * Alias // -// * -// GameSessionQueue +// * GameSessionQueue // -// * MatchmakingConfiguration +// * +// MatchmakingConfiguration // -// * MatchmakingRuleSet +// * MatchmakingRuleSet // -// To -// list tags for a resource, specify the unique ARN value for the resource. Learn -// more Tagging AWS Resources +// To list tags for a resource, +// specify the unique ARN value for the resource. Learn more Tagging AWS Resources // (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) in the AWS // General Reference AWS Tagging Strategies // (http://aws.amazon.com/answers/account-management/aws-tagging-strategies/) // Related operations // -// * TagResource +// * TagResource // -// * UntagResource +// * UntagResource // -// * -// ListTagsForResource +// * ListTagsForResource func (c *Client) ListTagsForResource(ctx context.Context, params *ListTagsForResourceInput, optFns ...func(*Options)) (*ListTagsForResourceOutput, error) { if params == nil { params = &ListTagsForResourceInput{} diff --git a/service/gamelift/api_op_PutScalingPolicy.go b/service/gamelift/api_op_PutScalingPolicy.go index 84659598154..5821f96ff49 100644 --- a/service/gamelift/api_op_PutScalingPolicy.go +++ b/service/gamelift/api_op_PutScalingPolicy.go @@ -67,29 +67,28 @@ import ( // temporarily suspended, the new policy will be in force once the fleet actions // are restarted. // -// * DescribeFleetCapacity +// * DescribeFleetCapacity // -// * UpdateFleetCapacity +// * UpdateFleetCapacity // -// * +// * // DescribeEC2InstanceLimits // -// * Manage scaling policies: +// * Manage scaling policies: // -// * -// PutScalingPolicy (auto-scaling) -// -// * DescribeScalingPolicies +// * PutScalingPolicy // (auto-scaling) // -// * DeleteScalingPolicy (auto-scaling) +// * DescribeScalingPolicies (auto-scaling) +// +// * DeleteScalingPolicy +// (auto-scaling) // -// * Manage fleet -// actions: +// * Manage fleet actions: // -// * StartFleetActions +// * StartFleetActions // -// * StopFleetActions +// * StopFleetActions func (c *Client) PutScalingPolicy(ctx context.Context, params *PutScalingPolicyInput, optFns ...func(*Options)) (*PutScalingPolicyOutput, error) { if params == nil { params = &PutScalingPolicyInput{} @@ -120,46 +119,45 @@ type PutScalingPolicyInput struct { // GameLift with Amazon CloudWatch // (https://docs.aws.amazon.com/gamelift/latest/developerguide/monitoring-cloudwatch.html). // + // * + // ActivatingGameSessions -- Game sessions in the process of being created. // - // * ActivatingGameSessions -- Game sessions in the process of being created. - // - // - // * ActiveGameSessions -- Game sessions that are currently running. + // * + // ActiveGameSessions -- Game sessions that are currently running. // - // * + // * // ActiveInstances -- Fleet instances that are currently running at least one game // session. // - // * AvailableGameSessions -- Additional game sessions that fleet - // could host simultaneously, given current capacity. 
+ // * AvailableGameSessions -- Additional game sessions that fleet could + // host simultaneously, given current capacity. // - // * - // AvailablePlayerSessions -- Empty player slots in currently active game sessions. - // This includes game sessions that are not currently accepting players. Reserved - // player slots are not included. + // * AvailablePlayerSessions -- Empty + // player slots in currently active game sessions. This includes game sessions that + // are not currently accepting players. Reserved player slots are not included. // - // * CurrentPlayerSessions -- Player slots in - // active game sessions that are being used by a player or are reserved for a - // player. + // * + // CurrentPlayerSessions -- Player slots in active game sessions that are being + // used by a player or are reserved for a player. // - // * IdleInstances -- Active instances that are currently hosting zero - // game sessions. + // * IdleInstances -- Active + // instances that are currently hosting zero game sessions. // - // * PercentAvailableGameSessions -- Unused percentage of the - // total number of game sessions that a fleet could host simultaneously, given - // current capacity. Use this metric for a target-based scaling policy. + // * + // PercentAvailableGameSessions -- Unused percentage of the total number of game + // sessions that a fleet could host simultaneously, given current capacity. Use + // this metric for a target-based scaling policy. // - // * - // PercentIdleInstances -- Percentage of the total number of active instances that - // are hosting zero game sessions. + // * PercentIdleInstances -- + // Percentage of the total number of active instances that are hosting zero game + // sessions. // - // * QueueDepth -- Pending game session - // placement requests, in any queue, where the current fleet is the top-priority - // destination. + // * QueueDepth -- Pending game session placement requests, in any + // queue, where the current fleet is the top-priority destination. // - // * WaitTime -- Current wait time for pending game session - // placement requests, in any queue, where the current fleet is the top-priority - // destination. + // * WaitTime -- + // Current wait time for pending game session placement requests, in any queue, + // where the current fleet is the top-priority destination. // // This member is required. MetricName types.MetricName @@ -192,17 +190,17 @@ type PutScalingPolicyInput struct { // The type of adjustment to make to a fleet's instance count (see // FleetCapacity): // - // * ChangeInCapacity -- add (or subtract) the scaling - // adjustment value from the current instance count. Positive values scale up while - // negative values scale down. + // * ChangeInCapacity -- add (or subtract) the scaling adjustment + // value from the current instance count. Positive values scale up while negative + // values scale down. // - // * ExactCapacity -- set the instance count to - // the scaling adjustment value. + // * ExactCapacity -- set the instance count to the scaling + // adjustment value. // - // * PercentChangeInCapacity -- increase or - // reduce the current instance count by the scaling adjustment, read as a - // percentage. Positive values scale up while negative values scale down; for - // example, a value of "-10" scales the fleet down by 10%. + // * PercentChangeInCapacity -- increase or reduce the current + // instance count by the scaling adjustment, read as a percentage. 
Positive values + // scale up while negative values scale down; for example, a value of "-10" scales + // the fleet down by 10%. ScalingAdjustmentType types.ScalingAdjustmentType // The settings for a target-based scaling policy. diff --git a/service/gamelift/api_op_RegisterGameServer.go b/service/gamelift/api_op_RegisterGameServer.go index a52eacc7033..c2de5650b76 100644 --- a/service/gamelift/api_op_RegisterGameServer.go +++ b/service/gamelift/api_op_RegisterGameServer.go @@ -28,18 +28,18 @@ import ( // Guide (https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/gsg-intro.html) // Related operations // -// * RegisterGameServer +// * RegisterGameServer // -// * ListGameServers +// * ListGameServers // -// * +// * // ClaimGameServer // -// * DescribeGameServer +// * DescribeGameServer // -// * UpdateGameServer +// * UpdateGameServer // -// * +// * // DeregisterGameServer func (c *Client) RegisterGameServer(ctx context.Context, params *RegisterGameServerInput, optFns ...func(*Options)) (*RegisterGameServerOutput, error) { if params == nil { diff --git a/service/gamelift/api_op_RequestUploadCredentials.go b/service/gamelift/api_op_RequestUploadCredentials.go index 3caa9d1e410..b7b054d4a8a 100644 --- a/service/gamelift/api_op_RequestUploadCredentials.go +++ b/service/gamelift/api_op_RequestUploadCredentials.go @@ -20,16 +20,16 @@ import ( // (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-build-cli-uploading.html#gamelift-build-cli-uploading-create-build) // Related operations // -// * CreateBuild +// * CreateBuild // -// * ListBuilds +// * ListBuilds // -// * DescribeBuild +// * DescribeBuild // +// * +// UpdateBuild // -// * UpdateBuild -// -// * DeleteBuild +// * DeleteBuild func (c *Client) RequestUploadCredentials(ctx context.Context, params *RequestUploadCredentialsInput, optFns ...func(*Options)) (*RequestUploadCredentialsOutput, error) { if params == nil { params = &RequestUploadCredentialsInput{} diff --git a/service/gamelift/api_op_ResolveAlias.go b/service/gamelift/api_op_ResolveAlias.go index c76e436caa2..9e2cd191306 100644 --- a/service/gamelift/api_op_ResolveAlias.go +++ b/service/gamelift/api_op_ResolveAlias.go @@ -12,19 +12,18 @@ import ( // Retrieves the fleet ID that an alias is currently pointing to. 
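The PutScalingPolicy parameter descriptions rewrapped above note that PercentAvailableGameSessions is the metric intended for target-based policies; a sketch of such a policy, assuming a configured gamelift.Client, with the fleet ID, policy name, and target value as placeholders and the enum constant names following the generated types package:

package examples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/gamelift"
	"github.com/aws/aws-sdk-go-v2/service/gamelift/types"
)

// putTargetBasedPolicy asks GameLift to keep roughly 10 percent of a fleet's
// game session capacity free.
func putTargetBasedPolicy(ctx context.Context, client *gamelift.Client, fleetID string) error {
	_, err := client.PutScalingPolicy(ctx, &gamelift.PutScalingPolicyInput{
		Name:       aws.String("keep-10-percent-headroom"),
		FleetId:    aws.String(fleetID),
		MetricName: types.MetricNamePercentAvailableGameSessions, // the metric suggested for target-based policies
		PolicyType: types.PolicyTypeTargetBased,
		TargetConfiguration: &types.TargetConfiguration{
			TargetValue: aws.Float64(10.0),
		},
	})
	return err
}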
// -// * -// CreateAlias +// * CreateAlias // -// * ListAliases +// * +// ListAliases // -// * DescribeAlias +// * DescribeAlias // -// * UpdateAlias +// * UpdateAlias // -// * -// DeleteAlias +// * DeleteAlias // -// * ResolveAlias +// * ResolveAlias func (c *Client) ResolveAlias(ctx context.Context, params *ResolveAliasInput, optFns ...func(*Options)) (*ResolveAliasOutput, error) { if params == nil { params = &ResolveAliasInput{} diff --git a/service/gamelift/api_op_ResumeGameServerGroup.go b/service/gamelift/api_op_ResumeGameServerGroup.go index fc0c142e427..fdfa17aa270 100644 --- a/service/gamelift/api_op_ResumeGameServerGroup.go +++ b/service/gamelift/api_op_ResumeGameServerGroup.go @@ -25,24 +25,23 @@ import ( // (https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/gsg-intro.html) // Related operations // -// * CreateGameServerGroup +// * CreateGameServerGroup // -// * ListGameServerGroups +// * ListGameServerGroups // +// * +// DescribeGameServerGroup // -// * DescribeGameServerGroup +// * UpdateGameServerGroup // -// * UpdateGameServerGroup +// * DeleteGameServerGroup // -// * -// DeleteGameServerGroup +// * +// ResumeGameServerGroup // -// * ResumeGameServerGroup +// * SuspendGameServerGroup // -// * -// SuspendGameServerGroup -// -// * DescribeGameServerInstances +// * DescribeGameServerInstances func (c *Client) ResumeGameServerGroup(ctx context.Context, params *ResumeGameServerGroupInput, optFns ...func(*Options)) (*ResumeGameServerGroupOutput, error) { if params == nil { params = &ResumeGameServerGroupInput{} diff --git a/service/gamelift/api_op_SearchGameSessions.go b/service/gamelift/api_op_SearchGameSessions.go index 348d2c89585..e7c8541d5db 100644 --- a/service/gamelift/api_op_SearchGameSessions.go +++ b/service/gamelift/api_op_SearchGameSessions.go @@ -15,40 +15,39 @@ import ( // them in a specified order. You can search or sort by the following game session // attributes: // -// * gameSessionId -- A unique identifier for the game session. -// You can use either a GameSessionId or GameSessionArn value. -// -// * -// gameSessionName -- Name assigned to a game session. This value is set when -// requesting a new game session with CreateGameSession or updating with -// UpdateGameSession. Game session names do not need to be unique to a game -// session. -// -// * gameSessionProperties -- Custom data defined in a game session's -// GameProperty parameter. GameProperty values are stored as key:value pairs; the -// filter expression must indicate the key and a string to search the data values -// for. For example, to search for game sessions with custom data containing the -// key:value pair "gameMode:brawl", specify the following: -// gameSessionProperties.gameMode = "brawl". All custom data values are searched as -// strings. -// -// * maximumSessions -- Maximum number of player sessions allowed for -// a game session. This value is set when requesting a new game session with -// CreateGameSession or updating with UpdateGameSession. -// -// * creationTimeMillis -// -- Value indicating when a game session was created. It is expressed in Unix -// time as milliseconds. -// -// * playerSessionCount -- Number of players currently -// connected to a game session. This value changes rapidly as players join the -// session or drop out. -// -// * hasAvailablePlayerSessions -- Boolean value -// indicating whether a game session has reached its maximum number of players. 
It -// is highly recommended that all search requests include this filter attribute to -// optimize search performance and return only sessions that players can -// join. +// * gameSessionId -- A unique identifier for the game session. You +// can use either a GameSessionId or GameSessionArn value. +// +// * gameSessionName -- +// Name assigned to a game session. This value is set when requesting a new game +// session with CreateGameSession or updating with UpdateGameSession. Game session +// names do not need to be unique to a game session. +// +// * gameSessionProperties -- +// Custom data defined in a game session's GameProperty parameter. GameProperty +// values are stored as key:value pairs; the filter expression must indicate the +// key and a string to search the data values for. For example, to search for game +// sessions with custom data containing the key:value pair "gameMode:brawl", +// specify the following: gameSessionProperties.gameMode = "brawl". All custom data +// values are searched as strings. +// +// * maximumSessions -- Maximum number of player +// sessions allowed for a game session. This value is set when requesting a new +// game session with CreateGameSession or updating with UpdateGameSession. +// +// * +// creationTimeMillis -- Value indicating when a game session was created. It is +// expressed in Unix time as milliseconds. +// +// * playerSessionCount -- Number of +// players currently connected to a game session. This value changes rapidly as +// players join the session or drop out. +// +// * hasAvailablePlayerSessions -- Boolean +// value indicating whether a game session has reached its maximum number of +// players. It is highly recommended that all search requests include this filter +// attribute to optimize search performance and return only sessions that players +// can join. // // Returned values for playerSessionCount and hasAvailablePlayerSessions // change quickly as players join sessions and others drop out. Results should be @@ -63,29 +62,28 @@ import ( // search feature finds only game sessions that are in ACTIVE status. To locate // games in statuses other than active, use DescribeGameSessionDetails. // -// * +// * // CreateGameSession // -// * DescribeGameSessions +// * DescribeGameSessions // -// * -// DescribeGameSessionDetails +// * DescribeGameSessionDetails // -// * SearchGameSessions -// -// * UpdateGameSession +// * +// SearchGameSessions // +// * UpdateGameSession // // * GetGameSessionLogUrl // -// * Game session placements +// * Game session +// placements // -// * -// StartGameSessionPlacement +// * StartGameSessionPlacement // -// * DescribeGameSessionPlacement +// * DescribeGameSessionPlacement // -// * +// * // StopGameSessionPlacement func (c *Client) SearchGameSessions(ctx context.Context, params *SearchGameSessionsInput, optFns ...func(*Options)) (*SearchGameSessionsOutput, error) { if params == nil { @@ -115,41 +113,41 @@ type SearchGameSessionsInput struct { // fleet that are in ACTIVE status. A filter expression can contain one or multiple // conditions. Each condition consists of the following: // - // * Operand -- Name of - // a game session attribute. Valid values are gameSessionName, gameSessionId, + // * Operand -- Name of a + // game session attribute. Valid values are gameSessionName, gameSessionId, // gameSessionProperties, maximumSessions, creationTimeMillis, playerSessionCount, // hasAvailablePlayerSessions. // - // * Comparator -- Valid comparators are: =, <>, - // <, >, <=, >=. 
+ // * Comparator -- Valid comparators are: =, <>, <, >, + // <=, >=. // - // * Value -- Value to be searched for. Values may be numbers, - // boolean values (true/false) or strings depending on the operand. String values - // are case sensitive and must be enclosed in single quotes. Special characters - // must be escaped. Boolean and string values can only be used with the comparators - // = and <>. For example, the following filter expression searches on - // gameSessionName: "FilterExpression": "gameSessionName = 'Matt\\'s Awesome Game - // 1'". + // * Value -- Value to be searched for. Values may be numbers, boolean + // values (true/false) or strings depending on the operand. String values are case + // sensitive and must be enclosed in single quotes. Special characters must be + // escaped. Boolean and string values can only be used with the comparators = and + // <>. For example, the following filter expression searches on gameSessionName: + // "FilterExpression": "gameSessionName = 'Matt\\'s Awesome Game 1'". // - // To chain multiple conditions in a single expression, use the logical - // keywords AND, OR, and NOT and parentheses as needed. For example: x AND y AND - // NOT z, NOT (x OR y). Session search evaluates conditions from left to right - // using the following precedence rules: + // To chain + // multiple conditions in a single expression, use the logical keywords AND, OR, + // and NOT and parentheses as needed. For example: x AND y AND NOT z, NOT (x OR y). + // Session search evaluates conditions from left to right using the following + // precedence rules: // - // * =, <>, <, >, <=, >= + // * =, <>, <, >, <=, >= // - // * - // Parentheses + // * Parentheses // - // * NOT + // * NOT // - // * AND + // * AND // - // * OR + // * OR // - // For example, this filter expression - // retrieves game sessions hosting at least ten players that have an open player - // slot: "maximumSessions>=10 AND hasAvailablePlayerSessions=true". + // For + // example, this filter expression retrieves game sessions hosting at least ten + // players that have an open player slot: "maximumSessions>=10 AND + // hasAvailablePlayerSessions=true". FilterExpression *string // A unique identifier for a fleet to search for active game sessions. You can use @@ -171,18 +169,17 @@ type SearchGameSessionsInput struct { // included, the request returns results in random order. A sort expression // consists of the following elements: // - // * Operand -- Name of a game session + // * Operand -- Name of a game session // attribute. Valid values are gameSessionName, gameSessionId, // gameSessionProperties, maximumSessions, creationTimeMillis, playerSessionCount, // hasAvailablePlayerSessions. // - // * Order -- Valid sort orders are ASC - // (ascending) and DESC (descending). + // * Order -- Valid sort orders are ASC (ascending) + // and DESC (descending). // - // For example, this sort expression returns - // the oldest active sessions first: "SortExpression": "creationTimeMillis ASC". - // Results with a null value for the sort operand are returned at the end of the - // list. + // For example, this sort expression returns the oldest + // active sessions first: "SortExpression": "creationTimeMillis ASC". Results with + // a null value for the sort operand are returned at the end of the list. 
SortExpression *string } diff --git a/service/gamelift/api_op_StartFleetActions.go b/service/gamelift/api_op_StartFleetActions.go index 03aabd597cc..ab5287f98dd 100644 --- a/service/gamelift/api_op_StartFleetActions.go +++ b/service/gamelift/api_op_StartFleetActions.go @@ -22,19 +22,19 @@ import ( // (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html) // Related operations // -// * CreateFleet +// * CreateFleet // -// * ListFleets +// * ListFleets // -// * DeleteFleet +// * DeleteFleet // +// * +// DescribeFleetAttributes // -// * DescribeFleetAttributes +// * UpdateFleetAttributes // -// * UpdateFleetAttributes -// -// * StartFleetActions -// or StopFleetActions +// * StartFleetActions or +// StopFleetActions func (c *Client) StartFleetActions(ctx context.Context, params *StartFleetActionsInput, optFns ...func(*Options)) (*StartFleetActionsOutput, error) { if params == nil { params = &StartFleetActionsInput{} diff --git a/service/gamelift/api_op_StartGameSessionPlacement.go b/service/gamelift/api_op_StartGameSessionPlacement.go index 9105399f746..eb290090347 100644 --- a/service/gamelift/api_op_StartGameSessionPlacement.go +++ b/service/gamelift/api_op_StartGameSessionPlacement.go @@ -29,50 +29,50 @@ import ( // across all players. To place a new game session request, specify the // following: // -// * The queue name and a set of game session properties and +// * The queue name and a set of game session properties and // settings // -// * A unique ID (such as a UUID) for the placement. You use this ID -// to track the status of the placement request +// * A unique ID (such as a UUID) for the placement. You use this ID to +// track the status of the placement request // -// * (Optional) A set of player -// data and a unique player ID for each player that you are joining to the new game -// session (player data is optional, but if you include it, you must also provide a -// unique ID for each player) +// * (Optional) A set of player data and +// a unique player ID for each player that you are joining to the new game session +// (player data is optional, but if you include it, you must also provide a unique +// ID for each player) // -// * Latency data for all players (if you want to -// optimize game play for the players) +// * Latency data for all players (if you want to optimize +// game play for the players) // -// If successful, a new game session placement -// is created. To track the status of a placement request, call +// If successful, a new game session placement is +// created. To track the status of a placement request, call // DescribeGameSessionPlacement and check the request's status. If the status is // FULFILLED, a new game session has been created and a game session ARN and Region // are referenced. If the placement request times out, you can resubmit the request // or retry it with a different queue. 
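The StartGameSessionPlacement flow described in the hunk above (submit a placement with a caller-generated ID, then check DescribeGameSessionPlacement for FULFILLED) can be sketched as follows, assuming a configured gamelift.Client; the queue name, placement ID, session name, and player limit are placeholders, and a real caller would poll or use queue notifications rather than check once:

package examples

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/gamelift"
)

// placeGameSession submits a placement request through a queue and checks its
// status once.
func placeGameSession(ctx context.Context, client *gamelift.Client, queueName, placementID string) error {
	if _, err := client.StartGameSessionPlacement(ctx, &gamelift.StartGameSessionPlacementInput{
		PlacementId:               aws.String(placementID), // caller-generated unique ID, e.g. a UUID
		GameSessionQueueName:      aws.String(queueName),
		MaximumPlayerSessionCount: aws.Int32(16),
		GameSessionName:           aws.String("example-session"),
	}); err != nil {
		return err
	}
	desc, err := client.DescribeGameSessionPlacement(ctx, &gamelift.DescribeGameSessionPlacementInput{
		PlacementId: aws.String(placementID),
	})
	if err != nil {
		return err
	}
	if p := desc.GameSessionPlacement; p != nil {
		fmt.Println("placement status:", p.Status) // FULFILLED once a session is created
	}
	return nil
}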
// -// * CreateGameSession +// * CreateGameSession // -// * +// * // DescribeGameSessions // -// * DescribeGameSessionDetails +// * DescribeGameSessionDetails // -// * -// SearchGameSessions +// * SearchGameSessions // -// * UpdateGameSession +// * +// UpdateGameSession // -// * GetGameSessionLogUrl +// * GetGameSessionLogUrl // -// * -// Game session placements +// * Game session placements // -// * StartGameSessionPlacement +// * +// StartGameSessionPlacement // -// * -// DescribeGameSessionPlacement +// * DescribeGameSessionPlacement // -// * StopGameSessionPlacement +// * +// StopGameSessionPlacement func (c *Client) StartGameSessionPlacement(ctx context.Context, params *StartGameSessionPlacementInput, optFns ...func(*Options)) (*StartGameSessionPlacementOutput, error) { if params == nil { params = &StartGameSessionPlacementInput{} diff --git a/service/gamelift/api_op_StartMatchBackfill.go b/service/gamelift/api_op_StartMatchBackfill.go index 820c23e086c..b1b9186979b 100644 --- a/service/gamelift/api_op_StartMatchBackfill.go +++ b/service/gamelift/api_op_StartMatchBackfill.go @@ -38,16 +38,16 @@ import ( // (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-match.html) // Related operations // -// * StartMatchmaking +// * StartMatchmaking // -// * DescribeMatchmaking +// * DescribeMatchmaking // -// * +// * // StopMatchmaking // -// * AcceptMatch +// * AcceptMatch // -// * StartMatchBackfill +// * StartMatchBackfill func (c *Client) StartMatchBackfill(ctx context.Context, params *StartMatchBackfillInput, optFns ...func(*Options)) (*StartMatchBackfillOutput, error) { if params == nil { params = &StartMatchBackfillInput{} @@ -86,16 +86,16 @@ type StartMatchBackfillInput struct { // session. This information is used by the matchmaker to find new players and add // them to the existing game. // - // * PlayerID, PlayerAttributes, Team -\\- This + // * PlayerID, PlayerAttributes, Team -\\- This // information is maintained in the GameSession object, MatchmakerData property, // for all players who are currently assigned to the game session. The matchmaker // data is in JSON syntax, formatted as a string. For more details, see Match Data // (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-server.html#match-server-data). // - // - // * LatencyInMs -\\- If the matchmaker uses player latency, include a latency - // value, in milliseconds, for the Region that the game session is currently in. Do - // not include latency values for any other Region. + // * + // LatencyInMs -\\- If the matchmaker uses player latency, include a latency value, + // in milliseconds, for the Region that the game session is currently in. Do not + // include latency values for any other Region. // // This member is required. Players []*types.Player diff --git a/service/gamelift/api_op_StartMatchmaking.go b/service/gamelift/api_op_StartMatchmaking.go index cd36086fc22..6a9fcba78b2 100644 --- a/service/gamelift/api_op_StartMatchmaking.go +++ b/service/gamelift/api_op_StartMatchmaking.go @@ -30,34 +30,34 @@ import ( // Processing a matchmaking request -- FlexMatch handles a matchmaking request as // follows: // -// * Your client code submits a StartMatchmaking request for one or -// more players and tracks the status of the request ticket. +// * Your client code submits a StartMatchmaking request for one or more +// players and tracks the status of the request ticket. // -// * FlexMatch uses -// this ticket and others in process to build an acceptable match. 
When a potential +// * FlexMatch uses this +// ticket and others in process to build an acceptable match. When a potential // match is identified, all tickets in the proposed match are advanced to the next // status. // -// * If the match requires player acceptance (set in the matchmaking +// * If the match requires player acceptance (set in the matchmaking // configuration), the tickets move into status REQUIRES_ACCEPTANCE. This status // triggers your client code to solicit acceptance from all players in every ticket // involved in the match, and then call AcceptMatch for each player. If any player // rejects or fails to accept the match before a specified timeout, the proposed // match is dropped (see AcceptMatch for more details). // -// * Once a match is -// proposed and accepted, the matchmaking tickets move into status PLACING. -// FlexMatch locates resources for a new game session using the game session queue -// (set in the matchmaking configuration) and creates the game session based on the -// match data. +// * Once a match is proposed +// and accepted, the matchmaking tickets move into status PLACING. FlexMatch +// locates resources for a new game session using the game session queue (set in +// the matchmaking configuration) and creates the game session based on the match +// data. // -// * When the match is successfully placed, the matchmaking -// tickets move into COMPLETED status. Connection information (including game -// session endpoint and player session) is added to the matchmaking tickets. -// Matched players can use the connection information to join the game. +// * When the match is successfully placed, the matchmaking tickets move +// into COMPLETED status. Connection information (including game session endpoint +// and player session) is added to the matchmaking tickets. Matched players can use +// the connection information to join the game. 
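The ticket lifecycle in the list above maps naturally onto a status loop in client code. A hedged sketch: ConfigurationName, Players, TicketIds, and TicketList are assumed field names, while the MatchmakingConfigurationStatus constants are the renamed values from types/enums.go in this patch; production code should prefer the FlexMatch event notification linked just below rather than polling.

import (
	"context"
	"errors"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/gamelift"
	"github.com/aws/aws-sdk-go-v2/service/gamelift/types"
)

// matchPlayers submits one matchmaking request and watches its ticket.
func matchPlayers(ctx context.Context, client *gamelift.Client, configName string, players []*types.Player) (*types.MatchmakingTicket, error) {
	start, err := client.StartMatchmaking(ctx, &gamelift.StartMatchmakingInput{
		ConfigurationName: aws.String(configName),
		Players:           players,
	})
	if err != nil {
		return nil, err
	}
	ticketID := start.MatchmakingTicket.TicketId
	for {
		out, err := client.DescribeMatchmaking(ctx, &gamelift.DescribeMatchmakingInput{
			TicketIds: []*string{ticketID},
		})
		if err != nil {
			return nil, err
		}
		if len(out.TicketList) == 0 {
			return nil, errors.New("matchmaking ticket not found")
		}
		t := out.TicketList[0]
		switch t.Status {
		case types.MatchmakingConfigurationStatusRequiresAcceptance:
			// Solicit acceptance from every player and call AcceptMatch here.
		case types.MatchmakingConfigurationStatusCompleted:
			return t, nil // connection info has been added to the ticket
		case types.MatchmakingConfigurationStatusFailed,
			types.MatchmakingConfigurationStatusCancelled,
			types.MatchmakingConfigurationStatusTimedOut:
			return t, errors.New("matchmaking did not complete: " + string(t.Status))
		}
		time.Sleep(5 * time.Second)
	}
}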
// -// Learn more -// Add FlexMatch to a Game Client +// Learn more Add FlexMatch to a +// Game Client // (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-client.html) // Set Up FlexMatch Event Notification // (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-notification.html) @@ -67,16 +67,16 @@ import ( // (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-match.html) // Related operations // -// * StartMatchmaking +// * StartMatchmaking // -// * DescribeMatchmaking +// * DescribeMatchmaking // -// * +// * // StopMatchmaking // -// * AcceptMatch +// * AcceptMatch // -// * StartMatchBackfill +// * StartMatchBackfill func (c *Client) StartMatchmaking(ctx context.Context, params *StartMatchmakingInput, optFns ...func(*Options)) (*StartMatchmakingOutput, error) { if params == nil { params = &StartMatchmakingInput{} diff --git a/service/gamelift/api_op_StopFleetActions.go b/service/gamelift/api_op_StopFleetActions.go index 71634076872..c787536cfc8 100644 --- a/service/gamelift/api_op_StopFleetActions.go +++ b/service/gamelift/api_op_StopFleetActions.go @@ -22,19 +22,19 @@ import ( // (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html) // Related operations // -// * CreateFleet +// * CreateFleet // -// * ListFleets +// * ListFleets // -// * DeleteFleet +// * DeleteFleet // +// * +// DescribeFleetAttributes // -// * DescribeFleetAttributes +// * UpdateFleetAttributes // -// * UpdateFleetAttributes -// -// * StartFleetActions -// or StopFleetActions +// * StartFleetActions or +// StopFleetActions func (c *Client) StopFleetActions(ctx context.Context, params *StopFleetActionsInput, optFns ...func(*Options)) (*StopFleetActionsOutput, error) { if params == nil { params = &StopFleetActionsInput{} diff --git a/service/gamelift/api_op_StopGameSessionPlacement.go b/service/gamelift/api_op_StopGameSessionPlacement.go index 1efdce87f59..8c46c63be8b 100644 --- a/service/gamelift/api_op_StopGameSessionPlacement.go +++ b/service/gamelift/api_op_StopGameSessionPlacement.go @@ -15,29 +15,28 @@ import ( // provide the placement ID values. If successful, the placement is moved to // CANCELLED status. 
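Cancelling a placement, as described above, needs only the placement ID. A minimal sketch (PlacementId is an assumed field name):

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/gamelift"
)

// cancelPlacement stops an in-flight placement; on success the placement
// moves to CANCELLED status.
func cancelPlacement(ctx context.Context, client *gamelift.Client, placementID string) error {
	_, err := client.StopGameSessionPlacement(ctx, &gamelift.StopGameSessionPlacementInput{
		PlacementId: aws.String(placementID),
	})
	return err
}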
// -// * CreateGameSession +// * CreateGameSession // -// * DescribeGameSessions +// * DescribeGameSessions // -// * +// * // DescribeGameSessionDetails // -// * SearchGameSessions +// * SearchGameSessions // -// * UpdateGameSession +// * UpdateGameSession // +// * +// GetGameSessionLogUrl // -// * GetGameSessionLogUrl +// * Game session placements // -// * Game session placements +// * StartGameSessionPlacement // -// * -// StartGameSessionPlacement +// * +// DescribeGameSessionPlacement // -// * DescribeGameSessionPlacement -// -// * -// StopGameSessionPlacement +// * StopGameSessionPlacement func (c *Client) StopGameSessionPlacement(ctx context.Context, params *StopGameSessionPlacementInput, optFns ...func(*Options)) (*StopGameSessionPlacementOutput, error) { if params == nil { params = &StopGameSessionPlacementInput{} diff --git a/service/gamelift/api_op_StopMatchmaking.go b/service/gamelift/api_op_StopMatchmaking.go index 844f67b45b1..e0efcbb34fb 100644 --- a/service/gamelift/api_op_StopMatchmaking.go +++ b/service/gamelift/api_op_StopMatchmaking.go @@ -23,16 +23,16 @@ import ( // (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-client.html) // Related operations // -// * StartMatchmaking +// * StartMatchmaking // -// * DescribeMatchmaking +// * DescribeMatchmaking // -// * +// * // StopMatchmaking // -// * AcceptMatch +// * AcceptMatch // -// * StartMatchBackfill +// * StartMatchBackfill func (c *Client) StopMatchmaking(ctx context.Context, params *StopMatchmakingInput, optFns ...func(*Options)) (*StopMatchmakingOutput, error) { if params == nil { params = &StopMatchmakingInput{} diff --git a/service/gamelift/api_op_SuspendGameServerGroup.go b/service/gamelift/api_op_SuspendGameServerGroup.go index 66795596f2c..030a515c0d3 100644 --- a/service/gamelift/api_op_SuspendGameServerGroup.go +++ b/service/gamelift/api_op_SuspendGameServerGroup.go @@ -16,11 +16,11 @@ import ( // instances or the game server group. You can restart activity by calling // ResumeGameServerGroup. You can suspend the following activity: // -// * Instance -// type replacement - This activity evaluates the current game hosting viability of -// all Spot instance types that are defined for the game server group. It updates -// the Auto Scaling group to remove nonviable Spot Instance types, which have a -// higher chance of game server interruptions. It then balances capacity across the +// * Instance type +// replacement - This activity evaluates the current game hosting viability of all +// Spot instance types that are defined for the game server group. It updates the +// Auto Scaling group to remove nonviable Spot Instance types, which have a higher +// chance of game server interruptions. It then balances capacity across the // remaining viable Spot Instance types. When this activity is suspended, the Auto // Scaling group continues with its current balance, regardless of viability. 
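Suspending and later resuming the instance-type replacement activity described above might look like the following sketch. SuspendActions and ResumeActions are assumed field names, and the value/pointer style of the action slice may differ in this preview; the GameServerGroupActionReplaceInstanceTypes constant is the renamed value from types/enums.go in this patch.

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/gamelift"
	"github.com/aws/aws-sdk-go-v2/service/gamelift/types"
)

// pauseInstanceTypeReplacement suspends Spot viability rebalancing for a
// group, then restores it once maintenance is done.
func pauseInstanceTypeReplacement(ctx context.Context, client *gamelift.Client, groupName string) error {
	actions := []types.GameServerGroupAction{types.GameServerGroupActionReplaceInstanceTypes}
	if _, err := client.SuspendGameServerGroup(ctx, &gamelift.SuspendGameServerGroupInput{
		GameServerGroupName: aws.String(groupName),
		SuspendActions:      actions,
	}); err != nil {
		return err
	}
	// ... maintenance work happens here ...
	_, err := client.ResumeGameServerGroup(ctx, &gamelift.ResumeGameServerGroupInput{
		GameServerGroupName: aws.String(groupName),
		ResumeActions:       actions,
	})
	return err
}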
// Instance protection, utilization metrics, and capacity scaling activities @@ -33,24 +33,23 @@ import ( // (https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/gsg-intro.html) // Related operations // -// * CreateGameServerGroup +// * CreateGameServerGroup // -// * ListGameServerGroups +// * ListGameServerGroups // +// * +// DescribeGameServerGroup // -// * DescribeGameServerGroup +// * UpdateGameServerGroup // -// * UpdateGameServerGroup +// * DeleteGameServerGroup // -// * -// DeleteGameServerGroup +// * +// ResumeGameServerGroup // -// * ResumeGameServerGroup +// * SuspendGameServerGroup // -// * -// SuspendGameServerGroup -// -// * DescribeGameServerInstances +// * DescribeGameServerInstances func (c *Client) SuspendGameServerGroup(ctx context.Context, params *SuspendGameServerGroupInput, optFns ...func(*Options)) (*SuspendGameServerGroupOutput, error) { if params == nil { params = &SuspendGameServerGroupInput{} diff --git a/service/gamelift/api_op_TagResource.go b/service/gamelift/api_op_TagResource.go index 67010dc0ec0..e0ad4717d33 100644 --- a/service/gamelift/api_op_TagResource.go +++ b/service/gamelift/api_op_TagResource.go @@ -17,37 +17,36 @@ import ( // breakdowns, etc. This operation handles the permissions necessary to manage tags // for the following GameLift resource types: // -// * Build +// * Build // -// * Script +// * Script // -// * -// Fleet +// * Fleet // -// * Alias +// * +// Alias // -// * GameSessionQueue -// -// * MatchmakingConfiguration +// * GameSessionQueue // +// * MatchmakingConfiguration // // * MatchmakingRuleSet // -// To add a tag to a resource, specify the unique ARN value -// for the resource and provide a tag list containing one or more tags. The -// operation succeeds even if the list includes tags that are already assigned to -// the specified resource. Learn more Tagging AWS Resources +// To +// add a tag to a resource, specify the unique ARN value for the resource and +// provide a tag list containing one or more tags. The operation succeeds even if +// the list includes tags that are already assigned to the specified resource. +// Learn more Tagging AWS Resources // (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) in the AWS // General Reference AWS Tagging Strategies // (http://aws.amazon.com/answers/account-management/aws-tagging-strategies/) // Related operations // -// * TagResource +// * TagResource // -// * UntagResource +// * UntagResource // -// * -// ListTagsForResource +// * ListTagsForResource func (c *Client) TagResource(ctx context.Context, params *TagResourceInput, optFns ...func(*Options)) (*TagResourceOutput, error) { if params == nil { params = &TagResourceInput{} diff --git a/service/gamelift/api_op_UntagResource.go b/service/gamelift/api_op_UntagResource.go index 09d66e6e94e..f6c0343fbd0 100644 --- a/service/gamelift/api_op_UntagResource.go +++ b/service/gamelift/api_op_UntagResource.go @@ -15,37 +15,36 @@ import ( // permissions necessary to manage tags for the following GameLift resource // types: // -// * Build +// * Build // -// * Script +// * Script // -// * Fleet +// * Fleet // -// * Alias +// * Alias // -// * -// GameSessionQueue +// * GameSessionQueue // -// * MatchmakingConfiguration +// * +// MatchmakingConfiguration // -// * MatchmakingRuleSet +// * MatchmakingRuleSet // -// To -// remove a tag from a resource, specify the unique ARN value for the resource and -// provide a string list containing one or more tags to be removed. 
This operation -// succeeds even if the list includes tags that are not currently assigned to the -// specified resource. Learn more Tagging AWS Resources +// To remove a tag from a resource, +// specify the unique ARN value for the resource and provide a string list +// containing one or more tags to be removed. This operation succeeds even if the +// list includes tags that are not currently assigned to the specified resource. +// Learn more Tagging AWS Resources // (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) in the AWS // General Reference AWS Tagging Strategies // (http://aws.amazon.com/answers/account-management/aws-tagging-strategies/) // Related operations // -// * TagResource +// * TagResource // -// * UntagResource +// * UntagResource // -// * -// ListTagsForResource +// * ListTagsForResource func (c *Client) UntagResource(ctx context.Context, params *UntagResourceInput, optFns ...func(*Options)) (*UntagResourceOutput, error) { if params == nil { params = &UntagResourceInput{} diff --git a/service/gamelift/api_op_UpdateAlias.go b/service/gamelift/api_op_UpdateAlias.go index cc6dc679ca3..e8d2c50ebda 100644 --- a/service/gamelift/api_op_UpdateAlias.go +++ b/service/gamelift/api_op_UpdateAlias.go @@ -16,18 +16,18 @@ import ( // another fleet, provide an updated routing strategy. If successful, the updated // alias record is returned. // -// * CreateAlias +// * CreateAlias // -// * ListAliases +// * ListAliases // -// * -// DescribeAlias +// * DescribeAlias // -// * UpdateAlias +// * +// UpdateAlias // -// * DeleteAlias +// * DeleteAlias // -// * ResolveAlias +// * ResolveAlias func (c *Client) UpdateAlias(ctx context.Context, params *UpdateAliasInput, optFns ...func(*Options)) (*UpdateAliasOutput, error) { if params == nil { params = &UpdateAliasInput{} diff --git a/service/gamelift/api_op_UpdateBuild.go b/service/gamelift/api_op_UpdateBuild.go index d8319e985e2..8a018344fde 100644 --- a/service/gamelift/api_op_UpdateBuild.go +++ b/service/gamelift/api_op_UpdateBuild.go @@ -18,16 +18,16 @@ import ( // (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-build-intro.html) // Related operations // -// * CreateBuild +// * CreateBuild // -// * ListBuilds +// * ListBuilds // -// * DescribeBuild +// * DescribeBuild // +// * +// UpdateBuild // -// * UpdateBuild -// -// * DeleteBuild +// * DeleteBuild func (c *Client) UpdateBuild(ctx context.Context, params *UpdateBuildInput, optFns ...func(*Options)) (*UpdateBuildOutput, error) { if params == nil { params = &UpdateBuildInput{} diff --git a/service/gamelift/api_op_UpdateFleetAttributes.go b/service/gamelift/api_op_UpdateFleetAttributes.go index 78a86c2de46..2e5a93509ee 100644 --- a/service/gamelift/api_op_UpdateFleetAttributes.go +++ b/service/gamelift/api_op_UpdateFleetAttributes.go @@ -18,28 +18,27 @@ import ( // (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html) // Related operations // -// * CreateFleet +// * CreateFleet // -// * ListFleets +// * ListFleets // -// * DeleteFleet +// * DeleteFleet // +// * +// DescribeFleetAttributes // -// * DescribeFleetAttributes +// * Update fleets: // -// * Update fleets: +// * UpdateFleetAttributes // -// * -// UpdateFleetAttributes +// * +// UpdateFleetCapacity // -// * UpdateFleetCapacity +// * UpdateFleetPortSettings // -// * -// UpdateFleetPortSettings +// * UpdateRuntimeConfiguration // -// * UpdateRuntimeConfiguration -// -// * +// * // StartFleetActions or StopFleetActions func (c *Client) UpdateFleetAttributes(ctx 
context.Context, params *UpdateFleetAttributesInput, optFns ...func(*Options)) (*UpdateFleetAttributesOutput, error) { if params == nil { @@ -82,12 +81,12 @@ type UpdateFleetAttributesInput struct { // fleet. Instances that already exist are not affected. You can set protection for // individual instances using UpdateGameSession. // - // * NoProtection -- The game + // * NoProtection -- The game // session can be terminated during a scale-down event. // - // * FullProtection -- If - // the game session is in an ACTIVE status, it cannot be terminated during a - // scale-down event. + // * FullProtection -- If the + // game session is in an ACTIVE status, it cannot be terminated during a scale-down + // event. NewGameSessionProtectionPolicy types.ProtectionPolicy // Policy that limits the number of game sessions an individual player can create diff --git a/service/gamelift/api_op_UpdateFleetCapacity.go b/service/gamelift/api_op_UpdateFleetCapacity.go index b940b65ef2c..81477c9db2c 100644 --- a/service/gamelift/api_op_UpdateFleetCapacity.go +++ b/service/gamelift/api_op_UpdateFleetCapacity.go @@ -27,28 +27,27 @@ import ( // (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html) // Related operations // -// * CreateFleet +// * CreateFleet // -// * ListFleets +// * ListFleets // -// * DeleteFleet +// * DeleteFleet // +// * +// DescribeFleetAttributes // -// * DescribeFleetAttributes +// * Update fleets: // -// * Update fleets: +// * UpdateFleetAttributes // -// * -// UpdateFleetAttributes +// * +// UpdateFleetCapacity // -// * UpdateFleetCapacity +// * UpdateFleetPortSettings // -// * -// UpdateFleetPortSettings +// * UpdateRuntimeConfiguration // -// * UpdateRuntimeConfiguration -// -// * +// * // StartFleetActions or StopFleetActions func (c *Client) UpdateFleetCapacity(ctx context.Context, params *UpdateFleetCapacityInput, optFns ...func(*Options)) (*UpdateFleetCapacityOutput, error) { if params == nil { diff --git a/service/gamelift/api_op_UpdateFleetPortSettings.go b/service/gamelift/api_op_UpdateFleetPortSettings.go index dd0a1c81b24..4c41cd9d3cf 100644 --- a/service/gamelift/api_op_UpdateFleetPortSettings.go +++ b/service/gamelift/api_op_UpdateFleetPortSettings.go @@ -20,28 +20,27 @@ import ( // (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html) // Related operations // -// * CreateFleet +// * CreateFleet // -// * ListFleets +// * ListFleets // -// * DeleteFleet +// * DeleteFleet // +// * +// DescribeFleetAttributes // -// * DescribeFleetAttributes +// * Update fleets: // -// * Update fleets: +// * UpdateFleetAttributes // -// * -// UpdateFleetAttributes +// * +// UpdateFleetCapacity // -// * UpdateFleetCapacity +// * UpdateFleetPortSettings // -// * -// UpdateFleetPortSettings +// * UpdateRuntimeConfiguration // -// * UpdateRuntimeConfiguration -// -// * +// * // StartFleetActions or StopFleetActions func (c *Client) UpdateFleetPortSettings(ctx context.Context, params *UpdateFleetPortSettingsInput, optFns ...func(*Options)) (*UpdateFleetPortSettingsOutput, error) { if params == nil { diff --git a/service/gamelift/api_op_UpdateGameServer.go b/service/gamelift/api_op_UpdateGameServer.go index 9f832c09c9d..3a982b9c419 100644 --- a/service/gamelift/api_op_UpdateGameServer.go +++ b/service/gamelift/api_op_UpdateGameServer.go @@ -18,20 +18,20 @@ import ( // operation to update the following types of game server information. 
You can make // all three types of updates in the same request: // -// * To update the game -// server's utilization status, identify the game server and game server group and -// specify the current utilization status. Use this status to identify when game -// servers are currently hosting games and when they are available to be claimed. +// * To update the game server's +// utilization status, identify the game server and game server group and specify +// the current utilization status. Use this status to identify when game servers +// are currently hosting games and when they are available to be claimed. // -// -// * To report health status, identify the game server and game server group and -// set health check to HEALTHY. If a game server does not report health status for -// a certain length of time, the game server is no longer considered healthy. As a +// * To +// report health status, identify the game server and game server group and set +// health check to HEALTHY. If a game server does not report health status for a +// certain length of time, the game server is no longer considered healthy. As a // result, it will be eventually deregistered from the game server group to avoid // affecting utilization metrics. The best practice is to report health every 60 // seconds. // -// * To change game server metadata, provide updated game server +// * To change game server metadata, provide updated game server // data. // // Once a game server is successfully updated, the relevant statuses and @@ -39,18 +39,18 @@ import ( // (https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/gsg-intro.html) // Related operations // -// * RegisterGameServer +// * RegisterGameServer // -// * ListGameServers +// * ListGameServers // -// * +// * // ClaimGameServer // -// * DescribeGameServer +// * DescribeGameServer // -// * UpdateGameServer +// * UpdateGameServer // -// * +// * // DeregisterGameServer func (c *Client) UpdateGameServer(ctx context.Context, params *UpdateGameServerInput, optFns ...func(*Options)) (*UpdateGameServerOutput, error) { if params == nil { diff --git a/service/gamelift/api_op_UpdateGameServerGroup.go b/service/gamelift/api_op_UpdateGameServerGroup.go index 34d1d5a55f3..e2a393b5a8e 100644 --- a/service/gamelift/api_op_UpdateGameServerGroup.go +++ b/service/gamelift/api_op_UpdateGameServerGroup.go @@ -23,24 +23,23 @@ import ( // (https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/gsg-intro.html) // Related operations // -// * CreateGameServerGroup +// * CreateGameServerGroup // -// * ListGameServerGroups +// * ListGameServerGroups // +// * +// DescribeGameServerGroup // -// * DescribeGameServerGroup +// * UpdateGameServerGroup // -// * UpdateGameServerGroup +// * DeleteGameServerGroup // -// * -// DeleteGameServerGroup +// * +// ResumeGameServerGroup // -// * ResumeGameServerGroup +// * SuspendGameServerGroup // -// * -// SuspendGameServerGroup -// -// * DescribeGameServerInstances +// * DescribeGameServerInstances func (c *Client) UpdateGameServerGroup(ctx context.Context, params *UpdateGameServerGroupInput, optFns ...func(*Options)) (*UpdateGameServerGroupOutput, error) { if params == nil { params = &UpdateGameServerGroupInput{} @@ -67,23 +66,23 @@ type UpdateGameServerGroupInput struct { // Indicates how GameLift FleetIQ balances the use of Spot Instances and On-Demand // Instances in the game server group. Method options include the following: // - // * + // * // SPOT_ONLY - Only Spot Instances are used in the game server group. 
If Spot // Instances are unavailable or not viable for game hosting, the game server group // provides no hosting capacity until Spot Instances can again be used. Until then, // no new instances are started, and the existing nonviable Spot Instances are // terminated (after current gameplay ends) and are not replaced. // - // * - // SPOT_PREFERRED - (default value) Spot Instances are used whenever available in - // the game server group. If Spot Instances are unavailable, the game server group - // continues to provide hosting capacity by falling back to On-Demand Instances. - // Existing nonviable Spot Instances are terminated (after current gameplay ends) - // and are replaced with new On-Demand Instances. + // * SPOT_PREFERRED + // - (default value) Spot Instances are used whenever available in the game server + // group. If Spot Instances are unavailable, the game server group continues to + // provide hosting capacity by falling back to On-Demand Instances. Existing + // nonviable Spot Instances are terminated (after current gameplay ends) and are + // replaced with new On-Demand Instances. // - // * ON_DEMAND_ONLY - Only - // On-Demand Instances are used in the game server group. No Spot Instances are - // used, even when available, while this balancing strategy is in force. + // * ON_DEMAND_ONLY - Only On-Demand + // Instances are used in the game server group. No Spot Instances are used, even + // when available, while this balancing strategy is in force. BalancingStrategy types.BalancingStrategy // A flag that indicates whether instances in the game server group are protected diff --git a/service/gamelift/api_op_UpdateGameSession.go b/service/gamelift/api_op_UpdateGameSession.go index f48e94c1a85..5c65018d9fd 100644 --- a/service/gamelift/api_op_UpdateGameSession.go +++ b/service/gamelift/api_op_UpdateGameSession.go @@ -18,29 +18,28 @@ import ( // update a game session, specify the game session ID and the values you want to // change. If successful, an updated GameSession object is returned. // -// * +// * // CreateGameSession // -// * DescribeGameSessions +// * DescribeGameSessions // -// * -// DescribeGameSessionDetails +// * DescribeGameSessionDetails // -// * SearchGameSessions -// -// * UpdateGameSession +// * +// SearchGameSessions // +// * UpdateGameSession // // * GetGameSessionLogUrl // -// * Game session placements +// * Game session +// placements // -// * -// StartGameSessionPlacement +// * StartGameSessionPlacement // -// * DescribeGameSessionPlacement +// * DescribeGameSessionPlacement // -// * +// * // StopGameSessionPlacement func (c *Client) UpdateGameSession(ctx context.Context, params *UpdateGameSessionInput, optFns ...func(*Options)) (*UpdateGameSessionOutput, error) { if params == nil { @@ -78,11 +77,11 @@ type UpdateGameSessionInput struct { // Game session protection policy to apply to this game session only. // - // * + // * // NoProtection -- The game session can be terminated during a scale-down event. // - // - // * FullProtection -- If the game session is in an ACTIVE status, it cannot be + // * + // FullProtection -- If the game session is in an ACTIVE status, it cannot be // terminated during a scale-down event. 
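The NoProtection/FullProtection choice above is just an enum value on the update call. A hedged sketch: GameSessionId is an assumed field name, and the ProtectionPolicyFullProtection constant name follows the renaming pattern applied in types/enums.go rather than appearing in this hunk.

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/gamelift"
	"github.com/aws/aws-sdk-go-v2/service/gamelift/types"
)

// protectSession marks one ACTIVE game session so it cannot be terminated by
// a scale-down event.
func protectSession(ctx context.Context, client *gamelift.Client, gameSessionID string) error {
	_, err := client.UpdateGameSession(ctx, &gamelift.UpdateGameSessionInput{
		GameSessionId:    aws.String(gameSessionID),
		ProtectionPolicy: types.ProtectionPolicyFullProtection,
	})
	return err
}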
ProtectionPolicy types.ProtectionPolicy } diff --git a/service/gamelift/api_op_UpdateGameSessionQueue.go b/service/gamelift/api_op_UpdateGameSessionQueue.go index 5573afd2506..59d0069466f 100644 --- a/service/gamelift/api_op_UpdateGameSessionQueue.go +++ b/service/gamelift/api_op_UpdateGameSessionQueue.go @@ -18,15 +18,14 @@ import ( // (https://docs.aws.amazon.com/gamelift/latest/developerguide/queues-intro.html) // Related operations // -// * CreateGameSessionQueue +// * CreateGameSessionQueue // -// * -// DescribeGameSessionQueues +// * DescribeGameSessionQueues // -// * UpdateGameSessionQueue +// * +// UpdateGameSessionQueue // -// * -// DeleteGameSessionQueue +// * DeleteGameSessionQueue func (c *Client) UpdateGameSessionQueue(ctx context.Context, params *UpdateGameSessionQueueInput, optFns ...func(*Options)) (*UpdateGameSessionQueueOutput, error) { if params == nil { params = &UpdateGameSessionQueueInput{} diff --git a/service/gamelift/api_op_UpdateMatchmakingConfiguration.go b/service/gamelift/api_op_UpdateMatchmakingConfiguration.go index b2691618ffd..4685bfb2db8 100644 --- a/service/gamelift/api_op_UpdateMatchmakingConfiguration.go +++ b/service/gamelift/api_op_UpdateMatchmakingConfiguration.go @@ -18,24 +18,24 @@ import ( // (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-configuration.html) // Related operations // -// * CreateMatchmakingConfiguration +// * CreateMatchmakingConfiguration // -// * +// * // DescribeMatchmakingConfigurations // -// * UpdateMatchmakingConfiguration +// * UpdateMatchmakingConfiguration // -// * +// * // DeleteMatchmakingConfiguration // -// * CreateMatchmakingRuleSet +// * CreateMatchmakingRuleSet // -// * +// * // DescribeMatchmakingRuleSets // -// * ValidateMatchmakingRuleSet +// * ValidateMatchmakingRuleSet // -// * +// * // DeleteMatchmakingRuleSet func (c *Client) UpdateMatchmakingConfiguration(ctx context.Context, params *UpdateMatchmakingConfigurationInput, optFns ...func(*Options)) (*UpdateMatchmakingConfigurationOutput, error) { if params == nil { diff --git a/service/gamelift/api_op_UpdateRuntimeConfiguration.go b/service/gamelift/api_op_UpdateRuntimeConfiguration.go index 0f04962e8fc..d5f0b247e34 100644 --- a/service/gamelift/api_op_UpdateRuntimeConfiguration.go +++ b/service/gamelift/api_op_UpdateRuntimeConfiguration.go @@ -26,28 +26,27 @@ import ( // (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html) // Related operations // -// * CreateFleet +// * CreateFleet // -// * ListFleets +// * ListFleets // -// * DeleteFleet +// * DeleteFleet // +// * +// DescribeFleetAttributes // -// * DescribeFleetAttributes +// * Update fleets: // -// * Update fleets: +// * UpdateFleetAttributes // -// * -// UpdateFleetAttributes +// * +// UpdateFleetCapacity // -// * UpdateFleetCapacity +// * UpdateFleetPortSettings // -// * -// UpdateFleetPortSettings +// * UpdateRuntimeConfiguration // -// * UpdateRuntimeConfiguration -// -// * +// * // StartFleetActions or StopFleetActions func (c *Client) UpdateRuntimeConfiguration(ctx context.Context, params *UpdateRuntimeConfigurationInput, optFns ...func(*Options)) (*UpdateRuntimeConfigurationOutput, error) { if params == nil { diff --git a/service/gamelift/api_op_UpdateScript.go b/service/gamelift/api_op_UpdateScript.go index 14204be2ebb..ca0dc3ced6a 100644 --- a/service/gamelift/api_op_UpdateScript.go +++ b/service/gamelift/api_op_UpdateScript.go @@ -23,16 +23,16 @@ import ( // (https://docs.aws.amazon.com/gamelift/latest/developerguide/realtime-intro.html) // Related 
operations // -// * CreateScript +// * CreateScript // -// * ListScripts +// * ListScripts // -// * -// DescribeScript +// * DescribeScript // -// * UpdateScript +// * +// UpdateScript // -// * DeleteScript +// * DeleteScript func (c *Client) UpdateScript(ctx context.Context, params *UpdateScriptInput, optFns ...func(*Options)) (*UpdateScriptOutput, error) { if params == nil { params = &UpdateScriptInput{} diff --git a/service/gamelift/api_op_ValidateMatchmakingRuleSet.go b/service/gamelift/api_op_ValidateMatchmakingRuleSet.go index 8f5ce00172f..3e37b64403e 100644 --- a/service/gamelift/api_op_ValidateMatchmakingRuleSet.go +++ b/service/gamelift/api_op_ValidateMatchmakingRuleSet.go @@ -15,30 +15,30 @@ import ( // allowed property expressions. To validate syntax, provide a rule set JSON // string. Learn more // -// * Build a Rule Set +// * Build a Rule Set // (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-rulesets.html) // // Related // operations // -// * CreateMatchmakingConfiguration +// * CreateMatchmakingConfiguration // -// * +// * // DescribeMatchmakingConfigurations // -// * UpdateMatchmakingConfiguration +// * UpdateMatchmakingConfiguration // -// * +// * // DeleteMatchmakingConfiguration // -// * CreateMatchmakingRuleSet +// * CreateMatchmakingRuleSet // -// * +// * // DescribeMatchmakingRuleSets // -// * ValidateMatchmakingRuleSet +// * ValidateMatchmakingRuleSet // -// * +// * // DeleteMatchmakingRuleSet func (c *Client) ValidateMatchmakingRuleSet(ctx context.Context, params *ValidateMatchmakingRuleSetInput, optFns ...func(*Options)) (*ValidateMatchmakingRuleSetOutput, error) { if params == nil { diff --git a/service/gamelift/doc.go b/service/gamelift/doc.go index 12108ef6371..63f4a72cbff 100644 --- a/service/gamelift/doc.go +++ b/service/gamelift/doc.go @@ -12,7 +12,7 @@ // Amazon GameLift Developer Guide // (http://docs.aws.amazon.com/gamelift/latest/developerguide/). // -// * Managed +// * Managed // GameLift -- GameLift offers a fully managed service to set up and maintain // computing machines for hosting, manage game session and player session life // cycle, and handle security, storage, and performance tracking. You can use @@ -20,12 +20,12 @@ // configure your game session management to minimize player latency, or add // FlexMatch for matchmaking. // -// * Managed GameLift with Realtime Servers – With +// * Managed GameLift with Realtime Servers – With // GameLift Realtime Servers, you can quickly configure and set up game servers for // your game. Realtime Servers provides a game server framework with core Amazon // GameLift infrastructure already built in. // -// * GameLift FleetIQ – Use GameLift +// * GameLift FleetIQ – Use GameLift // FleetIQ as a standalone feature while managing your own EC2 instances and Auto // Scaling groups for game hosting. GameLift FleetIQ provides optimizations that // make low-cost Spot Instances viable for game hosting. @@ -35,11 +35,11 @@ // You can find links to language-specific SDK guides and the AWS CLI reference // with each operation and data type topic. 
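All of the operation sketches added in this review assume a *gamelift.Client built from an aws.Config. A minimal construction sketch follows, with the caveat that the config loader's exact signature has shifted across preview releases of the v2 SDK, so only NewFromConfig and the per-client Options override are shown:

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/gamelift"
)

// newGameLiftClient wraps a shared aws.Config (typically produced by the
// SDK's default config loader) and applies a per-client region override.
func newGameLiftClient(cfg aws.Config) *gamelift.Client {
	return gamelift.NewFromConfig(cfg, func(o *gamelift.Options) {
		o.Region = "us-west-2" // hypothetical region; pick the fleet's home Region
	})
}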
Useful links: // -// * GameLift API +// * GameLift API // operations listed by tasks // (https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html) // -// -// * GameLift tools and resources +// * +// GameLift tools and resources // (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-components.html) package gamelift diff --git a/service/gamelift/types/enums.go b/service/gamelift/types/enums.go index 78873a2f4a6..591ec492596 100644 --- a/service/gamelift/types/enums.go +++ b/service/gamelift/types/enums.go @@ -42,9 +42,9 @@ type BalancingStrategy string // Enum values for BalancingStrategy const ( - BalancingStrategySpot_only BalancingStrategy = "SPOT_ONLY" - BalancingStrategySpot_preferred BalancingStrategy = "SPOT_PREFERRED" - BalancingStrategyOn_demand_only BalancingStrategy = "ON_DEMAND_ONLY" + BalancingStrategySpotOnly BalancingStrategy = "SPOT_ONLY" + BalancingStrategySpotPreferred BalancingStrategy = "SPOT_PREFERRED" + BalancingStrategyOnDemandOnly BalancingStrategy = "ON_DEMAND_ONLY" ) // Values returns all known values for BalancingStrategy. Note that this can be @@ -252,39 +252,39 @@ type EventCode string // Enum values for EventCode const ( - EventCodeGeneric_event EventCode = "GENERIC_EVENT" - EventCodeFleet_created EventCode = "FLEET_CREATED" - EventCodeFleet_deleted EventCode = "FLEET_DELETED" - EventCodeFleet_scaling_event EventCode = "FLEET_SCALING_EVENT" - EventCodeFleet_state_downloading EventCode = "FLEET_STATE_DOWNLOADING" - EventCodeFleet_state_validating EventCode = "FLEET_STATE_VALIDATING" - EventCodeFleet_state_building EventCode = "FLEET_STATE_BUILDING" - EventCodeFleet_state_activating EventCode = "FLEET_STATE_ACTIVATING" - EventCodeFleet_state_active EventCode = "FLEET_STATE_ACTIVE" - EventCodeFleet_state_error EventCode = "FLEET_STATE_ERROR" - EventCodeFleet_initialization_failed EventCode = "FLEET_INITIALIZATION_FAILED" - EventCodeFleet_binary_download_failed EventCode = "FLEET_BINARY_DOWNLOAD_FAILED" - EventCodeFleet_validation_launch_path_not_found EventCode = "FLEET_VALIDATION_LAUNCH_PATH_NOT_FOUND" - EventCodeFleet_validation_executable_runtime_failure EventCode = "FLEET_VALIDATION_EXECUTABLE_RUNTIME_FAILURE" - EventCodeFleet_validation_timed_out EventCode = "FLEET_VALIDATION_TIMED_OUT" - EventCodeFleet_activation_failed EventCode = "FLEET_ACTIVATION_FAILED" - EventCodeFleet_activation_failed_no_instances EventCode = "FLEET_ACTIVATION_FAILED_NO_INSTANCES" - EventCodeFleet_new_game_session_protection_policy_updated EventCode = "FLEET_NEW_GAME_SESSION_PROTECTION_POLICY_UPDATED" - EventCodeServer_process_invalid_path EventCode = "SERVER_PROCESS_INVALID_PATH" - EventCodeServer_process_sdk_initialization_timeout EventCode = "SERVER_PROCESS_SDK_INITIALIZATION_TIMEOUT" - EventCodeServer_process_process_ready_timeout EventCode = "SERVER_PROCESS_PROCESS_READY_TIMEOUT" - EventCodeServer_process_crashed EventCode = "SERVER_PROCESS_CRASHED" - EventCodeServer_process_terminated_unhealthy EventCode = "SERVER_PROCESS_TERMINATED_UNHEALTHY" - EventCodeServer_process_force_terminated EventCode = "SERVER_PROCESS_FORCE_TERMINATED" - EventCodeServer_process_process_exit_timeout EventCode = "SERVER_PROCESS_PROCESS_EXIT_TIMEOUT" - EventCodeGame_session_activation_timeout EventCode = "GAME_SESSION_ACTIVATION_TIMEOUT" - EventCodeFleet_creation_extracting_build EventCode = "FLEET_CREATION_EXTRACTING_BUILD" - EventCodeFleet_creation_running_installer EventCode = "FLEET_CREATION_RUNNING_INSTALLER" - 
EventCodeFleet_creation_validating_runtime_config EventCode = "FLEET_CREATION_VALIDATING_RUNTIME_CONFIG" - EventCodeFleet_vpc_peering_succeeded EventCode = "FLEET_VPC_PEERING_SUCCEEDED" - EventCodeFleet_vpc_peering_failed EventCode = "FLEET_VPC_PEERING_FAILED" - EventCodeFleet_vpc_peering_deleted EventCode = "FLEET_VPC_PEERING_DELETED" - EventCodeInstance_interrupted EventCode = "INSTANCE_INTERRUPTED" + EventCodeGenericEvent EventCode = "GENERIC_EVENT" + EventCodeFleetCreated EventCode = "FLEET_CREATED" + EventCodeFleetDeleted EventCode = "FLEET_DELETED" + EventCodeFleetScalingEvent EventCode = "FLEET_SCALING_EVENT" + EventCodeFleetStateDownloading EventCode = "FLEET_STATE_DOWNLOADING" + EventCodeFleetStateValidating EventCode = "FLEET_STATE_VALIDATING" + EventCodeFleetStateBuilding EventCode = "FLEET_STATE_BUILDING" + EventCodeFleetStateActivating EventCode = "FLEET_STATE_ACTIVATING" + EventCodeFleetStateActive EventCode = "FLEET_STATE_ACTIVE" + EventCodeFleetStateError EventCode = "FLEET_STATE_ERROR" + EventCodeFleetInitializationFailed EventCode = "FLEET_INITIALIZATION_FAILED" + EventCodeFleetBinaryDownloadFailed EventCode = "FLEET_BINARY_DOWNLOAD_FAILED" + EventCodeFleetValidationLaunchPathNotFound EventCode = "FLEET_VALIDATION_LAUNCH_PATH_NOT_FOUND" + EventCodeFleetValidationExecutableRuntimeFailure EventCode = "FLEET_VALIDATION_EXECUTABLE_RUNTIME_FAILURE" + EventCodeFleetValidationTimedOut EventCode = "FLEET_VALIDATION_TIMED_OUT" + EventCodeFleetActivationFailed EventCode = "FLEET_ACTIVATION_FAILED" + EventCodeFleetActivationFailedNoInstances EventCode = "FLEET_ACTIVATION_FAILED_NO_INSTANCES" + EventCodeFleetNewGameSessionProtectionPolicyUpdated EventCode = "FLEET_NEW_GAME_SESSION_PROTECTION_POLICY_UPDATED" + EventCodeServerProcessInvalidPath EventCode = "SERVER_PROCESS_INVALID_PATH" + EventCodeServerProcessSdkInitializationTimeout EventCode = "SERVER_PROCESS_SDK_INITIALIZATION_TIMEOUT" + EventCodeServerProcessProcessReadyTimeout EventCode = "SERVER_PROCESS_PROCESS_READY_TIMEOUT" + EventCodeServerProcessCrashed EventCode = "SERVER_PROCESS_CRASHED" + EventCodeServerProcessTerminatedUnhealthy EventCode = "SERVER_PROCESS_TERMINATED_UNHEALTHY" + EventCodeServerProcessForceTerminated EventCode = "SERVER_PROCESS_FORCE_TERMINATED" + EventCodeServerProcessProcessExitTimeout EventCode = "SERVER_PROCESS_PROCESS_EXIT_TIMEOUT" + EventCodeGameSessionActivationTimeout EventCode = "GAME_SESSION_ACTIVATION_TIMEOUT" + EventCodeFleetCreationExtractingBuild EventCode = "FLEET_CREATION_EXTRACTING_BUILD" + EventCodeFleetCreationRunningInstaller EventCode = "FLEET_CREATION_RUNNING_INSTALLER" + EventCodeFleetCreationValidatingRuntimeConfig EventCode = "FLEET_CREATION_VALIDATING_RUNTIME_CONFIG" + EventCodeFleetVpcPeeringSucceeded EventCode = "FLEET_VPC_PEERING_SUCCEEDED" + EventCodeFleetVpcPeeringFailed EventCode = "FLEET_VPC_PEERING_FAILED" + EventCodeFleetVpcPeeringDeleted EventCode = "FLEET_VPC_PEERING_DELETED" + EventCodeInstanceInterrupted EventCode = "INSTANCE_INTERRUPTED" ) // Values returns all known values for EventCode. Note that this can be expanded in @@ -414,7 +414,7 @@ type GameServerGroupAction string // Enum values for GameServerGroupAction const ( - GameServerGroupActionReplace_instance_types GameServerGroupAction = "REPLACE_INSTANCE_TYPES" + GameServerGroupActionReplaceInstanceTypes GameServerGroupAction = "REPLACE_INSTANCE_TYPES" ) // Values returns all known values for GameServerGroupAction. 
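The constant renames in these hunks (for example EventCodeFleet_created becoming EventCodeFleetCreated) are source-breaking for any caller that referenced the old underscore names, so call sites need the CamelCase spellings going forward. A small sketch of consuming fleet events with the new names; the Event type and its EventCode field come from this package, and the logging is placeholder:

import (
	"log"

	"github.com/aws/aws-sdk-go-v2/service/gamelift/types"
)

// logFleetEvent classifies a few of the renamed EventCode values.
func logFleetEvent(e *types.Event) {
	switch e.EventCode {
	case types.EventCodeFleetCreated, types.EventCodeFleetStateActive:
		log.Printf("fleet lifecycle event: %s", e.EventCode)
	case types.EventCodeFleetActivationFailed, types.EventCodeFleetBinaryDownloadFailed:
		log.Printf("fleet creation problem: %s", e.EventCode)
	case types.EventCodeInstanceInterrupted:
		log.Printf("spot instance interrupted: %s", e.EventCode)
	default:
		log.Printf("fleet event: %s", e.EventCode)
	}
}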
Note that this can be @@ -430,9 +430,9 @@ type GameServerGroupDeleteOption string // Enum values for GameServerGroupDeleteOption const ( - GameServerGroupDeleteOptionSafe_delete GameServerGroupDeleteOption = "SAFE_DELETE" - GameServerGroupDeleteOptionForce_delete GameServerGroupDeleteOption = "FORCE_DELETE" - GameServerGroupDeleteOptionRetain GameServerGroupDeleteOption = "RETAIN" + GameServerGroupDeleteOptionSafeDelete GameServerGroupDeleteOption = "SAFE_DELETE" + GameServerGroupDeleteOptionForceDelete GameServerGroupDeleteOption = "FORCE_DELETE" + GameServerGroupDeleteOptionRetain GameServerGroupDeleteOption = "RETAIN" ) // Values returns all known values for GameServerGroupDeleteOption. Note that this @@ -544,13 +544,13 @@ type GameServerGroupStatus string // Enum values for GameServerGroupStatus const ( - GameServerGroupStatusNew GameServerGroupStatus = "NEW" - GameServerGroupStatusActivating GameServerGroupStatus = "ACTIVATING" - GameServerGroupStatusActive GameServerGroupStatus = "ACTIVE" - GameServerGroupStatusDelete_scheduled GameServerGroupStatus = "DELETE_SCHEDULED" - GameServerGroupStatusDeleting GameServerGroupStatus = "DELETING" - GameServerGroupStatusDeleted GameServerGroupStatus = "DELETED" - GameServerGroupStatusError GameServerGroupStatus = "ERROR" + GameServerGroupStatusNew GameServerGroupStatus = "NEW" + GameServerGroupStatusActivating GameServerGroupStatus = "ACTIVATING" + GameServerGroupStatusActive GameServerGroupStatus = "ACTIVE" + GameServerGroupStatusDeleteScheduled GameServerGroupStatus = "DELETE_SCHEDULED" + GameServerGroupStatusDeleting GameServerGroupStatus = "DELETING" + GameServerGroupStatusDeleted GameServerGroupStatus = "DELETED" + GameServerGroupStatusError GameServerGroupStatus = "ERROR" ) // Values returns all known values for GameServerGroupStatus. Note that this can be @@ -588,9 +588,9 @@ type GameServerInstanceStatus string // Enum values for GameServerInstanceStatus const ( - GameServerInstanceStatusActive GameServerInstanceStatus = "ACTIVE" - GameServerInstanceStatusDraining GameServerInstanceStatus = "DRAINING" - GameServerInstanceStatusSpot_terminating GameServerInstanceStatus = "SPOT_TERMINATING" + GameServerInstanceStatusActive GameServerInstanceStatus = "ACTIVE" + GameServerInstanceStatusDraining GameServerInstanceStatus = "DRAINING" + GameServerInstanceStatusSpotTerminating GameServerInstanceStatus = "SPOT_TERMINATING" ) // Values returns all known values for GameServerInstanceStatus. Note that this can @@ -608,8 +608,8 @@ type GameServerProtectionPolicy string // Enum values for GameServerProtectionPolicy const ( - GameServerProtectionPolicyNo_protection GameServerProtectionPolicy = "NO_PROTECTION" - GameServerProtectionPolicyFull_protection GameServerProtectionPolicy = "FULL_PROTECTION" + GameServerProtectionPolicyNoProtection GameServerProtectionPolicy = "NO_PROTECTION" + GameServerProtectionPolicyFullProtection GameServerProtectionPolicy = "FULL_PROTECTION" ) // Values returns all known values for GameServerProtectionPolicy. 
Note that this @@ -647,7 +647,7 @@ const ( GameSessionPlacementStatePending GameSessionPlacementState = "PENDING" GameSessionPlacementStateFulfilled GameSessionPlacementState = "FULFILLED" GameSessionPlacementStateCancelled GameSessionPlacementState = "CANCELLED" - GameSessionPlacementStateTimed_out GameSessionPlacementState = "TIMED_OUT" + GameSessionPlacementStateTimedOut GameSessionPlacementState = "TIMED_OUT" GameSessionPlacementStateFailed GameSessionPlacementState = "FAILED" ) @@ -746,14 +746,14 @@ type MatchmakingConfigurationStatus string // Enum values for MatchmakingConfigurationStatus const ( - MatchmakingConfigurationStatusCancelled MatchmakingConfigurationStatus = "CANCELLED" - MatchmakingConfigurationStatusCompleted MatchmakingConfigurationStatus = "COMPLETED" - MatchmakingConfigurationStatusFailed MatchmakingConfigurationStatus = "FAILED" - MatchmakingConfigurationStatusPlacing MatchmakingConfigurationStatus = "PLACING" - MatchmakingConfigurationStatusQueued MatchmakingConfigurationStatus = "QUEUED" - MatchmakingConfigurationStatusRequires_acceptance MatchmakingConfigurationStatus = "REQUIRES_ACCEPTANCE" - MatchmakingConfigurationStatusSearching MatchmakingConfigurationStatus = "SEARCHING" - MatchmakingConfigurationStatusTimed_out MatchmakingConfigurationStatus = "TIMED_OUT" + MatchmakingConfigurationStatusCancelled MatchmakingConfigurationStatus = "CANCELLED" + MatchmakingConfigurationStatusCompleted MatchmakingConfigurationStatus = "COMPLETED" + MatchmakingConfigurationStatusFailed MatchmakingConfigurationStatus = "FAILED" + MatchmakingConfigurationStatusPlacing MatchmakingConfigurationStatus = "PLACING" + MatchmakingConfigurationStatusQueued MatchmakingConfigurationStatus = "QUEUED" + MatchmakingConfigurationStatusRequiresAcceptance MatchmakingConfigurationStatus = "REQUIRES_ACCEPTANCE" + MatchmakingConfigurationStatusSearching MatchmakingConfigurationStatus = "SEARCHING" + MatchmakingConfigurationStatusTimedOut MatchmakingConfigurationStatus = "TIMED_OUT" ) // Values returns all known values for MatchmakingConfigurationStatus. Note that @@ -813,9 +813,9 @@ type OperatingSystem string // Enum values for OperatingSystem const ( - OperatingSystemWindows_2012 OperatingSystem = "WINDOWS_2012" - OperatingSystemAmazon_linux OperatingSystem = "AMAZON_LINUX" - OperatingSystemAmazon_linux_2 OperatingSystem = "AMAZON_LINUX_2" + OperatingSystemWindows2012 OperatingSystem = "WINDOWS_2012" + OperatingSystemAmazonLinux OperatingSystem = "AMAZON_LINUX" + OperatingSystemAmazonLinux2 OperatingSystem = "AMAZON_LINUX_2" ) // Values returns all known values for OperatingSystem. Note that this can be @@ -833,8 +833,8 @@ type PlayerSessionCreationPolicy string // Enum values for PlayerSessionCreationPolicy const ( - PlayerSessionCreationPolicyAccept_all PlayerSessionCreationPolicy = "ACCEPT_ALL" - PlayerSessionCreationPolicyDeny_all PlayerSessionCreationPolicy = "DENY_ALL" + PlayerSessionCreationPolicyAcceptAll PlayerSessionCreationPolicy = "ACCEPT_ALL" + PlayerSessionCreationPolicyDenyAll PlayerSessionCreationPolicy = "DENY_ALL" ) // Values returns all known values for PlayerSessionCreationPolicy. 
Note that this @@ -947,13 +947,13 @@ type ScalingStatusType string // Enum values for ScalingStatusType const ( - ScalingStatusTypeActive ScalingStatusType = "ACTIVE" - ScalingStatusTypeUpdate_requested ScalingStatusType = "UPDATE_REQUESTED" - ScalingStatusTypeUpdating ScalingStatusType = "UPDATING" - ScalingStatusTypeDelete_requested ScalingStatusType = "DELETE_REQUESTED" - ScalingStatusTypeDeleting ScalingStatusType = "DELETING" - ScalingStatusTypeDeleted ScalingStatusType = "DELETED" - ScalingStatusTypeError ScalingStatusType = "ERROR" + ScalingStatusTypeActive ScalingStatusType = "ACTIVE" + ScalingStatusTypeUpdateRequested ScalingStatusType = "UPDATE_REQUESTED" + ScalingStatusTypeUpdating ScalingStatusType = "UPDATING" + ScalingStatusTypeDeleteRequested ScalingStatusType = "DELETE_REQUESTED" + ScalingStatusTypeDeleting ScalingStatusType = "DELETING" + ScalingStatusTypeDeleted ScalingStatusType = "DELETED" + ScalingStatusTypeError ScalingStatusType = "ERROR" ) // Values returns all known values for ScalingStatusType. Note that this can be diff --git a/service/gamelift/types/types.go b/service/gamelift/types/types.go index cd672146d66..32fdb482b13 100644 --- a/service/gamelift/types/types.go +++ b/service/gamelift/types/types.go @@ -8,19 +8,18 @@ import ( // Properties that describe an alias resource. // -// * CreateAlias +// * CreateAlias // -// * -// ListAliases +// * ListAliases // -// * DescribeAlias +// * +// DescribeAlias // -// * UpdateAlias +// * UpdateAlias // -// * DeleteAlias +// * DeleteAlias // -// * -// ResolveAlias +// * ResolveAlias type Alias struct { // Amazon Resource Name (ARN @@ -93,17 +92,16 @@ type AwsCredentials struct { // Properties describing a custom game build. Related operations // -// * -// CreateBuild +// * CreateBuild // -// * ListBuilds +// * +// ListBuilds // -// * DescribeBuild +// * DescribeBuild // -// * UpdateBuild +// * UpdateBuild // -// * -// DeleteBuild +// * DeleteBuild type Build struct { // Amazon Resource Name (ARN @@ -134,16 +132,16 @@ type Build struct { // Current status of the build. Possible build statuses include the following: // - // - // * INITIALIZED -- A new build has been defined, but no files have been uploaded. + // * + // INITIALIZED -- A new build has been defined, but no files have been uploaded. // You cannot create fleets for builds that are in this status. When a build is // successfully created, the build status is set to this value. // - // * READY -- The + // * READY -- The // game build has been successfully uploaded. You can now create new fleets for // this build. // - // * FAILED -- The game build upload failed. You cannot create new + // * FAILED -- The game build upload failed. You cannot create new // fleets for this build. Status BuildStatus @@ -186,18 +184,18 @@ type DesiredPlayerSession struct { // UpdateFleetCapacity request, or if access to resources is temporarily // affected. // -// * CreateFleet +// * CreateFleet // -// * ListFleets +// * ListFleets // -// * DeleteFleet +// * DeleteFleet // -// * +// * // DescribeFleetAttributes // -// * UpdateFleetAttributes +// * UpdateFleetAttributes // -// * StartFleetActions or +// * StartFleetActions or // StopFleetActions type EC2InstanceCounts struct { @@ -253,80 +251,79 @@ type Event struct { // The type of event being logged. Fleet creation events (ordered by fleet creation // activity): // - // * FLEET_CREATED -- A fleet resource was successfully created - // with a status of NEW. Event messaging includes the fleet ID. 
+ // * FLEET_CREATED -- A fleet resource was successfully created with a + // status of NEW. Event messaging includes the fleet ID. // - // * - // FLEET_STATE_DOWNLOADING -- Fleet status changed from NEW to DOWNLOADING. The - // compressed build has started downloading to a fleet instance for installation. + // * FLEET_STATE_DOWNLOADING + // -- Fleet status changed from NEW to DOWNLOADING. The compressed build has + // started downloading to a fleet instance for installation. // - // - // * FLEET_BINARY_DOWNLOAD_FAILED -- The build failed to download to the fleet + // * + // FLEET_BINARY_DOWNLOAD_FAILED -- The build failed to download to the fleet // instance. // - // * FLEET_CREATION_EXTRACTING_BUILD – The game server build was + // * FLEET_CREATION_EXTRACTING_BUILD – The game server build was // successfully downloaded to an instance, and the build files are now being // extracted from the uploaded build and saved to an instance. Failure at this // stage prevents a fleet from moving to ACTIVE status. Logs for this stage display // a list of the files that are extracted and saved on the instance. Access the // logs by using the URL in PreSignedLogUrl. // - // * - // FLEET_CREATION_RUNNING_INSTALLER – The game server build files were successfully - // extracted, and the Amazon GameLift is now running the build's install script (if - // one is included). Failure in this stage prevents a fleet from moving to ACTIVE - // status. Logs for this stage list the installation steps and whether or not the - // install completed successfully. Access the logs by using the URL in - // PreSignedLogUrl. - // - // * FLEET_CREATION_VALIDATING_RUNTIME_CONFIG -- The build - // process was successful, and the Amazon GameLift is now verifying that the game - // server launch paths, which are specified in the fleet's runtime configuration, - // exist. If any listed launch path exists, Amazon GameLift tries to launch a game - // server process and waits for the process to report ready. Failures in this stage - // prevent a fleet from moving to ACTIVE status. Logs for this stage list the - // launch paths in the runtime configuration and indicate whether each is found. + // * FLEET_CREATION_RUNNING_INSTALLER – + // The game server build files were successfully extracted, and the Amazon GameLift + // is now running the build's install script (if one is included). Failure in this + // stage prevents a fleet from moving to ACTIVE status. Logs for this stage list + // the installation steps and whether or not the install completed successfully. // Access the logs by using the URL in PreSignedLogUrl. // - // * - // FLEET_STATE_VALIDATING -- Fleet status changed from DOWNLOADING to VALIDATING. - // - // - // * FLEET_VALIDATION_LAUNCH_PATH_NOT_FOUND -- Validation of the runtime + // * + // FLEET_CREATION_VALIDATING_RUNTIME_CONFIG -- The build process was successful, + // and the Amazon GameLift is now verifying that the game server launch paths, + // which are specified in the fleet's runtime configuration, exist. If any listed + // launch path exists, Amazon GameLift tries to launch a game server process and + // waits for the process to report ready. Failures in this stage prevent a fleet + // from moving to ACTIVE status. Logs for this stage list the launch paths in the + // runtime configuration and indicate whether each is found. Access the logs by + // using the URL in PreSignedLogUrl. + // + // * FLEET_STATE_VALIDATING -- Fleet status + // changed from DOWNLOADING to VALIDATING. 
+ // + // * + // FLEET_VALIDATION_LAUNCH_PATH_NOT_FOUND -- Validation of the runtime // configuration failed because the executable specified in a launch path does not // exist on the instance. // - // * FLEET_STATE_BUILDING -- Fleet status changed from + // * FLEET_STATE_BUILDING -- Fleet status changed from // VALIDATING to BUILDING. // - // * FLEET_VALIDATION_EXECUTABLE_RUNTIME_FAILURE -- + // * FLEET_VALIDATION_EXECUTABLE_RUNTIME_FAILURE -- // Validation of the runtime configuration failed because the executable specified // in a launch path failed to run on the fleet instance. // - // * - // FLEET_STATE_ACTIVATING -- Fleet status changed from BUILDING to ACTIVATING. - // + // * FLEET_STATE_ACTIVATING + // -- Fleet status changed from BUILDING to ACTIVATING. // - // * FLEET_ACTIVATION_FAILED - The fleet failed to successfully complete one of the - // steps in the fleet activation process. This event code indicates that the game - // build was successfully downloaded to a fleet instance, built, and validated, but - // was not able to start a server process. Learn more at Debug Fleet Creation - // Issues + // * FLEET_ACTIVATION_FAILED + // - The fleet failed to successfully complete one of the steps in the fleet + // activation process. This event code indicates that the game build was + // successfully downloaded to a fleet instance, built, and validated, but was not + // able to start a server process. Learn more at Debug Fleet Creation Issues // (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-creating-debug.html#fleets-creating-debug-creation) // - // - // * FLEET_STATE_ACTIVE -- The fleet's status changed from ACTIVATING to ACTIVE. - // The fleet is now ready to host game sessions. + // * + // FLEET_STATE_ACTIVE -- The fleet's status changed from ACTIVATING to ACTIVE. The + // fleet is now ready to host game sessions. // // VPC peering events: // - // * + // * // FLEET_VPC_PEERING_SUCCEEDED -- A VPC peering connection has been established // between the VPC for an Amazon GameLift fleet and a VPC in your AWS account. // - // - // * FLEET_VPC_PEERING_FAILED -- A requested VPC peering connection has failed. - // Event details and status information (see DescribeVpcPeeringConnections) provide + // * + // FLEET_VPC_PEERING_FAILED -- A requested VPC peering connection has failed. Event + // details and status information (see DescribeVpcPeeringConnections) provide // additional detail. A common reason for peering failure is that the two VPCs have // overlapping CIDR blocks of IPv4 addresses. To resolve this, change the CIDR // block for the VPC in your AWS account. For more information on VPC peering @@ -334,31 +331,31 @@ type Event struct { // https://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide/invalid-peering-configurations.html // (https://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide/invalid-peering-configurations.html) // - // - // * FLEET_VPC_PEERING_DELETED -- A VPC peering connection has been successfully + // * + // FLEET_VPC_PEERING_DELETED -- A VPC peering connection has been successfully // deleted. // // Spot instance events: // - // * INSTANCE_INTERRUPTED -- A spot instance - // was interrupted by EC2 with a two-minute notification. + // * INSTANCE_INTERRUPTED -- A spot instance was + // interrupted by EC2 with a two-minute notification. 
// // Other fleet events: // - // - // * FLEET_SCALING_EVENT -- A change was made to the fleet's capacity settings + // * + // FLEET_SCALING_EVENT -- A change was made to the fleet's capacity settings // (desired instances, minimum/maximum scaling limits). Event messaging includes // the new capacity settings. // - // * - // FLEET_NEW_GAME_SESSION_PROTECTION_POLICY_UPDATED -- A change was made to the - // fleet's game session protection policy setting. Event messaging includes both - // the old and new policy setting. + // * FLEET_NEW_GAME_SESSION_PROTECTION_POLICY_UPDATED + // -- A change was made to the fleet's game session protection policy setting. + // Event messaging includes both the old and new policy setting. // - // * FLEET_DELETED -- A request to delete a - // fleet was initiated. + // * FLEET_DELETED + // -- A request to delete a fleet was initiated. // - // * GENERIC_EVENT -- An unspecified event has occurred. + // * GENERIC_EVENT -- An unspecified + // event has occurred. EventCode EventCode // A unique identifier for a fleet event. @@ -382,18 +379,18 @@ type Event struct { // General properties describing a fleet. // -// * CreateFleet -// -// * ListFleets +// * CreateFleet // +// * ListFleets // -// * DeleteFleet +// * +// DeleteFleet // -// * DescribeFleetAttributes +// * DescribeFleetAttributes // -// * UpdateFleetAttributes +// * UpdateFleetAttributes // -// * +// * // StartFleetActions or StopFleetActions type FleetAttributes struct { @@ -470,11 +467,11 @@ type FleetAttributes struct { // The type of game session protection to set for all new instances started in the // fleet. // - // * NoProtection -- The game session can be terminated during a - // scale-down event. + // * NoProtection -- The game session can be terminated during a scale-down + // event. // - // * FullProtection -- If the game session is in an ACTIVE - // status, it cannot be terminated during a scale-down event. + // * FullProtection -- If the game session is in an ACTIVE status, it + // cannot be terminated during a scale-down event. NewGameSessionProtectionPolicy ProtectionPolicy // Operating system of the fleet's computing resources. A fleet's operating system @@ -508,24 +505,24 @@ type FleetAttributes struct { // Current status of the fleet. Possible fleet statuses include the following: // + // * + // NEW -- A new fleet has been defined and desired instances is set to 1. // - // * NEW -- A new fleet has been defined and desired instances is set to 1. - // - // * + // * // DOWNLOADING/VALIDATING/BUILDING/ACTIVATING -- Amazon GameLift is setting up the // new fleet, creating new instances with the game build or Realtime script and // starting server processes. // - // * ACTIVE -- Hosts can now accept game - // sessions. + // * ACTIVE -- Hosts can now accept game sessions. // - // * ERROR -- An error occurred when downloading, validating, - // building, or activating the fleet. + // * + // ERROR -- An error occurred when downloading, validating, building, or activating + // the fleet. // - // * DELETING -- Hosts are responding to a - // delete fleet request. + // * DELETING -- Hosts are responding to a delete fleet request. // - // * TERMINATED -- The fleet no longer exists. + // * + // TERMINATED -- The fleet no longer exists. Status FleetStatus // List of fleet activity that have been suspended using StopFleetActions. This @@ -542,19 +539,19 @@ type FleetAttributes struct { // updated as needed. The maximum number of instances for a fleet is determined by // the fleet's instance type. 
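Editor's note: the fleet event codes and fleet statuses rewrapped in the hunks above surface in the generated Go client through the Event and FleetAttributes types. Below is a minimal sketch of scanning recent events while a fleet is being created; the operation and constant names (DescribeFleetEvents, EventCodeFleetActivationFailed, EventCodeFleetStateActive) are assumed to follow the usual aws-sdk-go-v2 surface and this patch's CamelCase enum naming, and are not taken from the diff itself.

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/gamelift"
	"github.com/aws/aws-sdk-go-v2/service/gamelift/types"
)

// checkFleetCreation reports an error if the fleet failed to activate, pointing
// at the pre-signed log URL mentioned in the event descriptions above.
func checkFleetCreation(ctx context.Context, client *gamelift.Client, fleetID string) error {
	out, err := client.DescribeFleetEvents(ctx, &gamelift.DescribeFleetEventsInput{
		FleetId: aws.String(fleetID),
	})
	if err != nil {
		return err
	}
	for _, ev := range out.Events {
		switch ev.EventCode {
		case types.EventCodeFleetActivationFailed:
			return fmt.Errorf("fleet %s failed to activate: %s (logs: %s)",
				fleetID, aws.ToString(ev.Message), aws.ToString(ev.PreSignedLogUrl))
		case types.EventCodeFleetStateActive:
			return nil // fleet reached ACTIVE and can host game sessions
		}
	}
	return nil // still DOWNLOADING/VALIDATING/BUILDING/ACTIVATING; poll again later
}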
// -// * CreateFleet +// * CreateFleet // -// * ListFleets +// * ListFleets // -// * -// DeleteFleet +// * DeleteFleet // -// * DescribeFleetAttributes +// * +// DescribeFleetAttributes // -// * UpdateFleetAttributes +// * UpdateFleetAttributes // -// * -// StartFleetActions or StopFleetActions +// * StartFleetActions or +// StopFleetActions type FleetCapacity struct { // A unique identifier for a fleet. @@ -574,19 +571,19 @@ type FleetCapacity struct { // Current status of fleet utilization, including the number of game and player // sessions being hosted. // -// * CreateFleet -// -// * ListFleets +// * CreateFleet // -// * DeleteFleet +// * ListFleets // +// * DeleteFleet // -// * DescribeFleetAttributes +// * +// DescribeFleetAttributes // -// * UpdateFleetAttributes +// * UpdateFleetAttributes // -// * StartFleetActions -// or StopFleetActions +// * StartFleetActions or +// StopFleetActions type FleetUtilization struct { // Number of active game sessions currently being hosted on all instances in the @@ -635,19 +632,19 @@ type GameProperty struct { // RegisterGameServer and deleted by calling DeregisterGameServer. A game server is // claimed to host a game session by calling ClaimGameServer. // -// * +// * // RegisterGameServer // -// * ListGameServers +// * ListGameServers // -// * ClaimGameServer +// * ClaimGameServer // -// * +// * // DescribeGameServer // -// * UpdateGameServer +// * UpdateGameServer // -// * DeregisterGameServer +// * DeregisterGameServer type GameServer struct { // Indicates when an available game server has been reserved for gameplay but has @@ -705,11 +702,11 @@ type GameServer struct { // Indicates whether the game server is currently available for new games or is // busy. Possible statuses include: // - // * AVAILABLE - The game server is available - // to be claimed. A game server that has been claimed remains in this status until - // it reports game hosting activity. + // * AVAILABLE - The game server is available to + // be claimed. A game server that has been claimed remains in this status until it + // reports game hosting activity. // - // * UTILIZED - The game server is currently + // * UTILIZED - The game server is currently // hosting a game session with players. UtilizationStatus GameServerUtilizationStatus } @@ -722,24 +719,23 @@ type GameServer struct { // temporarily suspended and resumed by calling SuspendGameServerGroup and // ResumeGameServerGroup, respectively. // -// * CreateGameServerGroup +// * CreateGameServerGroup // -// * +// * // ListGameServerGroups // -// * DescribeGameServerGroup +// * DescribeGameServerGroup // -// * -// UpdateGameServerGroup +// * UpdateGameServerGroup // -// * DeleteGameServerGroup +// * +// DeleteGameServerGroup // -// * -// ResumeGameServerGroup +// * ResumeGameServerGroup // -// * SuspendGameServerGroup +// * SuspendGameServerGroup // -// * +// * // DescribeGameServerInstances type GameServerGroup struct { @@ -750,23 +746,23 @@ type GameServerGroup struct { // Indicates how GameLift FleetIQ balances the use of Spot Instances and On-Demand // Instances in the game server group. Method options include the following: // - // * + // * // SPOT_ONLY - Only Spot Instances are used in the game server group. If Spot // Instances are unavailable or not viable for game hosting, the game server group // provides no hosting capacity until Spot Instances can again be used. 
Until then, // no new instances are started, and the existing nonviable Spot Instances are // terminated (after current gameplay ends) and are not replaced. // - // * - // SPOT_PREFERRED - (default value) Spot Instances are used whenever available in - // the game server group. If Spot Instances are unavailable, the game server group - // continues to provide hosting capacity by falling back to On-Demand Instances. - // Existing nonviable Spot Instances are terminated (after current gameplay ends) - // and are replaced with new On-Demand Instances. + // * SPOT_PREFERRED + // - (default value) Spot Instances are used whenever available in the game server + // group. If Spot Instances are unavailable, the game server group continues to + // provide hosting capacity by falling back to On-Demand Instances. Existing + // nonviable Spot Instances are terminated (after current gameplay ends) and are + // replaced with new On-Demand Instances. // - // * ON_DEMAND_ONLY - Only - // On-Demand Instances are used in the game server group. No Spot Instances are - // used, even when available, while this balancing strategy is in force. + // * ON_DEMAND_ONLY - Only On-Demand + // Instances are used in the game server group. No Spot Instances are used, even + // when available, while this balancing strategy is in force. BalancingStrategy BalancingStrategy // A timestamp that indicates when this data object was created. Format is a number @@ -803,30 +799,29 @@ type GameServerGroup struct { // The current status of the game server group. Possible statuses include: // - // * - // NEW - GameLift FleetIQ has validated the CreateGameServerGroup() request. + // * NEW - + // GameLift FleetIQ has validated the CreateGameServerGroup() request. // - // * + // * // ACTIVATING - GameLift FleetIQ is setting up a game server group, which includes // creating an Auto Scaling group in your AWS account. // - // * ACTIVE - The game - // server group has been successfully created. + // * ACTIVE - The game server + // group has been successfully created. // - // * DELETE_SCHEDULED - A request - // to delete the game server group has been received. + // * DELETE_SCHEDULED - A request to delete + // the game server group has been received. // - // * DELETING - GameLift - // FleetIQ has received a valid DeleteGameServerGroup() request and is processing - // it. GameLift FleetIQ must first complete and release hosts before it deletes the - // Auto Scaling group and the game server group. + // * DELETING - GameLift FleetIQ has + // received a valid DeleteGameServerGroup() request and is processing it. GameLift + // FleetIQ must first complete and release hosts before it deletes the Auto Scaling + // group and the game server group. // - // * DELETED - The game server - // group has been successfully deleted. + // * DELETED - The game server group has been + // successfully deleted. // - // * ERROR - The asynchronous processes - // of activating or deleting a game server group has failed, resulting in an error - // state. + // * ERROR - The asynchronous processes of activating or + // deleting a game server group has failed, resulting in an error state. Status GameServerGroupStatus // Additional information about the current game server group status. This @@ -870,24 +865,23 @@ type GameServerGroupAutoScalingPolicy struct { // creating the game server group. Retrieve game server instances for a game server // group by calling DescribeGameServerInstances. 
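Editor's note: the game server group statuses listed above (NEW through ERROR) determine how long a caller waits after CreateGameServerGroup before the group is usable. A rough polling sketch follows, assuming the usual generated client surface (DescribeGameServerGroup, a StatusReason field, and CamelCase status constants); none of these identifiers are taken from this diff.

package example

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/gamelift"
	"github.com/aws/aws-sdk-go-v2/service/gamelift/types"
)

// waitForGameServerGroup polls until the group leaves the NEW/ACTIVATING states.
func waitForGameServerGroup(ctx context.Context, client *gamelift.Client, name string) error {
	for {
		if err := ctx.Err(); err != nil {
			return err
		}
		out, err := client.DescribeGameServerGroup(ctx, &gamelift.DescribeGameServerGroupInput{
			GameServerGroupName: aws.String(name),
		})
		if err != nil {
			return err
		}
		switch out.GameServerGroup.Status {
		case types.GameServerGroupStatusActive:
			return nil // game servers can now be registered and claimed
		case types.GameServerGroupStatusError:
			return fmt.Errorf("group %s: %s", name, aws.ToString(out.GameServerGroup.StatusReason))
		}
		time.Sleep(30 * time.Second) // NEW or ACTIVATING; keep waiting
	}
}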
// -// * CreateGameServerGroup -// +// * CreateGameServerGroup // -// * ListGameServerGroups +// * +// ListGameServerGroups // -// * DescribeGameServerGroup +// * DescribeGameServerGroup // -// * -// UpdateGameServerGroup +// * UpdateGameServerGroup // -// * DeleteGameServerGroup +// * +// DeleteGameServerGroup // -// * -// ResumeGameServerGroup +// * ResumeGameServerGroup // -// * SuspendGameServerGroup +// * SuspendGameServerGroup // -// * +// * // DescribeGameServerInstances type GameServerInstance struct { @@ -906,19 +900,19 @@ type GameServerInstance struct { // Current status of the game server instance. // - // * ACTIVE -- The instance is - // viable for hosting game servers. + // * ACTIVE -- The instance is viable + // for hosting game servers. // - // * DRAINING -- The instance is not viable - // for hosting game servers. Existing game servers are in the process of ending, - // and new game servers are not started on this instance unless no other resources - // are available. When the instance is put in DRAINING, a new instance is started - // up to replace it. Once the instance has no UTILIZED game servers, it will be + // * DRAINING -- The instance is not viable for hosting + // game servers. Existing game servers are in the process of ending, and new game + // servers are not started on this instance unless no other resources are + // available. When the instance is put in DRAINING, a new instance is started up to + // replace it. Once the instance has no UTILIZED game servers, it will be // terminated in favor of the new instance. // - // * SPOT_TERMINATING -- The instance - // is in the process of shutting down due to a Spot instance interruption. No new - // game servers are started on this instance. + // * SPOT_TERMINATING -- The instance is + // in the process of shutting down due to a Spot instance interruption. No new game + // servers are started on this instance. InstanceStatus GameServerInstanceStatus } @@ -928,29 +922,28 @@ type GameServerInstance struct { // can reuse idempotency token values after this time. Game session logs are // retained for 14 days. // -// * CreateGameSession -// -// * DescribeGameSessions +// * CreateGameSession // +// * DescribeGameSessions // -// * DescribeGameSessionDetails +// * +// DescribeGameSessionDetails // -// * SearchGameSessions +// * SearchGameSessions // -// * -// UpdateGameSession +// * UpdateGameSession // -// * GetGameSessionLogUrl -// -// * Game session placements +// * +// GetGameSessionLogUrl // +// * Game session placements // // * StartGameSessionPlacement // -// * DescribeGameSessionPlacement +// * +// DescribeGameSessionPlacement // -// * -// StopGameSessionPlacement +// * StopGameSessionPlacement type GameSession struct { // Time stamp indicating when this data object was created. Format is a number @@ -968,11 +961,11 @@ type GameSession struct { // DNS identifier assigned to the instance that is running the game session. Values // have the following format: // - // * TLS-enabled fleets: ..amazongamelift.com. - // + // * TLS-enabled fleets: ..amazongamelift.com. // - // * Non-TLS-enabled fleets: ec2-.compute.amazonaws.com. (See Amazon EC2 Instance - // IP Addressing + // * + // Non-TLS-enabled fleets: ec2-.compute.amazonaws.com. (See Amazon EC2 Instance IP + // Addressing // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-instance-addressing.html#concepts-public-addresses).) 
// // When @@ -1062,11 +1055,11 @@ type GameSessionConnectionInfo struct { // DNS identifier assigned to the instance that is running the game session. Values // have the following format: // - // * TLS-enabled fleets: ..amazongamelift.com. + // * TLS-enabled fleets: ..amazongamelift.com. // - // - // * Non-TLS-enabled fleets: ec2-.compute.amazonaws.com. (See Amazon EC2 Instance - // IP Addressing + // * + // Non-TLS-enabled fleets: ec2-.compute.amazonaws.com. (See Amazon EC2 Instance IP + // Addressing // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-instance-addressing.html#concepts-public-addresses).) // // When @@ -1101,12 +1094,12 @@ type GameSessionDetail struct { // Current status of protection for the game session. // - // * NoProtection -- The - // game session can be terminated during a scale-down event. + // * NoProtection -- The game + // session can be terminated during a scale-down event. // - // * FullProtection - // -- If the game session is in an ACTIVE status, it cannot be terminated during a - // scale-down event. + // * FullProtection -- If the + // game session is in an ACTIVE status, it cannot be terminated during a scale-down + // event. ProtectionPolicy ProtectionPolicy } @@ -1114,23 +1107,23 @@ type GameSessionDetail struct { // the full details of the original request plus the current status and start/end // time stamps. Game session placement-related operations include: // -// * +// * // StartGameSessionPlacement // -// * DescribeGameSessionPlacement +// * DescribeGameSessionPlacement // -// * +// * // StopGameSessionPlacement type GameSessionPlacement struct { // DNS identifier assigned to the instance that is running the game session. Values // have the following format: // - // * TLS-enabled fleets: ..amazongamelift.com. - // + // * TLS-enabled fleets: ..amazongamelift.com. // - // * Non-TLS-enabled fleets: ec2-.compute.amazonaws.com. (See Amazon EC2 Instance - // IP Addressing + // * + // Non-TLS-enabled fleets: ec2-.compute.amazonaws.com. (See Amazon EC2 Instance IP + // Addressing // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-instance-addressing.html#concepts-public-addresses).) // // When @@ -1220,23 +1213,23 @@ type GameSessionPlacement struct { // Current status of the game session placement request. // - // * PENDING -- The + // * PENDING -- The // placement request is currently in the queue waiting to be processed. // - // * + // * // FULFILLED -- A new game session and player sessions (if requested) have been // successfully created. Values for GameSessionArn and GameSessionRegion are // available. // - // * CANCELLED -- The placement request was canceled with a call to + // * CANCELLED -- The placement request was canceled with a call to // StopGameSessionPlacement. // - // * TIMED_OUT -- A new game session was not + // * TIMED_OUT -- A new game session was not // successfully created before the time limit expired. You can resubmit the // placement request as needed. // - // * FAILED -- GameLift is not able to complete - // the process of placing the game session. Common reasons are the game session + // * FAILED -- GameLift is not able to complete the + // process of placing the game session. Common reasons are the game session // terminated before the placement process was completed, or an unexpected internal // error. Status GameSessionPlacementState @@ -1245,30 +1238,29 @@ type GameSessionPlacement struct { // Configuration of a queue that is used to process game session placement // requests. 
The queue configuration identifies several game features: // -// * The +// * The // destinations where a new game session can potentially be hosted. Amazon GameLift // tries these destinations in an order based on either the queue's default order // or player latency information, if provided in a placement request. With latency // information, Amazon GameLift can place game sessions where the majority of // players are reporting the lowest possible latency. // -// * The length of time -// that placement requests can wait in the queue before timing out. +// * The length of time that +// placement requests can wait in the queue before timing out. // -// * A set of -// optional latency policies that protect individual players from high latencies, -// preventing game sessions from being placed where any individual player is -// reporting latency higher than a policy's maximum. +// * A set of optional +// latency policies that protect individual players from high latencies, preventing +// game sessions from being placed where any individual player is reporting latency +// higher than a policy's maximum. // -// * -// CreateGameSessionQueue +// * CreateGameSessionQueue // -// * DescribeGameSessionQueues +// * +// DescribeGameSessionQueues // -// * -// UpdateGameSessionQueue +// * UpdateGameSessionQueue // -// * DeleteGameSessionQueue +// * DeleteGameSessionQueue type GameSessionQueue struct { // A list of fleets that can be used to fulfill game session placement requests in @@ -1307,15 +1299,14 @@ type GameSessionQueue struct { // queue are fulfilled by starting a new game session on any destination that is // configured for a queue. // -// * CreateGameSessionQueue +// * CreateGameSessionQueue // -// * +// * // DescribeGameSessionQueues // -// * UpdateGameSessionQueue +// * UpdateGameSessionQueue // -// * -// DeleteGameSessionQueue +// * DeleteGameSessionQueue type GameSessionQueueDestination struct { // The Amazon Resource Name (ARN) that is assigned to fleet or fleet alias. ARNs, @@ -1335,11 +1326,11 @@ type Instance struct { // DNS identifier assigned to the instance that is running the game session. Values // have the following format: // - // * TLS-enabled fleets: ..amazongamelift.com. + // * TLS-enabled fleets: ..amazongamelift.com. // - // - // * Non-TLS-enabled fleets: ec2-.compute.amazonaws.com. (See Amazon EC2 Instance - // IP Addressing + // * + // Non-TLS-enabled fleets: ec2-.compute.amazonaws.com. (See Amazon EC2 Instance IP + // Addressing // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-instance-addressing.html#concepts-public-addresses).) // // When @@ -1361,17 +1352,17 @@ type Instance struct { // Current status of the instance. Possible statuses include the following: // - // * + // * // PENDING -- The instance is in the process of being created and launching server // processes as defined in the fleet's run-time configuration. // - // * ACTIVE -- The + // * ACTIVE -- The // instance has been successfully created and at least one server process has // successfully launched and reported back to Amazon GameLift that it is ready to // host a game session. The instance is now considered ready to host game // sessions. // - // * TERMINATING -- The instance is in the process of shutting down. + // * TERMINATING -- The instance is in the process of shutting down. // This may happen to reduce capacity during a scaling down event or to recycle // resources in the event of a problem. 
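Editor's note: the instance statuses just described (PENDING, ACTIVE, TERMINATING) are what DescribeInstances reports per fleet instance. A small filtering sketch; the operation, field, and constant names are assumed from the usual aws-sdk-go-v2 shapes rather than taken from this diff.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/gamelift"
	"github.com/aws/aws-sdk-go-v2/service/gamelift/types"
)

// activeInstances returns only the instances that are ready to host game sessions.
func activeInstances(ctx context.Context, client *gamelift.Client, fleetID string) ([]types.Instance, error) {
	out, err := client.DescribeInstances(ctx, &gamelift.DescribeInstancesInput{
		FleetId: aws.String(fleetID),
	})
	if err != nil {
		return nil, err
	}
	var active []types.Instance
	for _, inst := range out.Instances {
		if inst.Status == types.InstanceStatusActive {
			active = append(active, inst) // PENDING and TERMINATING instances are skipped
		}
	}
	return active, nil
}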
Status InstanceStatus @@ -1603,32 +1594,32 @@ type MatchmakingConfiguration struct { // rule set, see Build a FlexMatch Rule Set // (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-rulesets.html). // -// -// * Teams -- Required. A rule set must define one or multiple teams for the match +// * +// Teams -- Required. A rule set must define one or multiple teams for the match // and set minimum and maximum team sizes. For example, a rule set might describe a // 4x4 match that requires all eight slots to be filled. // -// * Player attributes -// -- Optional. These attributes specify a set of player characteristics to -// evaluate when looking for a match. Matchmaking requests that use a rule set with -// player attributes must provide the corresponding attribute values. For example, -// an attribute might specify a player's skill or level. -// -// * Rules -- Optional. -// Rules define how to evaluate potential players for a match based on player -// attributes. A rule might specify minimum requirements for individual players, -// teams, or entire matches. For example, a rule might require each player to meet -// a certain skill level, each team to have at least one player in a certain role, -// or the match to have a minimum average skill level. or may describe an entire +// * Player attributes -- +// Optional. These attributes specify a set of player characteristics to evaluate +// when looking for a match. Matchmaking requests that use a rule set with player +// attributes must provide the corresponding attribute values. For example, an +// attribute might specify a player's skill or level. +// +// * Rules -- Optional. Rules +// define how to evaluate potential players for a match based on player attributes. +// A rule might specify minimum requirements for individual players, teams, or +// entire matches. For example, a rule might require each player to meet a certain +// skill level, each team to have at least one player in a certain role, or the +// match to have a minimum average skill level. or may describe an entire // group--such as all teams must be evenly matched or have at least one player in a // certain role. // -// * Expansions -- Optional. Expansions allow you to relax the -// rules after a period of time when no acceptable matches are found. This feature -// lets you balance getting players into games in a reasonable amount of time -// instead of making them wait indefinitely for the best possible match. For -// example, you might use an expansion to increase the maximum skill variance -// between players after 30 seconds. +// * Expansions -- Optional. Expansions allow you to relax the rules +// after a period of time when no acceptable matches are found. This feature lets +// you balance getting players into games in a reasonable amount of time instead of +// making them wait indefinitely for the best possible match. For example, you +// might use an expansion to increase the maximum skill variance between players +// after 30 seconds. type MatchmakingRuleSet struct { // A collection of matchmaking rules, formatted as a JSON string. Comments are not @@ -1695,39 +1686,38 @@ type MatchmakingTicket struct { // Current status of the matchmaking request. // - // * QUEUED -- The matchmaking - // request has been received and is currently waiting to be processed. + // * QUEUED -- The matchmaking request + // has been received and is currently waiting to be processed. // - // * - // SEARCHING -- The matchmaking request is currently being processed. 
+ // * SEARCHING -- The + // matchmaking request is currently being processed. // - // * - // REQUIRES_ACCEPTANCE -- A match has been proposed and the players must accept the - // match (see AcceptMatch). This status is used only with requests that use a - // matchmaking configuration with a player acceptance requirement. + // * REQUIRES_ACCEPTANCE -- A + // match has been proposed and the players must accept the match (see AcceptMatch). + // This status is used only with requests that use a matchmaking configuration with + // a player acceptance requirement. // - // * PLACING - // -- The FlexMatch engine has matched players and is in the process of placing a - // new game session for the match. + // * PLACING -- The FlexMatch engine has matched + // players and is in the process of placing a new game session for the match. // - // * COMPLETED -- Players have been matched - // and a game session is ready to host the players. A ticket in this state contains - // the necessary connection information for players. + // * + // COMPLETED -- Players have been matched and a game session is ready to host the + // players. A ticket in this state contains the necessary connection information + // for players. // - // * FAILED -- The - // matchmaking request was not completed. + // * FAILED -- The matchmaking request was not completed. // - // * CANCELLED -- The matchmaking - // request was canceled. This may be the result of a call to StopMatchmaking or a - // proposed match that one or more players failed to accept. + // * + // CANCELLED -- The matchmaking request was canceled. This may be the result of a + // call to StopMatchmaking or a proposed match that one or more players failed to + // accept. // - // * TIMED_OUT -- - // The matchmaking request was not successful within the duration specified in the - // matchmaking configuration. + // * TIMED_OUT -- The matchmaking request was not successful within the + // duration specified in the matchmaking configuration. // - // Matchmaking requests that fail to successfully - // complete (statuses FAILED, CANCELLED, TIMED_OUT) can be resubmitted as new - // requests with new ticket IDs. + // Matchmaking requests that + // fail to successfully complete (statuses FAILED, CANCELLED, TIMED_OUT) can be + // resubmitted as new requests with new ticket IDs. Status MatchmakingConfigurationStatus // Additional information about the current status. @@ -1747,22 +1737,22 @@ type MatchmakingTicket struct { // player session ID. To retrieve full details on a player session, call // DescribePlayerSessions with the player session ID. // -// * CreatePlayerSession +// * CreatePlayerSession // +// * +// CreatePlayerSessions // -// * CreatePlayerSessions +// * DescribePlayerSessions // -// * DescribePlayerSessions +// * Game session placements // -// * Game session -// placements +// * +// StartGameSessionPlacement // -// * StartGameSessionPlacement +// * DescribeGameSessionPlacement // -// * -// DescribeGameSessionPlacement -// -// * StopGameSessionPlacement +// * +// StopGameSessionPlacement type PlacedPlayerSession struct { // A unique identifier for a player that is associated with this player session. @@ -1825,15 +1815,14 @@ type PlayerLatency struct { // higher than the cap. Latency policies are only enforced when the placement // request contains player latency information. 
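Editor's note: a latency policy only takes effect once it is attached to a queue, so the GameSessionQueue and PlayerLatencyPolicy pieces above are typically created together. A hedged sketch of the call, assuming the GA aws-sdk-go-v2 field shapes (pointer numerics via aws.Int32, which may differ in this snapshot of the generated code); the queue name, values, and fleet ARN are placeholders.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/gamelift"
	"github.com/aws/aws-sdk-go-v2/service/gamelift/types"
)

// createQueue builds a queue that caps individual player latency at 100 ms for the
// first minute of a placement request and at 200 ms for the remainder.
func createQueue(ctx context.Context, client *gamelift.Client, fleetARN string) (*types.GameSessionQueue, error) {
	out, err := client.CreateGameSessionQueue(ctx, &gamelift.CreateGameSessionQueueInput{
		Name:             aws.String("example-queue"),
		TimeoutInSeconds: aws.Int32(600), // how long a placement request may wait before TIMED_OUT
		PlayerLatencyPolicies: []types.PlayerLatencyPolicy{
			{MaximumIndividualPlayerLatencyMilliseconds: aws.Int32(100), PolicyDurationSeconds: aws.Int32(60)},
			{MaximumIndividualPlayerLatencyMilliseconds: aws.Int32(200)}, // enforced after the first policy expires
		},
		Destinations: []types.GameSessionQueueDestination{
			{DestinationArn: aws.String(fleetARN)},
		},
	})
	if err != nil {
		return nil, err
	}
	return out.GameSessionQueue, nil
}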
// -// * CreateGameSessionQueue +// * CreateGameSessionQueue // +// * +// DescribeGameSessionQueues // -// * DescribeGameSessionQueues -// -// * UpdateGameSessionQueue +// * UpdateGameSessionQueue // -// * -// DeleteGameSessionQueue +// * DeleteGameSessionQueue type PlayerLatencyPolicy struct { // The maximum latency value that is allowed for any player, in milliseconds. All @@ -1856,22 +1845,22 @@ type PlayerLatencyPolicy struct { // changes to COMPLETED. Once the session ends, the player session object is // retained for 30 days and then removed. // -// * CreatePlayerSession +// * CreatePlayerSession // -// * +// * // CreatePlayerSessions // -// * DescribePlayerSessions +// * DescribePlayerSessions // -// * Game session -// placements +// * Game session placements // -// * StartGameSessionPlacement +// * +// StartGameSessionPlacement // -// * -// DescribeGameSessionPlacement +// * DescribeGameSessionPlacement // -// * StopGameSessionPlacement +// * +// StopGameSessionPlacement type PlayerSession struct { // Time stamp indicating when this data object was created. Format is a number @@ -1881,11 +1870,11 @@ type PlayerSession struct { // DNS identifier assigned to the instance that is running the game session. Values // have the following format: // - // * TLS-enabled fleets: ..amazongamelift.com. + // * TLS-enabled fleets: ..amazongamelift.com. // - // - // * Non-TLS-enabled fleets: ec2-.compute.amazonaws.com. (See Amazon EC2 Instance - // IP Addressing + // * + // Non-TLS-enabled fleets: ec2-.compute.amazonaws.com. (See Amazon EC2 Instance IP + // Addressing // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-instance-addressing.html#concepts-public-addresses).) // // When @@ -1927,19 +1916,18 @@ type PlayerSession struct { // Current status of the player session. Possible player session statuses include // the following: // - // * RESERVED -- The player session request has been received, - // but the player has not yet connected to the server process and/or been - // validated. + // * RESERVED -- The player session request has been received, but + // the player has not yet connected to the server process and/or been validated. // - // * ACTIVE -- The player has been validated by the server process - // and is currently connected. + // * + // ACTIVE -- The player has been validated by the server process and is currently + // connected. // - // * COMPLETED -- The player connection has been - // dropped. + // * COMPLETED -- The player connection has been dropped. // - // * TIMEDOUT -- A player session request was received, but the - // player did not connect and/or was not validated within the timeout limit (60 - // seconds). + // * TIMEDOUT + // -- A player session request was received, but the player did not connect and/or + // was not validated within the timeout limit (60 seconds). Status PlayerSessionStatus // Time stamp indicating when this data object was terminated. Format is a number @@ -1968,19 +1956,18 @@ type ResourceCreationLimitPolicy struct { // The routing configuration for a fleet alias. // -// * CreateAlias +// * CreateAlias // -// * -// ListAliases +// * ListAliases // -// * DescribeAlias +// * +// DescribeAlias // -// * UpdateAlias +// * UpdateAlias // -// * DeleteAlias +// * DeleteAlias // -// * -// ResolveAlias +// * ResolveAlias type RoutingStrategy struct { // The unique identifier for a fleet that the alias points to. This value is the @@ -1993,12 +1980,12 @@ type RoutingStrategy struct { // The type of routing strategy for the alias. 
Possible routing types include the // following: // - // * SIMPLE - The alias resolves to one specific fleet. Use this - // type when routing to active fleets. + // * SIMPLE - The alias resolves to one specific fleet. Use this type + // when routing to active fleets. // - // * TERMINAL - The alias does not resolve - // to a fleet but instead can be used to display a message to the user. A terminal - // alias throws a TerminalRoutingStrategyException with the RoutingStrategy message + // * TERMINAL - The alias does not resolve to a + // fleet but instead can be used to display a message to the user. A terminal alias + // throws a TerminalRoutingStrategyException with the RoutingStrategy message // embedded. Type RoutingStrategyType } @@ -2016,20 +2003,19 @@ type RoutingStrategy struct { // calculate the total number of processes in a runtime configuration, add the // values of the ConcurrentExecutions parameter for each ServerProcess object. // +// * +// CreateFleet // -// * CreateFleet -// -// * ListFleets +// * ListFleets // -// * DeleteFleet +// * DeleteFleet // -// * -// DescribeFleetAttributes +// * DescribeFleetAttributes // -// * UpdateFleetAttributes +// * +// UpdateFleetAttributes // -// * StartFleetActions or -// StopFleetActions +// * StartFleetActions or StopFleetActions type RuntimeConfiguration struct { // The maximum amount of time (in seconds) that a game session can remain in status @@ -2074,30 +2060,29 @@ type S3Location struct { // Rule that controls how a fleet is scaled. Scaling policies are uniquely // identified by the combination of name and fleet ID. // -// * -// DescribeFleetCapacity +// * DescribeFleetCapacity // -// * UpdateFleetCapacity +// * +// UpdateFleetCapacity // -// * -// DescribeEC2InstanceLimits +// * DescribeEC2InstanceLimits // -// * Manage scaling policies: +// * Manage scaling policies: // -// * +// * // PutScalingPolicy (auto-scaling) // -// * DescribeScalingPolicies -// (auto-scaling) +// * DescribeScalingPolicies (auto-scaling) // -// * DeleteScalingPolicy (auto-scaling) +// * +// DeleteScalingPolicy (auto-scaling) // -// * Manage fleet -// actions: +// * Manage fleet actions: // -// * StartFleetActions +// * +// StartFleetActions // -// * StopFleetActions +// * StopFleetActions type ScalingPolicy struct { // Comparison operator to use when measuring a metric against the threshold value. @@ -2115,46 +2100,45 @@ type ScalingPolicy struct { // GameLift with Amazon CloudWatch // (https://docs.aws.amazon.com/gamelift/latest/developerguide/monitoring-cloudwatch.html). // + // * + // ActivatingGameSessions -- Game sessions in the process of being created. // - // * ActivatingGameSessions -- Game sessions in the process of being created. + // * + // ActiveGameSessions -- Game sessions that are currently running. // - // - // * ActiveGameSessions -- Game sessions that are currently running. - // - // * + // * // ActiveInstances -- Fleet instances that are currently running at least one game // session. // - // * AvailableGameSessions -- Additional game sessions that fleet - // could host simultaneously, given current capacity. + // * AvailableGameSessions -- Additional game sessions that fleet could + // host simultaneously, given current capacity. // - // * - // AvailablePlayerSessions -- Empty player slots in currently active game sessions. - // This includes game sessions that are not currently accepting players. Reserved - // player slots are not included. + // * AvailablePlayerSessions -- Empty + // player slots in currently active game sessions. 
This includes game sessions that + // are not currently accepting players. Reserved player slots are not included. // - // * CurrentPlayerSessions -- Player slots in - // active game sessions that are being used by a player or are reserved for a - // player. + // * + // CurrentPlayerSessions -- Player slots in active game sessions that are being + // used by a player or are reserved for a player. // - // * IdleInstances -- Active instances that are currently hosting zero - // game sessions. + // * IdleInstances -- Active + // instances that are currently hosting zero game sessions. // - // * PercentAvailableGameSessions -- Unused percentage of the - // total number of game sessions that a fleet could host simultaneously, given - // current capacity. Use this metric for a target-based scaling policy. + // * + // PercentAvailableGameSessions -- Unused percentage of the total number of game + // sessions that a fleet could host simultaneously, given current capacity. Use + // this metric for a target-based scaling policy. // - // * - // PercentIdleInstances -- Percentage of the total number of active instances that - // are hosting zero game sessions. + // * PercentIdleInstances -- + // Percentage of the total number of active instances that are hosting zero game + // sessions. // - // * QueueDepth -- Pending game session - // placement requests, in any queue, where the current fleet is the top-priority - // destination. + // * QueueDepth -- Pending game session placement requests, in any + // queue, where the current fleet is the top-priority destination. // - // * WaitTime -- Current wait time for pending game session - // placement requests, in any queue, where the current fleet is the top-priority - // destination. + // * WaitTime -- + // Current wait time for pending game session placement requests, in any queue, + // where the current fleet is the top-priority destination. MetricName MetricName // A descriptive label that is associated with a scaling policy. Policy names do @@ -2174,16 +2158,16 @@ type ScalingPolicy struct { // The type of adjustment to make to a fleet's instance count (see // FleetCapacity): // - // * ChangeInCapacity -- add (or subtract) the scaling - // adjustment value from the current instance count. Positive values scale up while - // negative values scale down. + // * ChangeInCapacity -- add (or subtract) the scaling adjustment + // value from the current instance count. Positive values scale up while negative + // values scale down. // - // * ExactCapacity -- set the instance count to - // the scaling adjustment value. + // * ExactCapacity -- set the instance count to the scaling + // adjustment value. // - // * PercentChangeInCapacity -- increase or - // reduce the current instance count by the scaling adjustment, read as a - // percentage. Positive values scale up while negative values scale down. + // * PercentChangeInCapacity -- increase or reduce the current + // instance count by the scaling adjustment, read as a percentage. Positive values + // scale up while negative values scale down. ScalingAdjustmentType ScalingAdjustmentType // Current status of the scaling policy. The scaling policy can be in force only @@ -2191,26 +2175,26 @@ type ScalingPolicy struct { // fleets (see StopFleetActions; if suspended for a fleet, the policy status does // not change. View a fleet's stopped actions by calling DescribeFleetCapacity. // + // * + // ACTIVE -- The scaling policy can be used for auto-scaling a fleet. 
// - // * ACTIVE -- The scaling policy can be used for auto-scaling a fleet. - // - // * + // * // UPDATE_REQUESTED -- A request to update the scaling policy has been received. // + // * + // UPDATING -- A change is being made to the scaling policy. // - // * UPDATING -- A change is being made to the scaling policy. + // * DELETE_REQUESTED -- + // A request to delete the scaling policy has been received. // - // * - // DELETE_REQUESTED -- A request to delete the scaling policy has been received. + // * DELETING -- The + // scaling policy is being deleted. // + // * DELETED -- The scaling policy has been + // deleted. // - // * DELETING -- The scaling policy is being deleted. - // - // * DELETED -- The scaling - // policy has been deleted. - // - // * ERROR -- An error occurred in creating the - // policy. It should be removed and recreated. + // * ERROR -- An error occurred in creating the policy. It should be + // removed and recreated. Status ScalingStatusType // The settings for a target-based scaling policy. @@ -2222,17 +2206,16 @@ type ScalingPolicy struct { // Properties describing a Realtime script. Related operations // -// * -// CreateScript +// * CreateScript // -// * ListScripts +// * +// ListScripts // -// * DescribeScript +// * DescribeScript // -// * UpdateScript +// * UpdateScript // -// * -// DeleteScript +// * DeleteScript type Script struct { // A time stamp indicating when this data object was created. The format is a @@ -2286,10 +2269,10 @@ type ServerProcess struct { // Realtime script file that contains the Init() function. Game builds and Realtime // scripts are installed on instances at the root: // - // * Windows (for custom game + // * Windows (for custom game // builds only): C:\game. Example: "C:\game\MyGame\server.exe" // - // * Linux: + // * Linux: // /local/game. Examples: "/local/game/MyGame/server.exe" or // "/local/game/MyRealtimeScript.js" // @@ -2307,12 +2290,11 @@ type ServerProcess struct { // (http://aws.amazon.com/answers/account-management/aws-tagging-strategies/) // Related operations // -// * TagResource +// * TagResource // -// * UntagResource +// * UntagResource // -// * -// ListTagsForResource +// * ListTagsForResource type Tag struct { // The key for a developer-defined key:value pair for tagging an AWS resource. @@ -2333,29 +2315,28 @@ type Tag struct { // specifies settings as needed for the target based policy, including the target // value. // -// * DescribeFleetCapacity +// * DescribeFleetCapacity // -// * UpdateFleetCapacity +// * UpdateFleetCapacity // -// * +// * // DescribeEC2InstanceLimits // -// * Manage scaling policies: -// -// * -// PutScalingPolicy (auto-scaling) +// * Manage scaling policies: // -// * DescribeScalingPolicies +// * PutScalingPolicy // (auto-scaling) // -// * DeleteScalingPolicy (auto-scaling) +// * DescribeScalingPolicies (auto-scaling) // -// * Manage fleet -// actions: +// * DeleteScalingPolicy +// (auto-scaling) +// +// * Manage fleet actions: // -// * StartFleetActions +// * StartFleetActions // -// * StopFleetActions +// * StopFleetActions type TargetConfiguration struct { // Desired value to use with a target-based scaling policy. The value must be @@ -2388,20 +2369,20 @@ type TargetTrackingConfiguration struct { // authorization must exist and be valid for the peering connection to be // established. Authorizations are valid for 24 hours after they are issued. 
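Editor's note: the scaling policy status values above are what callers must check before trusting that a policy is actually acting on a fleet, since suspended fleet actions do not change the status. A small helper over the generated ScalingStatusType; the CamelCase constant names mirror this patch's enum naming convention and are assumed, not taken from the diff.

package example

import "github.com/aws/aws-sdk-go-v2/service/gamelift/types"

// scalingPolicyState maps a ScalingPolicy status to a coarse operational state.
func scalingPolicyState(p types.ScalingPolicy) string {
	switch p.Status {
	case types.ScalingStatusTypeActive:
		return "in force" // only usable for auto-scaling while fleet actions are not stopped
	case types.ScalingStatusTypeUpdateRequested, types.ScalingStatusTypeUpdating:
		return "update in progress"
	case types.ScalingStatusTypeDeleteRequested, types.ScalingStatusTypeDeleting, types.ScalingStatusTypeDeleted:
		return "being removed"
	case types.ScalingStatusTypeError:
		return "error: remove and recreate the policy"
	default:
		return "unknown"
	}
}

The same pattern applies to the MetricName values listed above when deciding which metric a target-based policy and its TargetConfiguration should track, for example PercentAvailableGameSessions.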
// -// * +// * // CreateVpcPeeringAuthorization // -// * DescribeVpcPeeringAuthorizations +// * DescribeVpcPeeringAuthorizations // -// * +// * // DeleteVpcPeeringAuthorization // -// * CreateVpcPeeringConnection +// * CreateVpcPeeringConnection // -// * +// * // DescribeVpcPeeringConnections // -// * DeleteVpcPeeringConnection +// * DeleteVpcPeeringConnection type VpcPeeringAuthorization struct { // Time stamp indicating when this authorization was issued. Format is a number @@ -2434,20 +2415,20 @@ type VpcPeeringAuthorization struct { // the VPC for your Amazon GameLift fleets. This record may be for an active // peering connection or a pending connection that has not yet been established. // +// * +// CreateVpcPeeringAuthorization // -// * CreateVpcPeeringAuthorization -// -// * DescribeVpcPeeringAuthorizations +// * DescribeVpcPeeringAuthorizations // -// * +// * // DeleteVpcPeeringAuthorization // -// * CreateVpcPeeringConnection +// * CreateVpcPeeringConnection // -// * +// * // DescribeVpcPeeringConnections // -// * DeleteVpcPeeringConnection +// * DeleteVpcPeeringConnection type VpcPeeringConnection struct { // The Amazon Resource Name (ARN diff --git a/service/glacier/api_op_CreateVault.go b/service/glacier/api_op_CreateVault.go index 76a47dc4b4d..29dba90bb0c 100644 --- a/service/glacier/api_op_CreateVault.go +++ b/service/glacier/api_op_CreateVault.go @@ -16,18 +16,18 @@ import ( // 1,000 vaults per account. If you need to create more vaults, contact Amazon S3 // Glacier. You must use the following guidelines when naming a vault. // -// * Names -// can be between 1 and 255 characters long. +// * Names can +// be between 1 and 255 characters long. // -// * Allowed characters are a-z, -// A-Z, 0-9, '_' (underscore), '-' (hyphen), and '.' (period). +// * Allowed characters are a-z, A-Z, 0-9, +// '_' (underscore), '-' (hyphen), and '.' (period). // -// This operation is -// idempotent. An AWS account has full permission to perform all operations -// (actions). However, AWS Identity and Access Management (IAM) users don't have -// any permissions by default. You must grant them explicit permission to perform -// specific actions. For more information, see Access Control Using AWS Identity -// and Access Management (IAM) +// This operation is idempotent. +// An AWS account has full permission to perform all operations (actions). However, +// AWS Identity and Access Management (IAM) users don't have any permissions by +// default. You must grant them explicit permission to perform specific actions. +// For more information, see Access Control Using AWS Identity and Access +// Management (IAM) // (https://docs.aws.amazon.com/amazonglacier/latest/dev/using-iam-with-amazon-glacier.html). // For conceptual information and underlying REST API, see Creating a Vault in // Amazon Glacier diff --git a/service/glacier/api_op_DeleteArchive.go b/service/glacier/api_op_DeleteArchive.go index ba3a27ad681..f5450effade 100644 --- a/service/glacier/api_op_DeleteArchive.go +++ b/service/glacier/api_op_DeleteArchive.go @@ -16,11 +16,11 @@ import ( // for this archive ID may or may not succeed according to the following // scenarios: // -// * If the archive retrieval job is actively preparing the data -// for download when Amazon S3 Glacier receives the delete archive request, the +// * If the archive retrieval job is actively preparing the data for +// download when Amazon S3 Glacier receives the delete archive request, the // archival retrieval operation might fail. 
// -// * If the archive retrieval job has +// * If the archive retrieval job has // successfully prepared the archive for download when Amazon S3 Glacier receives // the delete archive request, you will be able to download the output. // diff --git a/service/glacier/api_op_DescribeJob.go b/service/glacier/api_op_DescribeJob.go index a2b55545ba3..ff87c9b58ea 100644 --- a/service/glacier/api_op_DescribeJob.go +++ b/service/glacier/api_op_DescribeJob.go @@ -134,16 +134,16 @@ type DescribeJobOutput struct { // is retrieved, this value is the same as the ArchiveSHA256TreeHash value. This // field is null for the following: // - // * Archive retrieval jobs that specify a - // range that is not tree-hash aligned + // * Archive retrieval jobs that specify a range + // that is not tree-hash aligned // - // * Archival jobs that specify a range - // that is equal to the whole archive, when the job status is InProgress + // * Archival jobs that specify a range that is + // equal to the whole archive, when the job status is InProgress // - // * - // Inventory jobs + // * Inventory + // jobs // - // * Select jobs + // * Select jobs SHA256TreeHash *string // An Amazon SNS topic that receives notification. diff --git a/service/glacier/api_op_GetJobOutput.go b/service/glacier/api_op_GetJobOutput.go index c85100fc9bf..ad4caf367d4 100644 --- a/service/glacier/api_op_GetJobOutput.go +++ b/service/glacier/api_op_GetJobOutput.go @@ -95,23 +95,23 @@ type GetJobOutputInput struct { // chunks of data at a time, which is a total of eight Get Job Output requests. You // use the following process to download the job output: // - // * Download a 128 MB - // chunk of output by specifying the appropriate byte range. Verify that all 128 MB - // of data was received. + // * Download a 128 MB chunk + // of output by specifying the appropriate byte range. Verify that all 128 MB of + // data was received. // - // * Along with the data, the response includes a SHA256 - // tree hash of the payload. You compute the checksum of the payload on the client - // and compare it with the checksum you received in the response to ensure you - // received all the expected data. + // * Along with the data, the response includes a SHA256 tree + // hash of the payload. You compute the checksum of the payload on the client and + // compare it with the checksum you received in the response to ensure you received + // all the expected data. // - // * Repeat steps 1 and 2 for all the eight - // 128 MB chunks of output data, each time specifying the appropriate byte range. + // * Repeat steps 1 and 2 for all the eight 128 MB chunks + // of output data, each time specifying the appropriate byte range. // - // - // * After downloading all the parts of the job output, you have a list of eight - // checksum values. Compute the tree hash of these values to find the checksum of - // the entire output. Using the DescribeJob API, obtain job information of the job - // that provided you the output. The response includes the checksum of the entire + // * After + // downloading all the parts of the job output, you have a list of eight checksum + // values. Compute the tree hash of these values to find the checksum of the entire + // output. Using the DescribeJob API, obtain job information of the job that + // provided you the output. The response includes the checksum of the entire // archive stored in Amazon S3 Glacier. You compare this value with the checksum // you computed to ensure you have downloaded the entire archive content with no // errors. 
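Editor's note: the chunked-download procedure described in the GetJobOutput documentation above maps directly onto repeated GetJobOutput calls with a byte-range header. A condensed sketch follows, assuming the generated Glacier client surface (GetJobOutputInput with AccountId, VaultName, JobId, and Range, plus a streaming Body and a Checksum in the response); the SHA256 tree-hash verification step is noted in a comment but not implemented here.

package example

import (
	"context"
	"fmt"
	"io"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/glacier"
)

// downloadJobOutput streams a completed job's output in 128 MB ranges.
func downloadJobOutput(ctx context.Context, client *glacier.Client, vault, jobID string, totalSize int64, w io.Writer) error {
	const chunk = int64(128 << 20) // 128 MB, matching the walkthrough above
	for start := int64(0); start < totalSize; start += chunk {
		end := start + chunk - 1
		if end > totalSize-1 {
			end = totalSize - 1
		}
		out, err := client.GetJobOutput(ctx, &glacier.GetJobOutputInput{
			AccountId: aws.String("-"), // "-" selects the account that owns the credentials
			VaultName: aws.String(vault),
			JobId:     aws.String(jobID),
			Range:     aws.String(fmt.Sprintf("bytes=%d-%d", start, end)),
		})
		if err != nil {
			return err
		}
		// out.Checksum carries the SHA256 tree hash for this range; a full
		// implementation recomputes it locally and compares before accepting the bytes.
		_, copyErr := io.Copy(w, out.Body)
		out.Body.Close()
		if copyErr != nil {
			return copyErr
		}
	}
	return nil
}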
@@ -135,17 +135,17 @@ type GetJobOutputOutput struct { // retrieving the output for an archive retrieval job. Furthermore, this header // appears only under the following conditions: // - // * You get the entire range of - // the archive. + // * You get the entire range of the + // archive. // - // * You request a range to return of the archive that starts and - // ends on a multiple of 1 MB. For example, if you have an 3.1 MB archive and you - // specify a range to return that starts at 1 MB and ends at 2 MB, then the + // * You request a range to return of the archive that starts and ends on + // a multiple of 1 MB. For example, if you have an 3.1 MB archive and you specify a + // range to return that starts at 1 MB and ends at 2 MB, then the // x-amz-sha256-tree-hash is returned as a response header. // - // * You request a - // range of the archive to return that starts on a multiple of 1 MB and goes to the - // end of the archive. For example, if you have a 3.1 MB archive and you specify a + // * You request a range + // of the archive to return that starts on a multiple of 1 MB and goes to the end + // of the archive. For example, if you have a 3.1 MB archive and you specify a // range that starts at 2 MB and ends at 3.1 MB (the end of the archive), then the // x-amz-sha256-tree-hash is returned as a response header. Checksum *string diff --git a/service/glacier/api_op_GetVaultLock.go b/service/glacier/api_op_GetVaultLock.go index 4a542dcc360..ab0cd45af78 100644 --- a/service/glacier/api_op_GetVaultLock.go +++ b/service/glacier/api_op_GetVaultLock.go @@ -14,17 +14,17 @@ import ( // This operation retrieves the following attributes from the lock-policy // subresource set on the specified vault: // -// * The vault lock policy set on the +// * The vault lock policy set on the // vault. // -// * The state of the vault lock, which is either InProgess or -// Locked. +// * The state of the vault lock, which is either InProgess or Locked. // -// * When the lock ID expires. The lock ID is used to complete the -// vault locking process. +// * +// When the lock ID expires. The lock ID is used to complete the vault locking +// process. // -// * When the vault lock was initiated and put into the -// InProgress state. +// * When the vault lock was initiated and put into the InProgress +// state. // // A vault lock is put into the InProgress state by calling // InitiateVaultLock. A vault lock is put into the Locked state by calling diff --git a/service/glacier/api_op_InitiateVaultLock.go b/service/glacier/api_op_InitiateVaultLock.go index 7532d447a99..0740693537a 100644 --- a/service/glacier/api_op_InitiateVaultLock.go +++ b/service/glacier/api_op_InitiateVaultLock.go @@ -14,18 +14,18 @@ import ( // This operation initiates the vault locking process by doing the following: // +// * +// Installing a vault lock policy on the specified vault. // -// * Installing a vault lock policy on the specified vault. +// * Setting the lock state +// of vault lock to InProgress. // -// * Setting the lock -// state of vault lock to InProgress. +// * Returning a lock ID, which is used to complete +// the vault locking process. // -// * Returning a lock ID, which is used to -// complete the vault locking process. -// -// You can set one vault lock policy for each -// vault and this policy can be up to 20 KB in size. 
For more information about -// vault lock policies, see Amazon Glacier Access Control with Vault Lock Policies +// You can set one vault lock policy for each vault and +// this policy can be up to 20 KB in size. For more information about vault lock +// policies, see Amazon Glacier Access Control with Vault Lock Policies // (https://docs.aws.amazon.com/amazonglacier/latest/dev/vault-lock-policy.html). // You must complete the vault locking process within 24 hours after the vault lock // enters the InProgress state. After the 24 hour window ends, the lock ID expires, diff --git a/service/glacier/api_op_SetVaultNotifications.go b/service/glacier/api_op_SetVaultNotifications.go index d391f6190d4..25b714d6a3a 100644 --- a/service/glacier/api_op_SetVaultNotifications.go +++ b/service/glacier/api_op_SetVaultNotifications.go @@ -22,12 +22,12 @@ import ( // can configure a vault to publish a notification for the following vault // events: // -// * ArchiveRetrievalCompleted This event occurs when a job that was +// * ArchiveRetrievalCompleted This event occurs when a job that was // initiated for an archive retrieval is completed (InitiateJob). The status of the // completed job can be "Succeeded" or "Failed". The notification sent to the SNS // topic is the same output as returned from DescribeJob. // -// * +// * // InventoryRetrievalCompleted This event occurs when a job that was initiated for // an inventory retrieval is completed (InitiateJob). The status of the completed // job can be "Succeeded" or "Failed". The notification sent to the SNS topic is diff --git a/service/glacier/api_op_UploadMultipartPart.go b/service/glacier/api_op_UploadMultipartPart.go index 08ffd48e386..f3bb551232f 100644 --- a/service/glacier/api_op_UploadMultipartPart.go +++ b/service/glacier/api_op_UploadMultipartPart.go @@ -17,16 +17,16 @@ import ( // for a multipart upload. Amazon Glacier rejects your upload part request if any // of the following conditions is true: // -// * SHA256 tree hash does not matchTo -// ensure that part data is not corrupted in transmission, you compute a SHA256 -// tree hash of the part and include it in your request. Upon receiving the part -// data, Amazon S3 Glacier also computes a SHA256 tree hash. If these hash values -// don't match, the operation fails. For information about computing a SHA256 tree -// hash, see Computing Checksums +// * SHA256 tree hash does not matchTo ensure +// that part data is not corrupted in transmission, you compute a SHA256 tree hash +// of the part and include it in your request. Upon receiving the part data, Amazon +// S3 Glacier also computes a SHA256 tree hash. If these hash values don't match, +// the operation fails. For information about computing a SHA256 tree hash, see +// Computing Checksums // (https://docs.aws.amazon.com/amazonglacier/latest/dev/checksum-calculations.html). // -// -// * Part size does not matchThe size of each part except the last must match the +// * +// Part size does not matchThe size of each part except the last must match the // size specified in the corresponding InitiateMultipartUpload request. The size of // the last part must be the same size as, or smaller than, the specified size. If // you upload a part whose size is smaller than the part size you specified in your @@ -34,7 +34,7 @@ import ( // upload part request will succeed. However, the subsequent Complete Multipart // Upload request will fail. 
// -// * Range does not alignThe byte range value in the +// * Range does not alignThe byte range value in the // request does not align with the part size specified in the corresponding // initiate request. For example, if you specify a part size of 4194304 bytes (4 // MB), then 0 to 4194303 bytes (4 MB - 1) and 4194304 (4 MB) to 8388607 (8 MB - 1) diff --git a/service/glacier/doc.go b/service/glacier/doc.go index 6c1cdf9f53b..58f814eb2a6 100644 --- a/service/glacier/doc.go +++ b/service/glacier/doc.go @@ -19,13 +19,13 @@ // Glacier, we recommend that you begin by reading the following sections in the // Amazon S3 Glacier Developer Guide: // -// * What is Amazon S3 Glacier +// * What is Amazon S3 Glacier // (https://docs.aws.amazon.com/amazonglacier/latest/dev/introduction.html) - This // section of the Developer Guide describes the underlying data model, the // operations it supports, and the AWS SDKs that you can use to interact with the // service. // -// * Getting Started with Amazon S3 Glacier +// * Getting Started with Amazon S3 Glacier // (https://docs.aws.amazon.com/amazonglacier/latest/dev/amazon-glacier-getting-started.html) // - The Getting Started section walks you through the process of creating a vault, // uploading archives, creating jobs to download archives, retrieving the job diff --git a/service/glacier/types/enums.go b/service/glacier/types/enums.go index 8e3751da0e1..e052fb1637c 100644 --- a/service/glacier/types/enums.go +++ b/service/glacier/types/enums.go @@ -108,11 +108,11 @@ type Permission string // Enum values for Permission const ( - PermissionFull_control Permission = "FULL_CONTROL" - PermissionWrite Permission = "WRITE" - PermissionWrite_acp Permission = "WRITE_ACP" - PermissionRead Permission = "READ" - PermissionRead_acp Permission = "READ_ACP" + PermissionFullControl Permission = "FULL_CONTROL" + PermissionWrite Permission = "WRITE" + PermissionWriteAcp Permission = "WRITE_ACP" + PermissionRead Permission = "READ" + PermissionReadAcp Permission = "READ_ACP" ) // Values returns all known values for Permission. Note that this can be expanded diff --git a/service/glacier/types/types.go b/service/glacier/types/types.go index e6998042e49..58ca1a84a12 100644 --- a/service/glacier/types/types.go +++ b/service/glacier/types/types.go @@ -186,16 +186,16 @@ type GlacierJobDescription struct { // is retrieved, this value is the same as the ArchiveSHA256TreeHash value. This // field is null for the following: // - // * Archive retrieval jobs that specify a - // range that is not tree-hash aligned + // * Archive retrieval jobs that specify a range + // that is not tree-hash aligned // - // * Archival jobs that specify a range - // that is equal to the whole archive, when the job status is InProgress + // * Archival jobs that specify a range that is + // equal to the whole archive, when the job status is InProgress // - // * - // Inventory jobs + // * Inventory + // jobs // - // * Select jobs + // * Select jobs SHA256TreeHash *string // An Amazon SNS topic that receives notification. 
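Editor's note: the renamed Permission constants above (PermissionFullControl and friends) are the only change in that enums.go hunk, so callers that switch on the old underscore names need a mechanical update. A small illustration using only constants that appear verbatim in the diff; the helper name itself is hypothetical.

package example

import "github.com/aws/aws-sdk-go-v2/service/glacier/types"

// canWrite reports whether an ACL grant permits writing to the vault contents.
// Before this change the code would have referenced types.PermissionFull_control
// and types.PermissionWrite_acp; the CamelCase names below apply afterwards.
func canWrite(p types.Permission) bool {
	switch p {
	case types.PermissionFullControl, types.PermissionWrite:
		return true
	case types.PermissionRead, types.PermissionReadAcp, types.PermissionWriteAcp:
		return false
	default:
		return false
	}
}

The same mechanical rename applies to the Global Accelerator enums in the following hunks, for example AcceleratorStatusInProgress and ByoipCidrStatePendingProvisioning.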
diff --git a/service/globalaccelerator/types/enums.go b/service/globalaccelerator/types/enums.go index 0faffed0c1a..4ca169e1ae3 100644 --- a/service/globalaccelerator/types/enums.go +++ b/service/globalaccelerator/types/enums.go @@ -6,8 +6,8 @@ type AcceleratorStatus string // Enum values for AcceleratorStatus const ( - AcceleratorStatusDeployed AcceleratorStatus = "DEPLOYED" - AcceleratorStatusIn_progress AcceleratorStatus = "IN_PROGRESS" + AcceleratorStatusDeployed AcceleratorStatus = "DEPLOYED" + AcceleratorStatusInProgress AcceleratorStatus = "IN_PROGRESS" ) // Values returns all known values for AcceleratorStatus. Note that this can be @@ -24,17 +24,17 @@ type ByoipCidrState string // Enum values for ByoipCidrState const ( - ByoipCidrStatePending_provisioning ByoipCidrState = "PENDING_PROVISIONING" - ByoipCidrStateReady ByoipCidrState = "READY" - ByoipCidrStatePending_advertising ByoipCidrState = "PENDING_ADVERTISING" - ByoipCidrStateAdvertising ByoipCidrState = "ADVERTISING" - ByoipCidrStatePending_withdrawing ByoipCidrState = "PENDING_WITHDRAWING" - ByoipCidrStatePending_deprovisioning ByoipCidrState = "PENDING_DEPROVISIONING" - ByoipCidrStateDeprovisioned ByoipCidrState = "DEPROVISIONED" - ByoipCidrStateFailed_provision ByoipCidrState = "FAILED_PROVISION" - ByoipCidrStateFailed_advertising ByoipCidrState = "FAILED_ADVERTISING" - ByoipCidrStateFailed_withdraw ByoipCidrState = "FAILED_WITHDRAW" - ByoipCidrStateFailed_deprovision ByoipCidrState = "FAILED_DEPROVISION" + ByoipCidrStatePendingProvisioning ByoipCidrState = "PENDING_PROVISIONING" + ByoipCidrStateReady ByoipCidrState = "READY" + ByoipCidrStatePendingAdvertising ByoipCidrState = "PENDING_ADVERTISING" + ByoipCidrStateAdvertising ByoipCidrState = "ADVERTISING" + ByoipCidrStatePendingWithdrawing ByoipCidrState = "PENDING_WITHDRAWING" + ByoipCidrStatePendingDeprovisioning ByoipCidrState = "PENDING_DEPROVISIONING" + ByoipCidrStateDeprovisioned ByoipCidrState = "DEPROVISIONED" + ByoipCidrStateFailedProvision ByoipCidrState = "FAILED_PROVISION" + ByoipCidrStateFailedAdvertising ByoipCidrState = "FAILED_ADVERTISING" + ByoipCidrStateFailedWithdraw ByoipCidrState = "FAILED_WITHDRAW" + ByoipCidrStateFailedDeprovision ByoipCidrState = "FAILED_DEPROVISION" ) // Values returns all known values for ByoipCidrState. Note that this can be @@ -60,8 +60,8 @@ type ClientAffinity string // Enum values for ClientAffinity const ( - ClientAffinityNone ClientAffinity = "NONE" - ClientAffinitySource_ip ClientAffinity = "SOURCE_IP" + ClientAffinityNone ClientAffinity = "NONE" + ClientAffinitySourceIp ClientAffinity = "SOURCE_IP" ) // Values returns all known values for ClientAffinity. Note that this can be diff --git a/service/globalaccelerator/types/types.go b/service/globalaccelerator/types/types.go index 6682222b975..3136863ac50 100644 --- a/service/globalaccelerator/types/types.go +++ b/service/globalaccelerator/types/types.go @@ -76,52 +76,52 @@ type AcceleratorAttributes struct { // resources through bring your own IP address (BYOIP). The following describes // each BYOIP State that your IP address range can be in. // -// * -// PENDING_PROVISIONING — You’ve submitted a request to provision an IP address -// range but it is not yet provisioned with AWS Global Accelerator. +// * PENDING_PROVISIONING — +// You’ve submitted a request to provision an IP address range but it is not yet +// provisioned with AWS Global Accelerator. // -// * READY — -// The address range is provisioned with AWS Global Accelerator and can be -// advertised. 
+// * READY — The address range is +// provisioned with AWS Global Accelerator and can be advertised. // -// * PENDING_ADVERTISING — You’ve submitted a request for AWS -// Global Accelerator to advertise an address range but it is not yet being -// advertised. +// * +// PENDING_ADVERTISING — You’ve submitted a request for AWS Global Accelerator to +// advertise an address range but it is not yet being advertised. // -// * ADVERTISING — The address range is being advertised by AWS -// Global Accelerator. +// * ADVERTISING — +// The address range is being advertised by AWS Global Accelerator. +// +// * +// PENDING_WITHDRAWING — You’ve submitted a request to withdraw an address range +// from being advertised but it is still being advertised by AWS Global +// Accelerator. // -// * PENDING_WITHDRAWING — You’ve submitted a request to -// withdraw an address range from being advertised but it is still being advertised -// by AWS Global Accelerator. +// * PENDING_DEPROVISIONING — You’ve submitted a request to +// deprovision an address range from AWS Global Accelerator but it is still +// provisioned. // -// * PENDING_DEPROVISIONING — You’ve submitted a -// request to deprovision an address range from AWS Global Accelerator but it is -// still provisioned. +// * DEPROVISIONED — The address range is deprovisioned from AWS +// Global Accelerator. // -// * DEPROVISIONED — The address range is deprovisioned -// from AWS Global Accelerator. +// * FAILED_PROVISION — The request to provision the address +// range from AWS Global Accelerator was not successful. Please make sure that you +// provide all of the correct information, and try again. If the request fails a +// second time, contact AWS support. // -// * FAILED_PROVISION — The request to provision -// the address range from AWS Global Accelerator was not successful. Please make -// sure that you provide all of the correct information, and try again. If the +// * FAILED_ADVERTISING — The request for AWS +// Global Accelerator to advertise the address range was not successful. Please +// make sure that you provide all of the correct information, and try again. If the // request fails a second time, contact AWS support. // -// * FAILED_ADVERTISING — -// The request for AWS Global Accelerator to advertise the address range was not -// successful. Please make sure that you provide all of the correct information, -// and try again. If the request fails a second time, contact AWS support. -// -// * -// FAILED_WITHDRAW — The request to withdraw the address range from advertising by -// AWS Global Accelerator was not successful. Please make sure that you provide all -// of the correct information, and try again. If the request fails a second time, -// contact AWS support. +// * FAILED_WITHDRAW — The +// request to withdraw the address range from advertising by AWS Global Accelerator +// was not successful. Please make sure that you provide all of the correct +// information, and try again. If the request fails a second time, contact AWS +// support. // -// * FAILED_DEPROVISION — The request to deprovision the -// address range from AWS Global Accelerator was not successful. Please make sure -// that you provide all of the correct information, and try again. If the request -// fails a second time, contact AWS support. +// * FAILED_DEPROVISION — The request to deprovision the address range +// from AWS Global Accelerator was not successful. Please make sure that you +// provide all of the correct information, and try again. 
If the request fails a +// second time, contact AWS support. type ByoipCidr struct { // The address range, in CIDR notation. @@ -228,22 +228,22 @@ type EndpointDescription struct { // state is healthy, a reason code is not provided. If the endpoint state is // unhealthy, the reason code can be one of the following values: // - // * Timeout: - // The health check requests to the endpoint are timing out before returning a + // * Timeout: The + // health check requests to the endpoint are timing out before returning a // status. // - // * Failed: The health check failed, for example because the endpoint + // * Failed: The health check failed, for example because the endpoint // response was invalid (malformed). // // If the endpoint state is initial, the reason // code can be one of the following values: // - // * ProvisioningInProgress: The - // endpoint is in the process of being provisioned. + // * ProvisioningInProgress: The endpoint + // is in the process of being provisioned. // - // * InitialHealthChecking: - // Global Accelerator is still setting up the minimum number of health checks for - // the endpoint that are required to determine its health status. + // * InitialHealthChecking: Global + // Accelerator is still setting up the minimum number of health checks for the + // endpoint that are required to determine its health status. HealthReason *string // The health status of the endpoint. diff --git a/service/glue/api_op_CreateDevEndpoint.go b/service/glue/api_op_CreateDevEndpoint.go index b7d9cfdab73..2d121188a0b 100644 --- a/service/glue/api_op_CreateDevEndpoint.go +++ b/service/glue/api_op_CreateDevEndpoint.go @@ -108,22 +108,22 @@ type CreateDevEndpointInput struct { // The type of predefined worker that is allocated to the development endpoint. // Accepts a value of Standard, G.1X, or G.2X. // - // * For the Standard worker type, + // * For the Standard worker type, // each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors // per worker. // - // * For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, - // 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend - // this worker type for memory-intensive jobs. + // * For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 + // GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this + // worker type for memory-intensive jobs. // - // * For the G.2X worker type, - // each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 - // executor per worker. We recommend this worker type for memory-intensive - // jobs. + // * For the G.2X worker type, each worker + // maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor + // per worker. We recommend this worker type for memory-intensive jobs. // - // Known issue: when a development endpoint is created with the - // G.2XWorkerType configuration, the Spark drivers for the development endpoint - // will run on 4 vCPU, 16 GB of memory, and a 64 GB disk. + // Known + // issue: when a development endpoint is created with the G.2XWorkerType + // configuration, the Spark drivers for the development endpoint will run on 4 + // vCPU, 16 GB of memory, and a 64 GB disk. WorkerType types.WorkerType } @@ -131,12 +131,12 @@ type CreateDevEndpointOutput struct { // The map of arguments used to configure this DevEndpoint. 
Valid arguments are: // + // * + // "--enable-glue-datacatalog": "" // - // * "--enable-glue-datacatalog": "" + // * "GLUE_PYTHON_VERSION": "3" // - // * "GLUE_PYTHON_VERSION": "3" - // - // * + // * // "GLUE_PYTHON_VERSION": "2" // // You can specify a version of Python support for diff --git a/service/glue/api_op_CreateJob.go b/service/glue/api_op_CreateJob.go index 142b1be7f1f..64fab773977 100644 --- a/service/glue/api_op_CreateJob.go +++ b/service/glue/api_op_CreateJob.go @@ -93,11 +93,11 @@ type CreateJobInput struct { // allocated for MaxCapacity depends on whether you are running a Python shell job // or an Apache Spark ETL job: // - // * When you specify a Python shell job + // * When you specify a Python shell job // (JobCommand.Name="pythonshell"), you can allocate either 0.0625 or 1 DPU. The // default is 0.0625 DPU. // - // * When you specify an Apache Spark ETL job + // * When you specify an Apache Spark ETL job // (JobCommand.Name="glueetl") or Apache Spark streaming ETL job // (JobCommand.Name="gluestreaming"), you can allocate from 2 to 100 DPUs. The // default is 10 DPUs. This job type cannot have a fractional DPU allocation. @@ -134,17 +134,17 @@ type CreateJobInput struct { // The type of predefined worker that is allocated when a job runs. Accepts a value // of Standard, G.1X, or G.2X. // - // * For the Standard worker type, each worker + // * For the Standard worker type, each worker // provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker. // + // * + // For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 + // GB disk), and provides 1 executor per worker. We recommend this worker type for + // memory-intensive jobs. // - // * For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, - // 64 GB disk), and provides 1 executor per worker. We recommend this worker type - // for memory-intensive jobs. - // - // * For the G.2X worker type, each worker maps to - // 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per - // worker. We recommend this worker type for memory-intensive jobs. + // * For the G.2X worker type, each worker maps to 2 DPU (8 + // vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We + // recommend this worker type for memory-intensive jobs. WorkerType types.WorkerType } diff --git a/service/glue/api_op_CreateMLTransform.go b/service/glue/api_op_CreateMLTransform.go index 613ac4066f0..9b5a649a524 100644 --- a/service/glue/api_op_CreateMLTransform.go +++ b/service/glue/api_op_CreateMLTransform.go @@ -60,12 +60,11 @@ type CreateMLTransformInput struct { // permissions to AWS Glue resources, and Amazon S3 permissions required by the // transform. // - // * This role needs AWS Glue service role permissions to allow - // access to resources in AWS Glue. See Attach a Policy to IAM Users That Access - // AWS Glue + // * This role needs AWS Glue service role permissions to allow access + // to resources in AWS Glue. See Attach a Policy to IAM Users That Access AWS Glue // (https://docs.aws.amazon.com/glue/latest/dg/attach-policy-iam-user.html). // - // * + // * // This role needs permission to your Amazon Simple Storage Service (Amazon S3) // sources, targets, temporary directory, scripts, and any libraries used by the // task run for this transform. @@ -92,16 +91,16 @@ type CreateMLTransformInput struct { // pricing page (https://aws.amazon.com/glue/pricing/). MaxCapacity is a mutually // exclusive option with NumberOfWorkers and WorkerType. 
// - // * If either + // * If either // NumberOfWorkers or WorkerType is set, then MaxCapacity cannot be set. // - // * If + // * If // MaxCapacity is set then neither NumberOfWorkers or WorkerType can be set. // - // * - // If WorkerType is set, then NumberOfWorkers is required (and vice versa). + // * If + // WorkerType is set, then NumberOfWorkers is required (and vice versa). // - // * + // * // MaxCapacity and NumberOfWorkers must both be at least 1. // // When the WorkerType @@ -134,31 +133,30 @@ type CreateMLTransformInput struct { // The type of predefined worker that is allocated when this task runs. Accepts a // value of Standard, G.1X, or G.2X. // - // * For the Standard worker type, each - // worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per - // worker. + // * For the Standard worker type, each worker + // provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker. // - // * For the G.1X worker type, each worker provides 4 vCPU, 16 GB of - // memory and a 64GB disk, and 1 executor per worker. + // * + // For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a + // 64GB disk, and 1 executor per worker. // - // * For the G.2X worker - // type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 - // executor per worker. + // * For the G.2X worker type, each worker + // provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per + // worker. // - // MaxCapacity is a mutually exclusive option with - // NumberOfWorkers and WorkerType. + // MaxCapacity is a mutually exclusive option with NumberOfWorkers and + // WorkerType. // - // * If either NumberOfWorkers or WorkerType - // is set, then MaxCapacity cannot be set. + // * If either NumberOfWorkers or WorkerType is set, then MaxCapacity + // cannot be set. // - // * If MaxCapacity is set then - // neither NumberOfWorkers or WorkerType can be set. + // * If MaxCapacity is set then neither NumberOfWorkers or + // WorkerType can be set. // - // * If WorkerType is set, - // then NumberOfWorkers is required (and vice versa). + // * If WorkerType is set, then NumberOfWorkers is required + // (and vice versa). // - // * MaxCapacity and - // NumberOfWorkers must both be at least 1. + // * MaxCapacity and NumberOfWorkers must both be at least 1. WorkerType types.WorkerType } diff --git a/service/glue/api_op_GetDatabases.go b/service/glue/api_op_GetDatabases.go index 482439f1c98..7e0e0465c93 100644 --- a/service/glue/api_op_GetDatabases.go +++ b/service/glue/api_op_GetDatabases.go @@ -42,10 +42,10 @@ type GetDatabasesInput struct { // Allows you to specify that you want to list the databases shared with your // account. The allowable values are FOREIGN or ALL. // - // * If set to FOREIGN, will + // * If set to FOREIGN, will // list the databases shared with your account. // - // * If set to ALL, will list the + // * If set to ALL, will list the // databases shared with your account, as well as the databases in yor local // account. ResourceShareType types.ResourceShareType diff --git a/service/glue/api_op_GetMLTransform.go b/service/glue/api_op_GetMLTransform.go index 55390f2c00f..b14502ed93c 100644 --- a/service/glue/api_op_GetMLTransform.go +++ b/service/glue/api_op_GetMLTransform.go @@ -116,16 +116,15 @@ type GetMLTransformOutput struct { // The type of predefined worker that is allocated when this task runs. Accepts a // value of Standard, G.1X, or G.2X. 
// - // * For the Standard worker type, each - // worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per - // worker. + // * For the Standard worker type, each worker + // provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker. // - // * For the G.1X worker type, each worker provides 4 vCPU, 16 GB of - // memory and a 64GB disk, and 1 executor per worker. + // * + // For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a + // 64GB disk, and 1 executor per worker. // - // * For the G.2X worker - // type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 - // executor per worker. + // * For the G.2X worker type, each worker + // provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker. WorkerType types.WorkerType // Metadata pertaining to the operation's result. diff --git a/service/glue/api_op_GetPartitions.go b/service/glue/api_op_GetPartitions.go index bad2db38a76..0646ebb8ea2 100644 --- a/service/glue/api_op_GetPartitions.go +++ b/service/glue/api_op_GetPartitions.go @@ -63,30 +63,30 @@ type GetPartitionsInput struct { // true. AND, OR, IN, BETWEEN, LIKE, NOT, IS NULL Logical operators. Supported // Partition Key Types: The following are the supported partition keys. // - // * + // * // string // - // * date + // * date // - // * timestamp + // * timestamp // - // * int + // * int // - // * bigint + // * bigint // - // * long + // * long // - // * - // tinyint + // * tinyint // - // * smallint + // * smallint // - // * decimal + // * + // decimal // - // If an invalid type is encountered, an - // exception is thrown. The following list shows the valid operators on each type. - // When you define a crawler, the partitionKey type is created as a STRING, to be - // compatible with the catalog partitions. Sample API Call: + // If an invalid type is encountered, an exception is thrown. The + // following list shows the valid operators on each type. When you define a + // crawler, the partitionKey type is created as a STRING, to be compatible with the + // catalog partitions. Sample API Call: Expression *string // The maximum number of partitions to return in a single response. diff --git a/service/glue/api_op_GetPlan.go b/service/glue/api_op_GetPlan.go index 117e765423e..ddd37e02032 100644 --- a/service/glue/api_op_GetPlan.go +++ b/service/glue/api_op_GetPlan.go @@ -42,7 +42,7 @@ type GetPlanInput struct { // A map to hold additional optional key-value parameters. Currently, these // key-value pairs are supported: // - // * inferSchema — Specifies whether to set + // * inferSchema — Specifies whether to set // inferSchema to true or false for the default script generated by an AWS Glue // job. For example, to set inferSchema to true, pass the following key value pair: // --additional-plan-options-map '{"inferSchema":"true"}' diff --git a/service/glue/api_op_SearchTables.go b/service/glue/api_op_SearchTables.go index e80ba2080d5..9e14783ad96 100644 --- a/service/glue/api_op_SearchTables.go +++ b/service/glue/api_op_SearchTables.go @@ -60,11 +60,11 @@ type SearchTablesInput struct { // Allows you to specify that you want to search the tables shared with your // account. The allowable values are FOREIGN or ALL. // - // * If set to FOREIGN, will + // * If set to FOREIGN, will // search the tables shared with your account. // - // * If set to ALL, will search - // the tables shared with your account, as well as the tables in yor local account. 
+ // * If set to ALL, will search the + // tables shared with your account, as well as the tables in yor local account. ResourceShareType types.ResourceShareType // A string used for a text search. Specifying a value in quotes filters based on diff --git a/service/glue/api_op_StartJobRun.go b/service/glue/api_op_StartJobRun.go index 9312956f2e7..a3c364bd891 100644 --- a/service/glue/api_op_StartJobRun.go +++ b/service/glue/api_op_StartJobRun.go @@ -66,11 +66,11 @@ type StartJobRunInput struct { // allocated for MaxCapacity depends on whether you are running a Python shell job, // or an Apache Spark ETL job: // - // * When you specify a Python shell job + // * When you specify a Python shell job // (JobCommand.Name="pythonshell"), you can allocate either 0.0625 or 1 DPU. The // default is 0.0625 DPU. // - // * When you specify an Apache Spark ETL job + // * When you specify an Apache Spark ETL job // (JobCommand.Name="glueetl"), you can allocate from 2 to 100 DPUs. The default is // 10 DPUs. This job type cannot have a fractional DPU allocation. MaxCapacity *float64 @@ -95,16 +95,15 @@ type StartJobRunInput struct { // The type of predefined worker that is allocated when a job runs. Accepts a value // of Standard, G.1X, or G.2X. // - // * For the Standard worker type, each worker + // * For the Standard worker type, each worker // provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker. // - // - // * For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a + // * + // For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a // 64GB disk, and 1 executor per worker. // - // * For the G.2X worker type, each - // worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per - // worker. + // * For the G.2X worker type, each worker + // provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker. WorkerType types.WorkerType } diff --git a/service/glue/api_op_UpdateDevEndpoint.go b/service/glue/api_op_UpdateDevEndpoint.go index 6d7658aef41..80db7345d2d 100644 --- a/service/glue/api_op_UpdateDevEndpoint.go +++ b/service/glue/api_op_UpdateDevEndpoint.go @@ -37,12 +37,12 @@ type UpdateDevEndpointInput struct { // The map of arguments to add the map of arguments used to configure the // DevEndpoint. Valid arguments are: // - // * "--enable-glue-datacatalog": "" + // * "--enable-glue-datacatalog": "" // - // * + // * // "GLUE_PYTHON_VERSION": "3" // - // * "GLUE_PYTHON_VERSION": "2" + // * "GLUE_PYTHON_VERSION": "2" // // You can specify a // version of Python support for development endpoints by using the Arguments diff --git a/service/glue/api_op_UpdateMLTransform.go b/service/glue/api_op_UpdateMLTransform.go index dc0ad3ab43e..0766345e4b7 100644 --- a/service/glue/api_op_UpdateMLTransform.go +++ b/service/glue/api_op_UpdateMLTransform.go @@ -85,16 +85,15 @@ type UpdateMLTransformInput struct { // The type of predefined worker that is allocated when this task runs. Accepts a // value of Standard, G.1X, or G.2X. // - // * For the Standard worker type, each - // worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per - // worker. + // * For the Standard worker type, each worker + // provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker. // - // * For the G.1X worker type, each worker provides 4 vCPU, 16 GB of - // memory and a 64GB disk, and 1 executor per worker. 
+ // * + // For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a + // 64GB disk, and 1 executor per worker. // - // * For the G.2X worker - // type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 - // executor per worker. + // * For the G.2X worker type, each worker + // provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker. WorkerType types.WorkerType } diff --git a/service/glue/types/enums.go b/service/glue/types/enums.go index bd0e9e34ca3..41cbb7830b1 100644 --- a/service/glue/types/enums.go +++ b/service/glue/types/enums.go @@ -70,11 +70,11 @@ type Comparator string // Enum values for Comparator const ( - ComparatorEquals Comparator = "EQUALS" - ComparatorGreater_than Comparator = "GREATER_THAN" - ComparatorLess_than Comparator = "LESS_THAN" - ComparatorGreater_than_equals Comparator = "GREATER_THAN_EQUALS" - ComparatorLess_than_equals Comparator = "LESS_THAN_EQUALS" + ComparatorEquals Comparator = "EQUALS" + ComparatorGreaterThan Comparator = "GREATER_THAN" + ComparatorLessThan Comparator = "LESS_THAN" + ComparatorGreaterThanEquals Comparator = "GREATER_THAN_EQUALS" + ComparatorLessThanEquals Comparator = "LESS_THAN_EQUALS" ) // Values returns all known values for Comparator. Note that this can be expanded @@ -94,27 +94,27 @@ type ConnectionPropertyKey string // Enum values for ConnectionPropertyKey const ( - ConnectionPropertyKeyHost ConnectionPropertyKey = "HOST" - ConnectionPropertyKeyPort ConnectionPropertyKey = "PORT" - ConnectionPropertyKeyUser_name ConnectionPropertyKey = "USERNAME" - ConnectionPropertyKeyPassword ConnectionPropertyKey = "PASSWORD" - ConnectionPropertyKeyEncrypted_password ConnectionPropertyKey = "ENCRYPTED_PASSWORD" - ConnectionPropertyKeyJdbc_driver_jar_uri ConnectionPropertyKey = "JDBC_DRIVER_JAR_URI" - ConnectionPropertyKeyJdbc_driver_class_name ConnectionPropertyKey = "JDBC_DRIVER_CLASS_NAME" - ConnectionPropertyKeyJdbc_engine ConnectionPropertyKey = "JDBC_ENGINE" - ConnectionPropertyKeyJdbc_engine_version ConnectionPropertyKey = "JDBC_ENGINE_VERSION" - ConnectionPropertyKeyConfig_files ConnectionPropertyKey = "CONFIG_FILES" - ConnectionPropertyKeyInstance_id ConnectionPropertyKey = "INSTANCE_ID" - ConnectionPropertyKeyJdbc_connection_url ConnectionPropertyKey = "JDBC_CONNECTION_URL" - ConnectionPropertyKeyJdbc_enforce_ssl ConnectionPropertyKey = "JDBC_ENFORCE_SSL" - ConnectionPropertyKeyCustom_jdbc_cert ConnectionPropertyKey = "CUSTOM_JDBC_CERT" - ConnectionPropertyKeySkip_custom_jdbc_cert_validation ConnectionPropertyKey = "SKIP_CUSTOM_JDBC_CERT_VALIDATION" - ConnectionPropertyKeyCustom_jdbc_cert_string ConnectionPropertyKey = "CUSTOM_JDBC_CERT_STRING" - ConnectionPropertyKeyConnection_url ConnectionPropertyKey = "CONNECTION_URL" - ConnectionPropertyKeyKafka_bootstrap_servers ConnectionPropertyKey = "KAFKA_BOOTSTRAP_SERVERS" - ConnectionPropertyKeyKafka_ssl_enabled ConnectionPropertyKey = "KAFKA_SSL_ENABLED" - ConnectionPropertyKeyKafka_custom_cert ConnectionPropertyKey = "KAFKA_CUSTOM_CERT" - ConnectionPropertyKeyKafka_skip_custom_cert_validation ConnectionPropertyKey = "KAFKA_SKIP_CUSTOM_CERT_VALIDATION" + ConnectionPropertyKeyHost ConnectionPropertyKey = "HOST" + ConnectionPropertyKeyPort ConnectionPropertyKey = "PORT" + ConnectionPropertyKeyUserName ConnectionPropertyKey = "USERNAME" + ConnectionPropertyKeyPassword ConnectionPropertyKey = "PASSWORD" + ConnectionPropertyKeyEncryptedPassword ConnectionPropertyKey = "ENCRYPTED_PASSWORD" + ConnectionPropertyKeyJdbcDriverJarUri 
ConnectionPropertyKey = "JDBC_DRIVER_JAR_URI" + ConnectionPropertyKeyJdbcDriverClassName ConnectionPropertyKey = "JDBC_DRIVER_CLASS_NAME" + ConnectionPropertyKeyJdbcEngine ConnectionPropertyKey = "JDBC_ENGINE" + ConnectionPropertyKeyJdbcEngineVersion ConnectionPropertyKey = "JDBC_ENGINE_VERSION" + ConnectionPropertyKeyConfigFiles ConnectionPropertyKey = "CONFIG_FILES" + ConnectionPropertyKeyInstanceId ConnectionPropertyKey = "INSTANCE_ID" + ConnectionPropertyKeyJdbcConnectionUrl ConnectionPropertyKey = "JDBC_CONNECTION_URL" + ConnectionPropertyKeyJdbcEnforceSsl ConnectionPropertyKey = "JDBC_ENFORCE_SSL" + ConnectionPropertyKeyCustomJdbcCert ConnectionPropertyKey = "CUSTOM_JDBC_CERT" + ConnectionPropertyKeySkipCustomJdbcCertValidation ConnectionPropertyKey = "SKIP_CUSTOM_JDBC_CERT_VALIDATION" + ConnectionPropertyKeyCustomJdbcCertString ConnectionPropertyKey = "CUSTOM_JDBC_CERT_STRING" + ConnectionPropertyKeyConnectionUrl ConnectionPropertyKey = "CONNECTION_URL" + ConnectionPropertyKeyKafkaBootstrapServers ConnectionPropertyKey = "KAFKA_BOOTSTRAP_SERVERS" + ConnectionPropertyKeyKafkaSslEnabled ConnectionPropertyKey = "KAFKA_SSL_ENABLED" + ConnectionPropertyKeyKafkaCustomCert ConnectionPropertyKey = "KAFKA_CUSTOM_CERT" + ConnectionPropertyKeyKafkaSkipCustomCertValidation ConnectionPropertyKey = "KAFKA_SKIP_CUSTOM_CERT_VALIDATION" ) // Values returns all known values for ConnectionPropertyKey. Note that this can be @@ -238,9 +238,9 @@ type DeleteBehavior string // Enum values for DeleteBehavior const ( - DeleteBehaviorLog DeleteBehavior = "LOG" - DeleteBehaviorDelete_from_database DeleteBehavior = "DELETE_FROM_DATABASE" - DeleteBehaviorDeprecate_in_database DeleteBehavior = "DEPRECATE_IN_DATABASE" + DeleteBehaviorLog DeleteBehavior = "LOG" + DeleteBehaviorDeleteFromDatabase DeleteBehavior = "DELETE_FROM_DATABASE" + DeleteBehaviorDeprecateInDatabase DeleteBehavior = "DEPRECATE_IN_DATABASE" ) // Values returns all known values for DeleteBehavior. Note that this can be @@ -276,9 +276,9 @@ type ExistCondition string // Enum values for ExistCondition const ( - ExistConditionMust_exist ExistCondition = "MUST_EXIST" - ExistConditionNot_exist ExistCondition = "NOT_EXIST" - ExistConditionNone ExistCondition = "NONE" + ExistConditionMustExist ExistCondition = "MUST_EXIST" + ExistConditionNotExist ExistCondition = "NOT_EXIST" + ExistConditionNone ExistCondition = "NONE" ) // Values returns all known values for ExistCondition. Note that this can be @@ -450,15 +450,15 @@ type Permission string // Enum values for Permission const ( - PermissionAll Permission = "ALL" - PermissionSelect Permission = "SELECT" - PermissionAlter Permission = "ALTER" - PermissionDrop Permission = "DROP" - PermissionDelete Permission = "DELETE" - PermissionInsert Permission = "INSERT" - PermissionCreate_database Permission = "CREATE_DATABASE" - PermissionCreate_table Permission = "CREATE_TABLE" - PermissionData_location_access Permission = "DATA_LOCATION_ACCESS" + PermissionAll Permission = "ALL" + PermissionSelect Permission = "SELECT" + PermissionAlter Permission = "ALTER" + PermissionDrop Permission = "DROP" + PermissionDelete Permission = "DELETE" + PermissionInsert Permission = "INSERT" + PermissionCreateDatabase Permission = "CREATE_DATABASE" + PermissionCreateTable Permission = "CREATE_TABLE" + PermissionDataLocationAccess Permission = "DATA_LOCATION_ACCESS" ) // Values returns all known values for Permission. 
Note that this can be expanded @@ -502,8 +502,8 @@ type RecrawlBehavior string // Enum values for RecrawlBehavior const ( - RecrawlBehaviorCrawl_everything RecrawlBehavior = "CRAWL_EVERYTHING" - RecrawlBehaviorCrawl_new_folders_only RecrawlBehavior = "CRAWL_NEW_FOLDERS_ONLY" + RecrawlBehaviorCrawlEverything RecrawlBehavior = "CRAWL_EVERYTHING" + RecrawlBehaviorCrawlNewFoldersOnly RecrawlBehavior = "CRAWL_NEW_FOLDERS_ONLY" ) // Values returns all known values for RecrawlBehavior. Note that this can be @@ -579,7 +579,7 @@ type ScheduleState string // Enum values for ScheduleState const ( ScheduleStateScheduled ScheduleState = "SCHEDULED" - ScheduleStateNot_scheduled ScheduleState = "NOT_SCHEDULED" + ScheduleStateNotScheduled ScheduleState = "NOT_SCHEDULED" ScheduleStateTransitioning ScheduleState = "TRANSITIONING" ) @@ -634,9 +634,9 @@ type TaskRunSortColumnType string // Enum values for TaskRunSortColumnType const ( - TaskRunSortColumnTypeTask_run_type TaskRunSortColumnType = "TASK_RUN_TYPE" - TaskRunSortColumnTypeStatus TaskRunSortColumnType = "STATUS" - TaskRunSortColumnTypeStarted TaskRunSortColumnType = "STARTED" + TaskRunSortColumnTypeTaskRunType TaskRunSortColumnType = "TASK_RUN_TYPE" + TaskRunSortColumnTypeStatus TaskRunSortColumnType = "STATUS" + TaskRunSortColumnTypeStarted TaskRunSortColumnType = "STARTED" ) // Values returns all known values for TaskRunSortColumnType. Note that this can be @@ -682,11 +682,11 @@ type TaskType string // Enum values for TaskType const ( - TaskTypeEvaluation TaskType = "EVALUATION" - TaskTypeLabeling_set_generation TaskType = "LABELING_SET_GENERATION" - TaskTypeImport_labels TaskType = "IMPORT_LABELS" - TaskTypeExport_labels TaskType = "EXPORT_LABELS" - TaskTypeFind_matches TaskType = "FIND_MATCHES" + TaskTypeEvaluation TaskType = "EVALUATION" + TaskTypeLabelingSetGeneration TaskType = "LABELING_SET_GENERATION" + TaskTypeImportLabels TaskType = "IMPORT_LABELS" + TaskTypeExportLabels TaskType = "EXPORT_LABELS" + TaskTypeFindMatches TaskType = "FIND_MATCHES" ) // Values returns all known values for TaskType. Note that this can be expanded in @@ -706,11 +706,11 @@ type TransformSortColumnType string // Enum values for TransformSortColumnType const ( - TransformSortColumnTypeName TransformSortColumnType = "NAME" - TransformSortColumnTypeTransform_type TransformSortColumnType = "TRANSFORM_TYPE" - TransformSortColumnTypeStatus TransformSortColumnType = "STATUS" - TransformSortColumnTypeCreated TransformSortColumnType = "CREATED" - TransformSortColumnTypeLast_modified TransformSortColumnType = "LAST_MODIFIED" + TransformSortColumnTypeName TransformSortColumnType = "NAME" + TransformSortColumnTypeTransformType TransformSortColumnType = "TRANSFORM_TYPE" + TransformSortColumnTypeStatus TransformSortColumnType = "STATUS" + TransformSortColumnTypeCreated TransformSortColumnType = "CREATED" + TransformSortColumnTypeLastModified TransformSortColumnType = "LAST_MODIFIED" ) // Values returns all known values for TransformSortColumnType. 
Note that this can @@ -730,9 +730,9 @@ type TransformStatusType string // Enum values for TransformStatusType const ( - TransformStatusTypeNot_ready TransformStatusType = "NOT_READY" - TransformStatusTypeReady TransformStatusType = "READY" - TransformStatusTypeDeleting TransformStatusType = "DELETING" + TransformStatusTypeNotReady TransformStatusType = "NOT_READY" + TransformStatusTypeReady TransformStatusType = "READY" + TransformStatusTypeDeleting TransformStatusType = "DELETING" ) // Values returns all known values for TransformStatusType. Note that this can be @@ -750,7 +750,7 @@ type TransformType string // Enum values for TransformType const ( - TransformTypeFind_matches TransformType = "FIND_MATCHES" + TransformTypeFindMatches TransformType = "FIND_MATCHES" ) // Values returns all known values for TransformType. Note that this can be @@ -798,7 +798,7 @@ type TriggerType string const ( TriggerTypeScheduled TriggerType = "SCHEDULED" TriggerTypeConditional TriggerType = "CONDITIONAL" - TriggerTypeOn_demand TriggerType = "ON_DEMAND" + TriggerTypeOnDemand TriggerType = "ON_DEMAND" ) // Values returns all known values for TriggerType. Note that this can be expanded @@ -816,8 +816,8 @@ type UpdateBehavior string // Enum values for UpdateBehavior const ( - UpdateBehaviorLog UpdateBehavior = "LOG" - UpdateBehaviorUpdate_in_database UpdateBehavior = "UPDATE_IN_DATABASE" + UpdateBehaviorLog UpdateBehavior = "LOG" + UpdateBehaviorUpdateInDatabase UpdateBehavior = "UPDATE_IN_DATABASE" ) // Values returns all known values for UpdateBehavior. Note that this can be diff --git a/service/glue/types/types.go b/service/glue/types/types.go index 9f6155c1c3c..213ada7c3a8 100644 --- a/service/glue/types/types.go +++ b/service/glue/types/types.go @@ -396,88 +396,87 @@ type Connection struct { // These key-value pairs define parameters for the connection: // - // * HOST - The - // host URI: either the fully qualified domain name (FQDN) or the IPv4 address of - // the database host. + // * HOST - The host + // URI: either the fully qualified domain name (FQDN) or the IPv4 address of the + // database host. // - // * PORT - The port number, between 1024 and 65535, of the - // port on which the database host is listening for database connections. + // * PORT - The port number, between 1024 and 65535, of the port on + // which the database host is listening for database connections. // - // * - // USER_NAME - The name under which to log in to the database. The value string for - // USER_NAME is "USERNAME". + // * USER_NAME - + // The name under which to log in to the database. The value string for USER_NAME + // is "USERNAME". // - // * PASSWORD - A password, if one is used, for the - // user name. + // * PASSWORD - A password, if one is used, for the user name. // - // * ENCRYPTED_PASSWORD - When you enable connection password - // protection by setting ConnectionPasswordEncryption in the Data Catalog - // encryption settings, this field stores the encrypted password. + // * + // ENCRYPTED_PASSWORD - When you enable connection password protection by setting + // ConnectionPasswordEncryption in the Data Catalog encryption settings, this field + // stores the encrypted password. // - // * - // JDBC_DRIVER_JAR_URI - The Amazon Simple Storage Service (Amazon S3) path of the - // JAR file that contains the JDBC driver to use. + // * JDBC_DRIVER_JAR_URI - The Amazon Simple + // Storage Service (Amazon S3) path of the JAR file that contains the JDBC driver + // to use. 
// - // * JDBC_DRIVER_CLASS_NAME - - // The class name of the JDBC driver to use. + // * JDBC_DRIVER_CLASS_NAME - The class name of the JDBC driver to use. // - // * JDBC_ENGINE - The name of the - // JDBC engine to use. + // * + // JDBC_ENGINE - The name of the JDBC engine to use. // - // * JDBC_ENGINE_VERSION - The version of the JDBC engine - // to use. + // * JDBC_ENGINE_VERSION - The + // version of the JDBC engine to use. // - // * CONFIG_FILES - (Reserved for future use.) + // * CONFIG_FILES - (Reserved for future + // use.) // - // * INSTANCE_ID - - // The instance ID to use. + // * INSTANCE_ID - The instance ID to use. // - // * JDBC_CONNECTION_URL - The URL for connecting to a - // JDBC data source. + // * JDBC_CONNECTION_URL - The URL + // for connecting to a JDBC data source. // - // * JDBC_ENFORCE_SSL - A Boolean string (true, false) - // specifying whether Secure Sockets Layer (SSL) with hostname matching is enforced - // for the JDBC connection on the client. The default is false. + // * JDBC_ENFORCE_SSL - A Boolean string + // (true, false) specifying whether Secure Sockets Layer (SSL) with hostname + // matching is enforced for the JDBC connection on the client. The default is + // false. // - // * - // CUSTOM_JDBC_CERT - An Amazon S3 location specifying the customer's root - // certificate. AWS Glue uses this root certificate to validate the customer’s + // * CUSTOM_JDBC_CERT - An Amazon S3 location specifying the customer's + // root certificate. AWS Glue uses this root certificate to validate the customer’s // certificate when connecting to the customer database. AWS Glue only handles // X.509 certificates. The certificate provided must be DER-encoded and supplied in // Base64 encoding PEM format. // - // * SKIP_CUSTOM_JDBC_CERT_VALIDATION - By - // default, this is false. AWS Glue validates the Signature algorithm and Subject - // Public Key Algorithm for the customer certificate. The only permitted algorithms - // for the Signature algorithm are SHA256withRSA, SHA384withRSA or SHA512withRSA. - // For the Subject Public Key Algorithm, the key length must be at least 2048. You - // can set the value of this property to true to skip AWS Glue’s validation of the - // customer certificate. + // * SKIP_CUSTOM_JDBC_CERT_VALIDATION - By default, + // this is false. AWS Glue validates the Signature algorithm and Subject Public Key + // Algorithm for the customer certificate. The only permitted algorithms for the + // Signature algorithm are SHA256withRSA, SHA384withRSA or SHA512withRSA. For the + // Subject Public Key Algorithm, the key length must be at least 2048. You can set + // the value of this property to true to skip AWS Glue’s validation of the customer + // certificate. // - // * CUSTOM_JDBC_CERT_STRING - A custom JDBC certificate - // string which is used for domain match or distinguished name match to prevent a + // * CUSTOM_JDBC_CERT_STRING - A custom JDBC certificate string which + // is used for domain match or distinguished name match to prevent a // man-in-the-middle attack. In Oracle database, this is used as the // SSL_SERVER_CERT_DN; in Microsoft SQL Server, this is used as the // hostNameInCertificate. // - // * CONNECTION_URL - The URL for connecting to a - // general (non-JDBC) data source. + // * CONNECTION_URL - The URL for connecting to a general + // (non-JDBC) data source. 
// - // * KAFKA_BOOTSTRAP_SERVERS - A - // comma-separated list of host and port pairs that are the addresses of the Apache - // Kafka brokers in a Kafka cluster to which a Kafka client will connect to and - // bootstrap itself. + // * KAFKA_BOOTSTRAP_SERVERS - A comma-separated list of + // host and port pairs that are the addresses of the Apache Kafka brokers in a + // Kafka cluster to which a Kafka client will connect to and bootstrap itself. // - // * KAFKA_SSL_ENABLED - Whether to enable or disable SSL on - // an Apache Kafka connection. Default value is "true". + // * + // KAFKA_SSL_ENABLED - Whether to enable or disable SSL on an Apache Kafka + // connection. Default value is "true". // - // * KAFKA_CUSTOM_CERT - - // The Amazon S3 URL for the private CA cert file (.pem format). The default is an - // empty string. + // * KAFKA_CUSTOM_CERT - The Amazon S3 URL + // for the private CA cert file (.pem format). The default is an empty string. // - // * KAFKA_SKIP_CUSTOM_CERT_VALIDATION - Whether to skip the - // validation of the CA cert file or not. AWS Glue validates for three algorithms: - // SHA256withRSA, SHA384withRSA and SHA512withRSA. Default value is "false". + // * + // KAFKA_SKIP_CUSTOM_CERT_VALIDATION - Whether to skip the validation of the CA + // cert file or not. AWS Glue validates for three algorithms: SHA256withRSA, + // SHA384withRSA and SHA512withRSA. Default value is "false". ConnectionProperties map[string]*string // The type of the connection. Currently, SFTP is not supported. @@ -516,18 +515,18 @@ type ConnectionInput struct { // The type of the connection. Currently, these types are supported: // - // * JDBC - + // * JDBC - // Designates a connection to a database through Java Database Connectivity // (JDBC). // - // * KAFKA - Designates a connection to an Apache Kafka streaming + // * KAFKA - Designates a connection to an Apache Kafka streaming // platform. // - // * MONGODB - Designates a connection to a MongoDB document + // * MONGODB - Designates a connection to a MongoDB document // database. // - // * NETWORK - Designates a network connection to a data source - // within an Amazon Virtual Private Cloud environment (Amazon VPC). + // * NETWORK - Designates a network connection to a data source within + // an Amazon Virtual Private Cloud environment (Amazon VPC). // // SFTP is not // supported. @@ -1007,12 +1006,12 @@ type DevEndpoint struct { // A map of arguments used to configure the DevEndpoint. Valid arguments are: // + // * + // "--enable-glue-datacatalog": "" // - // * "--enable-glue-datacatalog": "" + // * "GLUE_PYTHON_VERSION": "3" // - // * "GLUE_PYTHON_VERSION": "3" - // - // * + // * // "GLUE_PYTHON_VERSION": "2" // // You can specify a version of Python support for @@ -1116,22 +1115,22 @@ type DevEndpoint struct { // The type of predefined worker that is allocated to the development endpoint. // Accepts a value of Standard, G.1X, or G.2X. // - // * For the Standard worker type, + // * For the Standard worker type, // each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors // per worker. // - // * For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, - // 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend - // this worker type for memory-intensive jobs. + // * For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 + // GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this + // worker type for memory-intensive jobs. 
// - // * For the G.2X worker type, - // each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 - // executor per worker. We recommend this worker type for memory-intensive - // jobs. + // * For the G.2X worker type, each worker + // maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor + // per worker. We recommend this worker type for memory-intensive jobs. // - // Known issue: when a development endpoint is created with the - // G.2XWorkerType configuration, the Spark drivers for the development endpoint - // will run on 4 vCPU, 16 GB of memory, and a 64 GB disk. + // Known + // issue: when a development endpoint is created with the G.2XWorkerType + // configuration, the Spark drivers for the development endpoint will run on 4 + // vCPU, 16 GB of memory, and a 64 GB disk. WorkerType WorkerType // The YARN endpoint address used by this DevEndpoint. @@ -1537,13 +1536,13 @@ type Job struct { // allocated for MaxCapacity depends on whether you are running a Python shell job, // an Apache Spark ETL job, or an Apache Spark streaming ETL job: // - // * When you + // * When you // specify a Python shell job (JobCommand.Name="pythonshell"), you can allocate // either 0.0625 or 1 DPU. The default is 0.0625 DPU. // - // * When you specify an - // Apache Spark ETL job (JobCommand.Name="glueetl") or Apache Spark streaming ETL - // job (JobCommand.Name="gluestreaming"), you can allocate from 2 to 100 DPUs. The + // * When you specify an Apache + // Spark ETL job (JobCommand.Name="glueetl") or Apache Spark streaming ETL job + // (JobCommand.Name="gluestreaming"), you can allocate from 2 to 100 DPUs. The // default is 10 DPUs. This job type cannot have a fractional DPU allocation. MaxCapacity *float64 @@ -1578,17 +1577,17 @@ type Job struct { // The type of predefined worker that is allocated when a job runs. Accepts a value // of Standard, G.1X, or G.2X. // - // * For the Standard worker type, each worker + // * For the Standard worker type, each worker // provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker. // + // * + // For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 + // GB disk), and provides 1 executor per worker. We recommend this worker type for + // memory-intensive jobs. // - // * For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, - // 64 GB disk), and provides 1 executor per worker. We recommend this worker type - // for memory-intensive jobs. - // - // * For the G.2X worker type, each worker maps to - // 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per - // worker. We recommend this worker type for memory-intensive jobs. + // * For the G.2X worker type, each worker maps to 2 DPU (8 + // vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We + // recommend this worker type for memory-intensive jobs. WorkerType WorkerType } @@ -1726,11 +1725,11 @@ type JobRun struct { // allocated for MaxCapacity depends on whether you are running a Python shell job // or an Apache Spark ETL job: // - // * When you specify a Python shell job + // * When you specify a Python shell job // (JobCommand.Name="pythonshell"), you can allocate either 0.0625 or 1 DPU. The // default is 0.0625 DPU. // - // * When you specify an Apache Spark ETL job + // * When you specify an Apache Spark ETL job // (JobCommand.Name="glueetl"), you can allocate from 2 to 100 DPUs. The default is // 10 DPUs. 
This job type cannot have a fractional DPU allocation. MaxCapacity *float64 @@ -1768,16 +1767,15 @@ type JobRun struct { // The type of predefined worker that is allocated when a job runs. Accepts a value // of Standard, G.1X, or G.2X. // - // * For the Standard worker type, each worker + // * For the Standard worker type, each worker // provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker. // - // - // * For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a + // * + // For the G.1X worker type, each worker provides 4 vCPU, 16 GB of memory and a // 64GB disk, and 1 executor per worker. // - // * For the G.2X worker type, each - // worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per - // worker. + // * For the G.2X worker type, each worker + // provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker. WorkerType WorkerType } @@ -1836,11 +1834,11 @@ type JobUpdate struct { // allocated for MaxCapacity depends on whether you are running a Python shell job // or an Apache Spark ETL job: // - // * When you specify a Python shell job + // * When you specify a Python shell job // (JobCommand.Name="pythonshell"), you can allocate either 0.0625 or 1 DPU. The // default is 0.0625 DPU. // - // * When you specify an Apache Spark ETL job + // * When you specify an Apache Spark ETL job // (JobCommand.Name="glueetl") or Apache Spark streaming ETL job // (JobCommand.Name="gluestreaming"), you can allocate from 2 to 100 DPUs. The // default is 10 DPUs. This job type cannot have a fractional DPU allocation. @@ -1875,17 +1873,17 @@ type JobUpdate struct { // The type of predefined worker that is allocated when a job runs. Accepts a value // of Standard, G.1X, or G.2X. // - // * For the Standard worker type, each worker + // * For the Standard worker type, each worker // provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker. // + // * + // For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 + // GB disk), and provides 1 executor per worker. We recommend this worker type for + // memory-intensive jobs. // - // * For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, - // 64 GB disk), and provides 1 executor per worker. We recommend this worker type - // for memory-intensive jobs. - // - // * For the G.2X worker type, each worker maps to - // 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per - // worker. We recommend this worker type for memory-intensive jobs. + // * For the G.2X worker type, each worker maps to 2 DPU (8 + // vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We + // recommend this worker type for memory-intensive jobs. WorkerType WorkerType } @@ -2055,16 +2053,16 @@ type MLTransform struct { // pricing page (http://aws.amazon.com/glue/pricing/). MaxCapacity is a mutually // exclusive option with NumberOfWorkers and WorkerType. // - // * If either + // * If either // NumberOfWorkers or WorkerType is set, then MaxCapacity cannot be set. // - // * If + // * If // MaxCapacity is set then neither NumberOfWorkers or WorkerType can be set. // - // * - // If WorkerType is set, then NumberOfWorkers is required (and vice versa). + // * If + // WorkerType is set, then NumberOfWorkers is required (and vice versa). // - // * + // * // MaxCapacity and NumberOfWorkers must both be at least 1. 
// // When the WorkerType @@ -2096,12 +2094,11 @@ type MLTransform struct { // permissions to AWS Glue resources, and Amazon S3 permissions required by the // transform. // - // * This role needs AWS Glue service role permissions to allow - // access to resources in AWS Glue. See Attach a Policy to IAM Users That Access - // AWS Glue + // * This role needs AWS Glue service role permissions to allow access + // to resources in AWS Glue. See Attach a Policy to IAM Users That Access AWS Glue // (https://docs.aws.amazon.com/glue/latest/dg/attach-policy-iam-user.html). // - // * + // * // This role needs permission to your Amazon Simple Storage Service (Amazon S3) // sources, targets, temporary directory, scripts, and any libraries used by the // task run for this transform. @@ -2124,31 +2121,31 @@ type MLTransform struct { // The type of predefined worker that is allocated when a task of this transform // runs. Accepts a value of Standard, G.1X, or G.2X. // - // * For the Standard worker + // * For the Standard worker // type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 // executors per worker. // - // * For the G.1X worker type, each worker provides 4 - // vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker. + // * For the G.1X worker type, each worker provides 4 vCPU, + // 16 GB of memory and a 64GB disk, and 1 executor per worker. // - // * For the - // G.2X worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, - // and 1 executor per worker. + // * For the G.2X + // worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and + // 1 executor per worker. // // MaxCapacity is a mutually exclusive option with // NumberOfWorkers and WorkerType. // - // * If either NumberOfWorkers or WorkerType - // is set, then MaxCapacity cannot be set. + // * If either NumberOfWorkers or WorkerType is + // set, then MaxCapacity cannot be set. // - // * If MaxCapacity is set then - // neither NumberOfWorkers or WorkerType can be set. + // * If MaxCapacity is set then neither + // NumberOfWorkers or WorkerType can be set. // - // * If WorkerType is set, - // then NumberOfWorkers is required (and vice versa). + // * If WorkerType is set, then + // NumberOfWorkers is required (and vice versa). // - // * MaxCapacity and - // NumberOfWorkers must both be at least 1. + // * MaxCapacity and NumberOfWorkers + // must both be at least 1. WorkerType WorkerType } diff --git a/service/greengrass/types/enums.go b/service/greengrass/types/enums.go index 70110df89bd..20ad79e368b 100644 --- a/service/greengrass/types/enums.go +++ b/service/greengrass/types/enums.go @@ -186,8 +186,8 @@ type SoftwareToUpdate string // Enum values for SoftwareToUpdate const ( - SoftwareToUpdateCore SoftwareToUpdate = "core" - SoftwareToUpdateOta_agent SoftwareToUpdate = "ota_agent" + SoftwareToUpdateCore SoftwareToUpdate = "core" + SoftwareToUpdateOtaAgent SoftwareToUpdate = "ota_agent" ) // Values returns all known values for SoftwareToUpdate. 
Note that this can be @@ -254,7 +254,7 @@ type UpdateTargetsArchitecture string const ( UpdateTargetsArchitectureArmv6l UpdateTargetsArchitecture = "armv6l" UpdateTargetsArchitectureArmv7l UpdateTargetsArchitecture = "armv7l" - UpdateTargetsArchitectureX86_64 UpdateTargetsArchitecture = "x86_64" + UpdateTargetsArchitectureX8664 UpdateTargetsArchitecture = "x86_64" UpdateTargetsArchitectureAarch64 UpdateTargetsArchitecture = "aarch64" ) @@ -274,10 +274,10 @@ type UpdateTargetsOperatingSystem string // Enum values for UpdateTargetsOperatingSystem const ( - UpdateTargetsOperatingSystemUbuntu UpdateTargetsOperatingSystem = "ubuntu" - UpdateTargetsOperatingSystemRaspbian UpdateTargetsOperatingSystem = "raspbian" - UpdateTargetsOperatingSystemAmazon_linux UpdateTargetsOperatingSystem = "amazon_linux" - UpdateTargetsOperatingSystemOpenwrt UpdateTargetsOperatingSystem = "openwrt" + UpdateTargetsOperatingSystemUbuntu UpdateTargetsOperatingSystem = "ubuntu" + UpdateTargetsOperatingSystemRaspbian UpdateTargetsOperatingSystem = "raspbian" + UpdateTargetsOperatingSystemAmazonLinux UpdateTargetsOperatingSystem = "amazon_linux" + UpdateTargetsOperatingSystemOpenwrt UpdateTargetsOperatingSystem = "openwrt" ) // Values returns all known values for UpdateTargetsOperatingSystem. Note that this diff --git a/service/groundstation/types/enums.go b/service/groundstation/types/enums.go index 925d8fd4e7e..0bb2217b74f 100644 --- a/service/groundstation/types/enums.go +++ b/service/groundstation/types/enums.go @@ -6,8 +6,8 @@ type AngleUnits string // Enum values for AngleUnits const ( - AngleUnitsDegree_angle AngleUnits = "DEGREE_ANGLE" - AngleUnitsRadian AngleUnits = "RADIAN" + AngleUnitsDegreeAngle AngleUnits = "DEGREE_ANGLE" + AngleUnitsRadian AngleUnits = "RADIAN" ) // Values returns all known values for AngleUnits. Note that this can be expanded @@ -44,12 +44,12 @@ type ConfigCapabilityType string // Enum values for ConfigCapabilityType const ( - ConfigCapabilityTypeAntenna_downlink ConfigCapabilityType = "antenna-downlink" - ConfigCapabilityTypeAntenna_downlink_demod_decode ConfigCapabilityType = "antenna-downlink-demod-decode" - ConfigCapabilityTypeTracking ConfigCapabilityType = "tracking" - ConfigCapabilityTypeDataflow_endpoint ConfigCapabilityType = "dataflow-endpoint" - ConfigCapabilityTypeAntenna_uplink ConfigCapabilityType = "antenna-uplink" - ConfigCapabilityTypeUplink_echo ConfigCapabilityType = "uplink-echo" + ConfigCapabilityTypeAntennaDownlink ConfigCapabilityType = "antenna-downlink" + ConfigCapabilityTypeAntennaDownlinkDemodDecode ConfigCapabilityType = "antenna-downlink-demod-decode" + ConfigCapabilityTypeTracking ConfigCapabilityType = "tracking" + ConfigCapabilityTypeDataflowEndpoint ConfigCapabilityType = "dataflow-endpoint" + ConfigCapabilityTypeAntennaUplink ConfigCapabilityType = "antenna-uplink" + ConfigCapabilityTypeUplinkEcho ConfigCapabilityType = "uplink-echo" ) // Values returns all known values for ConfigCapabilityType. 
Note that this can be @@ -70,18 +70,18 @@ type ContactStatus string // Enum values for ContactStatus const ( - ContactStatusScheduling ContactStatus = "SCHEDULING" - ContactStatusFailed_to_schedule ContactStatus = "FAILED_TO_SCHEDULE" - ContactStatusScheduled ContactStatus = "SCHEDULED" - ContactStatusCancelled ContactStatus = "CANCELLED" - ContactStatusAws_cancelled ContactStatus = "AWS_CANCELLED" - ContactStatusPrepass ContactStatus = "PREPASS" - ContactStatusPass ContactStatus = "PASS" - ContactStatusPostpass ContactStatus = "POSTPASS" - ContactStatusCompleted ContactStatus = "COMPLETED" - ContactStatusFailed ContactStatus = "FAILED" - ContactStatusAvailable ContactStatus = "AVAILABLE" - ContactStatusCancelling ContactStatus = "CANCELLING" + ContactStatusScheduling ContactStatus = "SCHEDULING" + ContactStatusFailedToSchedule ContactStatus = "FAILED_TO_SCHEDULE" + ContactStatusScheduled ContactStatus = "SCHEDULED" + ContactStatusCancelled ContactStatus = "CANCELLED" + ContactStatusAwsCancelled ContactStatus = "AWS_CANCELLED" + ContactStatusPrepass ContactStatus = "PREPASS" + ContactStatusPass ContactStatus = "PASS" + ContactStatusPostpass ContactStatus = "POSTPASS" + ContactStatusCompleted ContactStatus = "COMPLETED" + ContactStatusFailed ContactStatus = "FAILED" + ContactStatusAvailable ContactStatus = "AVAILABLE" + ContactStatusCancelling ContactStatus = "CANCELLING" ) // Values returns all known values for ContactStatus. Note that this can be @@ -188,9 +188,9 @@ type Polarization string // Enum values for Polarization const ( - PolarizationRight_hand Polarization = "RIGHT_HAND" - PolarizationLeft_hand Polarization = "LEFT_HAND" - PolarizationNone Polarization = "NONE" + PolarizationRightHand Polarization = "RIGHT_HAND" + PolarizationLeftHand Polarization = "LEFT_HAND" + PolarizationNone Polarization = "NONE" ) // Values returns all known values for Polarization. Note that this can be expanded diff --git a/service/guardduty/api_op_CreateFilter.go b/service/guardduty/api_op_CreateFilter.go index 2223d7eb739..0f732e5f356 100644 --- a/service/guardduty/api_op_CreateFilter.go +++ b/service/guardduty/api_op_CreateFilter.go @@ -39,148 +39,147 @@ type CreateFilterInput struct { // Represents the criteria to be used in the filter for querying findings. 
You can // only use the following attributes to query findings: // - // * accountId + // * accountId // - // * - // region + // * region // - // * confidence + // * + // confidence // - // * id + // * id // - // * - // resource.accessKeyDetails.accessKeyId + // * resource.accessKeyDetails.accessKeyId // - // * + // * // resource.accessKeyDetails.principalId // - // * - // resource.accessKeyDetails.userName + // * resource.accessKeyDetails.userName // - // * resource.accessKeyDetails.userType + // * + // resource.accessKeyDetails.userType // + // * + // resource.instanceDetails.iamInstanceProfile.id // - // * resource.instanceDetails.iamInstanceProfile.id - // - // * + // * // resource.instanceDetails.imageId // - // * resource.instanceDetails.instanceId - // + // * resource.instanceDetails.instanceId // - // * resource.instanceDetails.outpostArn + // * + // resource.instanceDetails.outpostArn // - // * + // * // resource.instanceDetails.networkInterfaces.ipv6Addresses // - // * + // * // resource.instanceDetails.networkInterfaces.privateIpAddresses.privateIpAddress // + // * + // resource.instanceDetails.networkInterfaces.publicDnsName // - // * resource.instanceDetails.networkInterfaces.publicDnsName - // - // * + // * // resource.instanceDetails.networkInterfaces.publicIp // - // * + // * // resource.instanceDetails.networkInterfaces.securityGroups.groupId // - // * + // * // resource.instanceDetails.networkInterfaces.securityGroups.groupName // - // * + // * // resource.instanceDetails.networkInterfaces.subnetId // - // * + // * // resource.instanceDetails.networkInterfaces.vpcId // - // * + // * // resource.instanceDetails.tags.key // - // * resource.instanceDetails.tags.value + // * resource.instanceDetails.tags.value // + // * + // resource.resourceType // - // * resource.resourceType + // * service.action.actionType // - // * service.action.actionType - // - // * + // * // service.action.awsApiCallAction.api // - // * + // * // service.action.awsApiCallAction.callerType // - // * + // * // service.action.awsApiCallAction.remoteIpDetails.city.cityName // - // * + // * // service.action.awsApiCallAction.remoteIpDetails.country.countryName // - // * + // * // service.action.awsApiCallAction.remoteIpDetails.ipAddressV4 // - // * + // * // service.action.awsApiCallAction.remoteIpDetails.organization.asn // - // * + // * // service.action.awsApiCallAction.remoteIpDetails.organization.asnOrg // - // * + // * // service.action.awsApiCallAction.serviceName // - // * + // * // service.action.dnsRequestAction.domain // - // * + // * // service.action.networkConnectionAction.blocked // - // * + // * // service.action.networkConnectionAction.connectionDirection // - // * + // * // service.action.networkConnectionAction.localPortDetails.port // - // * + // * // service.action.networkConnectionAction.protocol // - // * + // * // service.action.networkConnectionAction.localIpDetails.ipAddressV4 // - // * + // * // service.action.networkConnectionAction.remoteIpDetails.city.cityName // - // * + // * // service.action.networkConnectionAction.remoteIpDetails.country.countryName // + // * + // service.action.networkConnectionAction.remoteIpDetails.ipAddressV4 // - // * service.action.networkConnectionAction.remoteIpDetails.ipAddressV4 - // - // * + // * // service.action.networkConnectionAction.remoteIpDetails.organization.asn // - // * + // * // service.action.networkConnectionAction.remoteIpDetails.organization.asnOrg // + // * + // service.action.networkConnectionAction.remotePortDetails.port // - 
// * service.action.networkConnectionAction.remotePortDetails.port - // - // * + // * // service.additionalInfo.threatListName // - // * service.archived When this - // attribute is set to TRUE, only archived findings are listed. When it's set to - // FALSE, only unarchived findings are listed. When this attribute is not set, all - // existing findings are listed. + // * service.archived When this attribute is + // set to TRUE, only archived findings are listed. When it's set to FALSE, only + // unarchived findings are listed. When this attribute is not set, all existing + // findings are listed. // - // * service.resourceRole + // * service.resourceRole // - // * severity + // * severity // - // * - // type + // * type // - // * updatedAt Type: ISO 8601 string format: YYYY-MM-DDTHH:MM:SS.SSSZ or - // YYYY-MM-DDTHH:MM:SSZ depending on whether the value contains milliseconds. + // * updatedAt + // Type: ISO 8601 string format: YYYY-MM-DDTHH:MM:SS.SSSZ or YYYY-MM-DDTHH:MM:SSZ + // depending on whether the value contains milliseconds. // // This member is required. FindingCriteria *types.FindingCriteria diff --git a/service/guardduty/api_op_ListFindings.go b/service/guardduty/api_op_ListFindings.go index 7d3f3e2adf4..b898921aaf7 100644 --- a/service/guardduty/api_op_ListFindings.go +++ b/service/guardduty/api_op_ListFindings.go @@ -37,144 +37,143 @@ type ListFindingsInput struct { // Represents the criteria used for querying findings. Valid values include: // - // * + // * // JSON field name // - // * accountId + // * accountId // - // * region + // * region // - // * confidence + // * confidence // - // * id + // * id // + // * + // resource.accessKeyDetails.accessKeyId // - // * resource.accessKeyDetails.accessKeyId - // - // * + // * // resource.accessKeyDetails.principalId // - // * - // resource.accessKeyDetails.userName - // - // * resource.accessKeyDetails.userType + // * resource.accessKeyDetails.userName // + // * + // resource.accessKeyDetails.userType // - // * resource.instanceDetails.iamInstanceProfile.id + // * + // resource.instanceDetails.iamInstanceProfile.id // - // * + // * // resource.instanceDetails.imageId // - // * resource.instanceDetails.instanceId + // * resource.instanceDetails.instanceId // + // * + // resource.instanceDetails.networkInterfaces.ipv6Addresses // - // * resource.instanceDetails.networkInterfaces.ipv6Addresses - // - // * + // * // resource.instanceDetails.networkInterfaces.privateIpAddresses.privateIpAddress // + // * + // resource.instanceDetails.networkInterfaces.publicDnsName // - // * resource.instanceDetails.networkInterfaces.publicDnsName - // - // * + // * // resource.instanceDetails.networkInterfaces.publicIp // - // * + // * // resource.instanceDetails.networkInterfaces.securityGroups.groupId // - // * + // * // resource.instanceDetails.networkInterfaces.securityGroups.groupName // - // * + // * // resource.instanceDetails.networkInterfaces.subnetId // - // * + // * // resource.instanceDetails.networkInterfaces.vpcId // - // * + // * // resource.instanceDetails.tags.key // - // * resource.instanceDetails.tags.value + // * resource.instanceDetails.tags.value // + // * + // resource.resourceType // - // * resource.resourceType + // * service.action.actionType // - // * service.action.actionType - // - // * + // * // service.action.awsApiCallAction.api // - // * + // * // service.action.awsApiCallAction.callerType // - // * + // * // service.action.awsApiCallAction.remoteIpDetails.city.cityName // - // * + // * // 
service.action.awsApiCallAction.remoteIpDetails.country.countryName // - // * + // * // service.action.awsApiCallAction.remoteIpDetails.ipAddressV4 // - // * + // * // service.action.awsApiCallAction.remoteIpDetails.organization.asn // - // * + // * // service.action.awsApiCallAction.remoteIpDetails.organization.asnOrg // - // * + // * // service.action.awsApiCallAction.serviceName // - // * + // * // service.action.dnsRequestAction.domain // - // * + // * // service.action.networkConnectionAction.blocked // - // * + // * // service.action.networkConnectionAction.connectionDirection // - // * + // * // service.action.networkConnectionAction.localPortDetails.port // - // * + // * // service.action.networkConnectionAction.protocol // - // * + // * // service.action.networkConnectionAction.remoteIpDetails.city.cityName // - // * + // * // service.action.networkConnectionAction.remoteIpDetails.country.countryName // + // * + // service.action.networkConnectionAction.remoteIpDetails.ipAddressV4 // - // * service.action.networkConnectionAction.remoteIpDetails.ipAddressV4 - // - // * + // * // service.action.networkConnectionAction.remoteIpDetails.organization.asn // - // * + // * // service.action.networkConnectionAction.remoteIpDetails.organization.asnOrg // + // * + // service.action.networkConnectionAction.remotePortDetails.port // - // * service.action.networkConnectionAction.remotePortDetails.port - // - // * + // * // service.additionalInfo.threatListName // - // * service.archived When this - // attribute is set to 'true', only archived findings are listed. When it's set to - // 'false', only unarchived findings are listed. When this attribute is not set, - // all existing findings are listed. - // - // * service.resourceRole + // * service.archived When this attribute is + // set to 'true', only archived findings are listed. When it's set to 'false', only + // unarchived findings are listed. When this attribute is not set, all existing + // findings are listed. // - // * severity + // * service.resourceRole // + // * severity // // * type // - // * updatedAt Type: Timestamp in Unix Epoch millisecond format: - // 1486685375000 + // * updatedAt + // Type: Timestamp in Unix Epoch millisecond format: 1486685375000 FindingCriteria *types.FindingCriteria // You can use this parameter to indicate the maximum number of items you want in diff --git a/service/guardduty/types/enums.go b/service/guardduty/types/enums.go index 5d17080ebfc..ead2c04be55 100644 --- a/service/guardduty/types/enums.go +++ b/service/guardduty/types/enums.go @@ -6,8 +6,8 @@ type AdminStatus string // Enum values for AdminStatus const ( - AdminStatusEnabled AdminStatus = "ENABLED" - AdminStatusDisable_in_progress AdminStatus = "DISABLE_IN_PROGRESS" + AdminStatusEnabled AdminStatus = "ENABLED" + AdminStatusDisableInProgress AdminStatus = "DISABLE_IN_PROGRESS" ) // Values returns all known values for AdminStatus. Note that this can be expanded @@ -24,10 +24,10 @@ type DataSource string // Enum values for DataSource const ( - DataSourceFlow_logs DataSource = "FLOW_LOGS" - DataSourceCloud_trail DataSource = "CLOUD_TRAIL" - DataSourceDns_logs DataSource = "DNS_LOGS" - DataSourceS3_logs DataSource = "S3_LOGS" + DataSourceFlowLogs DataSource = "FLOW_LOGS" + DataSourceCloudTrail DataSource = "CLOUD_TRAIL" + DataSourceDnsLogs DataSource = "DNS_LOGS" + DataSourceS3Logs DataSource = "S3_LOGS" ) // Values returns all known values for DataSource. 
Note that this can be expanded @@ -98,8 +98,8 @@ type Feedback string // Enum values for Feedback const ( - FeedbackUseful Feedback = "USEFUL" - FeedbackNot_useful Feedback = "NOT_USEFUL" + FeedbackUseful Feedback = "USEFUL" + FeedbackNotUseful Feedback = "NOT_USEFUL" ) // Values returns all known values for Feedback. Note that this can be expanded in @@ -134,9 +134,9 @@ type FindingPublishingFrequency string // Enum values for FindingPublishingFrequency const ( - FindingPublishingFrequencyFifteen_minutes FindingPublishingFrequency = "FIFTEEN_MINUTES" - FindingPublishingFrequencyOne_hour FindingPublishingFrequency = "ONE_HOUR" - FindingPublishingFrequencySix_hours FindingPublishingFrequency = "SIX_HOURS" + FindingPublishingFrequencyFifteenMinutes FindingPublishingFrequency = "FIFTEEN_MINUTES" + FindingPublishingFrequencyOneHour FindingPublishingFrequency = "ONE_HOUR" + FindingPublishingFrequencySixHours FindingPublishingFrequency = "SIX_HOURS" ) // Values returns all known values for FindingPublishingFrequency. Note that this @@ -154,7 +154,7 @@ type FindingStatisticType string // Enum values for FindingStatisticType const ( - FindingStatisticTypeCount_by_severity FindingStatisticType = "COUNT_BY_SEVERITY" + FindingStatisticTypeCountBySeverity FindingStatisticType = "COUNT_BY_SEVERITY" ) // Values returns all known values for FindingStatisticType. Note that this can be @@ -170,12 +170,12 @@ type IpSetFormat string // Enum values for IpSetFormat const ( - IpSetFormatTxt IpSetFormat = "TXT" - IpSetFormatStix IpSetFormat = "STIX" - IpSetFormatOtx_csv IpSetFormat = "OTX_CSV" - IpSetFormatAlien_vault IpSetFormat = "ALIEN_VAULT" - IpSetFormatProof_point IpSetFormat = "PROOF_POINT" - IpSetFormatFire_eye IpSetFormat = "FIRE_EYE" + IpSetFormatTxt IpSetFormat = "TXT" + IpSetFormatStix IpSetFormat = "STIX" + IpSetFormatOtxCsv IpSetFormat = "OTX_CSV" + IpSetFormatAlienVault IpSetFormat = "ALIEN_VAULT" + IpSetFormatProofPoint IpSetFormat = "PROOF_POINT" + IpSetFormatFireEye IpSetFormat = "FIRE_EYE" ) // Values returns all known values for IpSetFormat. Note that this can be expanded @@ -196,13 +196,13 @@ type IpSetStatus string // Enum values for IpSetStatus const ( - IpSetStatusInactive IpSetStatus = "INACTIVE" - IpSetStatusActivating IpSetStatus = "ACTIVATING" - IpSetStatusActive IpSetStatus = "ACTIVE" - IpSetStatusDeactivating IpSetStatus = "DEACTIVATING" - IpSetStatusError IpSetStatus = "ERROR" - IpSetStatusDelete_pending IpSetStatus = "DELETE_PENDING" - IpSetStatusDeleted IpSetStatus = "DELETED" + IpSetStatusInactive IpSetStatus = "INACTIVE" + IpSetStatusActivating IpSetStatus = "ACTIVATING" + IpSetStatusActive IpSetStatus = "ACTIVE" + IpSetStatusDeactivating IpSetStatus = "DEACTIVATING" + IpSetStatusError IpSetStatus = "ERROR" + IpSetStatusDeletePending IpSetStatus = "DELETE_PENDING" + IpSetStatusDeleted IpSetStatus = "DELETED" ) // Values returns all known values for IpSetStatus. 
Note that this can be expanded @@ -242,10 +242,10 @@ type PublishingStatus string // Enum values for PublishingStatus const ( - PublishingStatusPending_verification PublishingStatus = "PENDING_VERIFICATION" - PublishingStatusPublishing PublishingStatus = "PUBLISHING" - PublishingStatusUnable_to_publish_fix_destination_property PublishingStatus = "UNABLE_TO_PUBLISH_FIX_DESTINATION_PROPERTY" - PublishingStatusStopped PublishingStatus = "STOPPED" + PublishingStatusPendingVerification PublishingStatus = "PENDING_VERIFICATION" + PublishingStatusPublishing PublishingStatus = "PUBLISHING" + PublishingStatusUnableToPublishFixDestinationProperty PublishingStatus = "UNABLE_TO_PUBLISH_FIX_DESTINATION_PROPERTY" + PublishingStatusStopped PublishingStatus = "STOPPED" ) // Values returns all known values for PublishingStatus. Note that this can be @@ -264,12 +264,12 @@ type ThreatIntelSetFormat string // Enum values for ThreatIntelSetFormat const ( - ThreatIntelSetFormatTxt ThreatIntelSetFormat = "TXT" - ThreatIntelSetFormatStix ThreatIntelSetFormat = "STIX" - ThreatIntelSetFormatOtx_csv ThreatIntelSetFormat = "OTX_CSV" - ThreatIntelSetFormatAlien_vault ThreatIntelSetFormat = "ALIEN_VAULT" - ThreatIntelSetFormatProof_point ThreatIntelSetFormat = "PROOF_POINT" - ThreatIntelSetFormatFire_eye ThreatIntelSetFormat = "FIRE_EYE" + ThreatIntelSetFormatTxt ThreatIntelSetFormat = "TXT" + ThreatIntelSetFormatStix ThreatIntelSetFormat = "STIX" + ThreatIntelSetFormatOtxCsv ThreatIntelSetFormat = "OTX_CSV" + ThreatIntelSetFormatAlienVault ThreatIntelSetFormat = "ALIEN_VAULT" + ThreatIntelSetFormatProofPoint ThreatIntelSetFormat = "PROOF_POINT" + ThreatIntelSetFormatFireEye ThreatIntelSetFormat = "FIRE_EYE" ) // Values returns all known values for ThreatIntelSetFormat. Note that this can be @@ -290,13 +290,13 @@ type ThreatIntelSetStatus string // Enum values for ThreatIntelSetStatus const ( - ThreatIntelSetStatusInactive ThreatIntelSetStatus = "INACTIVE" - ThreatIntelSetStatusActivating ThreatIntelSetStatus = "ACTIVATING" - ThreatIntelSetStatusActive ThreatIntelSetStatus = "ACTIVE" - ThreatIntelSetStatusDeactivating ThreatIntelSetStatus = "DEACTIVATING" - ThreatIntelSetStatusError ThreatIntelSetStatus = "ERROR" - ThreatIntelSetStatusDelete_pending ThreatIntelSetStatus = "DELETE_PENDING" - ThreatIntelSetStatusDeleted ThreatIntelSetStatus = "DELETED" + ThreatIntelSetStatusInactive ThreatIntelSetStatus = "INACTIVE" + ThreatIntelSetStatusActivating ThreatIntelSetStatus = "ACTIVATING" + ThreatIntelSetStatusActive ThreatIntelSetStatus = "ACTIVE" + ThreatIntelSetStatusDeactivating ThreatIntelSetStatus = "DEACTIVATING" + ThreatIntelSetStatusError ThreatIntelSetStatus = "ERROR" + ThreatIntelSetStatusDeletePending ThreatIntelSetStatus = "DELETE_PENDING" + ThreatIntelSetStatusDeleted ThreatIntelSetStatus = "DELETED" ) // Values returns all known values for ThreatIntelSetStatus. 
Note that this can be @@ -318,10 +318,10 @@ type UsageStatisticType string // Enum values for UsageStatisticType const ( - UsageStatisticTypeSum_by_account UsageStatisticType = "SUM_BY_ACCOUNT" - UsageStatisticTypeSum_by_data_source UsageStatisticType = "SUM_BY_DATA_SOURCE" - UsageStatisticTypeSum_by_resource UsageStatisticType = "SUM_BY_RESOURCE" - UsageStatisticTypeTop_resources UsageStatisticType = "TOP_RESOURCES" + UsageStatisticTypeSumByAccount UsageStatisticType = "SUM_BY_ACCOUNT" + UsageStatisticTypeSumByDataSource UsageStatisticType = "SUM_BY_DATA_SOURCE" + UsageStatisticTypeSumByResource UsageStatisticType = "SUM_BY_RESOURCE" + UsageStatisticTypeTopResources UsageStatisticType = "TOP_RESOURCES" ) // Values returns all known values for UsageStatisticType. Note that this can be diff --git a/service/health/api_op_DescribeAffectedAccountsForOrganization.go b/service/health/api_op_DescribeAffectedAccountsForOrganization.go index 8129f71745b..fba98d68c3b 100644 --- a/service/health/api_op_DescribeAffectedAccountsForOrganization.go +++ b/service/health/api_op_DescribeAffectedAccountsForOrganization.go @@ -66,16 +66,16 @@ type DescribeAffectedAccountsForOrganizationOutput struct { // This parameter specifies if the AWS Health event is a public AWS service event // or an account-specific event. // - // * If the eventScopeCode value is PUBLIC, then - // the affectedAccounts value is always empty. + // * If the eventScopeCode value is PUBLIC, then the + // affectedAccounts value is always empty. // - // * If the eventScopeCode value - // is ACCOUNT_SPECIFIC, then the affectedAccounts value lists the affected AWS + // * If the eventScopeCode value is + // ACCOUNT_SPECIFIC, then the affectedAccounts value lists the affected AWS // accounts in your organization. For example, if an event affects a service such // as Amazon Elastic Compute Cloud and you have AWS accounts that use that service, // those account IDs appear in the response. // - // * If the eventScopeCode value is + // * If the eventScopeCode value is // NONE, then the eventArn that you specified in the request is invalid or doesn't // exist. EventScopeCode types.EventScopeCode diff --git a/service/health/api_op_DescribeEventDetailsForOrganization.go b/service/health/api_op_DescribeEventDetailsForOrganization.go index 4daa244bdb4..478ae615300 100644 --- a/service/health/api_op_DescribeEventDetailsForOrganization.go +++ b/service/health/api_op_DescribeEventDetailsForOrganization.go @@ -28,16 +28,16 @@ import ( // organizationEventDetailFilters object in the request. Depending on the AWS // Health event type, note the following differences: // -// * If the event is -// public, the awsAccountId parameter must be empty. If you specify an account ID -// for a public event, then an error message is returned. That's because the event -// might apply to all AWS accounts and isn't specific to an account in your +// * If the event is public, +// the awsAccountId parameter must be empty. If you specify an account ID for a +// public event, then an error message is returned. That's because the event might +// apply to all AWS accounts and isn't specific to an account in your // organization. // -// * If the event is specific to an account, then you must -// specify the awsAccountId parameter in the request. If you don't specify an -// account ID, an error message returns because the event is specific to an AWS -// account in your organization. 
+// * If the event is specific to an account, then you must specify +// the awsAccountId parameter in the request. If you don't specify an account ID, +// an error message returns because the event is specific to an AWS account in your +// organization. // // For more information, see Event // (https://docs.aws.amazon.com/health/latest/APIReference/API_Event.html). diff --git a/service/health/api_op_DescribeEvents.go b/service/health/api_op_DescribeEvents.go index 37d9ceda1de..0c10f913292 100644 --- a/service/health/api_op_DescribeEvents.go +++ b/service/health/api_op_DescribeEvents.go @@ -21,8 +21,8 @@ import ( // operations. If no filter criteria are specified, all events are returned. // Results are sorted by lastModifiedTime, starting with the most recent event. // -// -// * When you call the DescribeEvents operation and specify an entity for the +// * +// When you call the DescribeEvents operation and specify an entity for the // entityValues parameter, AWS Health might return public events that aren't // specific to that resource. For example, if you call DescribeEvents and specify // an ID for an Amazon Elastic Compute Cloud (Amazon EC2) instance, AWS Health @@ -31,8 +31,8 @@ import ( // object. For more information, see Event // (https://docs.aws.amazon.com/health/latest/APIReference/API_Event.html). // -// * -// This API operation uses pagination. Specify the nextToken parameter in the next +// * This +// API operation uses pagination. Specify the nextToken parameter in the next // request to return more results. func (c *Client) DescribeEvents(ctx context.Context, params *DescribeEventsInput, optFns ...func(*Options)) (*DescribeEventsOutput, error) { if params == nil { diff --git a/service/health/api_op_DescribeEventsForOrganization.go b/service/health/api_op_DescribeEventsForOrganization.go index d609b6e42c1..a6496c4d14f 100644 --- a/service/health/api_op_DescribeEventsForOrganization.go +++ b/service/health/api_op_DescribeEventsForOrganization.go @@ -18,15 +18,15 @@ import ( // any affected resources. To retrieve that information, use the following // operations: // -// * DescribeAffectedAccountsForOrganization +// * DescribeAffectedAccountsForOrganization // (https://docs.aws.amazon.com/health/latest/APIReference/API_DescribeAffectedAccountsForOrganization.html) // -// -// * DescribeEventDetailsForOrganization +// * +// DescribeEventDetailsForOrganization // (https://docs.aws.amazon.com/health/latest/APIReference/API_DescribeEventDetailsForOrganization.html) // -// -// * DescribeAffectedEntitiesForOrganization +// * +// DescribeAffectedEntitiesForOrganization // (https://docs.aws.amazon.com/health/latest/APIReference/API_DescribeAffectedEntitiesForOrganization.html) // // If diff --git a/service/health/doc.go b/service/health/doc.go index 2667aca591b..a8bc42afda3 100644 --- a/service/health/doc.go +++ b/service/health/doc.go @@ -26,13 +26,13 @@ // Health User Guide. When you use the AWS Health API operations to return AWS // Health events, see the following recommendations: // -// * Use the eventScopeCode +// * Use the eventScopeCode // (https://docs.aws.amazon.com/health/latest/APIReference/API_Event.html#AWSHealth-Type-Event-eventScopeCode) // parameter to specify whether to return AWS Health events that are public or // account-specific. // -// * Use pagination to view all events from the response. -// For example, if you call the DescribeEventsForOrganization operation to get all +// * Use pagination to view all events from the response. 
For +// example, if you call the DescribeEventsForOrganization operation to get all // events in your organization, you might receive several page results. Specify the // nextToken in the next request to return more results. package health diff --git a/service/health/types/enums.go b/service/health/types/enums.go index 53b56eaeee7..0164eb621cf 100644 --- a/service/health/types/enums.go +++ b/service/health/types/enums.go @@ -42,9 +42,9 @@ type EventScopeCode string // Enum values for EventScopeCode const ( - EventScopeCodePublic EventScopeCode = "PUBLIC" - EventScopeCodeAccount_specific EventScopeCode = "ACCOUNT_SPECIFIC" - EventScopeCodeNone EventScopeCode = "NONE" + EventScopeCodePublic EventScopeCode = "PUBLIC" + EventScopeCodeAccountSpecific EventScopeCode = "ACCOUNT_SPECIFIC" + EventScopeCodeNone EventScopeCode = "NONE" ) // Values returns all known values for EventScopeCode. Note that this can be @@ -82,10 +82,10 @@ type EventTypeCategory string // Enum values for EventTypeCategory const ( - EventTypeCategoryIssue EventTypeCategory = "issue" - EventTypeCategoryAccount_notification EventTypeCategory = "accountNotification" - EventTypeCategoryScheduled_change EventTypeCategory = "scheduledChange" - EventTypeCategoryInvestigation EventTypeCategory = "investigation" + EventTypeCategoryIssue EventTypeCategory = "issue" + EventTypeCategoryAccountNotification EventTypeCategory = "accountNotification" + EventTypeCategoryScheduledChange EventTypeCategory = "scheduledChange" + EventTypeCategoryInvestigation EventTypeCategory = "investigation" ) // Values returns all known values for EventTypeCategory. Note that this can be diff --git a/service/health/types/types.go b/service/health/types/types.go index 346eb1e5233..6c9379f4846 100644 --- a/service/health/types/types.go +++ b/service/health/types/types.go @@ -107,20 +107,19 @@ type EntityFilter struct { // Summary information about an AWS Health event. AWS Health events can be public // or account-specific: // -// * Public events might be service events that are not +// * Public events might be service events that are not // specific to an AWS account. For example, if there is an issue with an AWS // Region, AWS Health provides information about the event, even if you don't use // services or resources in that Region. // -// * Account-specific events are -// specific to either your AWS account or an account in your organization. For -// example, if there's an issue with Amazon Elastic Compute Cloud in a Region that -// you use, AWS Health provides information about the event and the affected -// resources in the account. +// * Account-specific events are specific to +// either your AWS account or an account in your organization. For example, if +// there's an issue with Amazon Elastic Compute Cloud in a Region that you use, AWS +// Health provides information about the event and the affected resources in the +// account. // -// You can determine if an event is public or -// account-specific by using the eventScopeCode parameter. For more information, -// see eventScopeCode +// You can determine if an event is public or account-specific by using +// the eventScopeCode parameter. For more information, see eventScopeCode // (https://docs.aws.amazon.com/health/latest/APIReference/API_Event.html#AWSHealth-Type-Event-eventScopeCode). type Event struct { @@ -139,16 +138,16 @@ type Event struct { // This parameter specifies if the AWS Health event is a public AWS service event // or an account-specific event. 
// - // * If the eventScopeCode value is PUBLIC, then - // the affectedAccounts value is always empty. + // * If the eventScopeCode value is PUBLIC, then the + // affectedAccounts value is always empty. // - // * If the eventScopeCode value - // is ACCOUNT_SPECIFIC, then the affectedAccounts value lists the affected AWS + // * If the eventScopeCode value is + // ACCOUNT_SPECIFIC, then the affectedAccounts value lists the affected AWS // accounts in your organization. For example, if an event affects a service such // as Amazon Elastic Compute Cloud and you have AWS accounts that use that service, // those account IDs appear in the response. // - // * If the eventScopeCode value is + // * If the eventScopeCode value is // NONE, then the eventArn that you specified in the request is invalid or doesn't // exist. EventScopeCode EventScopeCode @@ -383,16 +382,16 @@ type OrganizationEvent struct { // This parameter specifies if the AWS Health event is a public AWS service event // or an account-specific event. // - // * If the eventScopeCode value is PUBLIC, then - // the affectedAccounts value is always empty. + // * If the eventScopeCode value is PUBLIC, then the + // affectedAccounts value is always empty. // - // * If the eventScopeCode value - // is ACCOUNT_SPECIFIC, then the affectedAccounts value lists the affected AWS + // * If the eventScopeCode value is + // ACCOUNT_SPECIFIC, then the affectedAccounts value lists the affected AWS // accounts in your organization. For example, if an event affects a service such // as Amazon Elastic Compute Cloud and you have AWS accounts that use that service, // those account IDs appear in the response. // - // * If the eventScopeCode value is + // * If the eventScopeCode value is // NONE, then the eventArn that you specified in the request is invalid or doesn't // exist. EventScopeCode EventScopeCode @@ -437,20 +436,19 @@ type OrganizationEventDetails struct { // Summary information about an AWS Health event. AWS Health events can be public // or account-specific: // - // * Public events might be service events that are not + // * Public events might be service events that are not // specific to an AWS account. For example, if there is an issue with an AWS // Region, AWS Health provides information about the event, even if you don't use // services or resources in that Region. // - // * Account-specific events are - // specific to either your AWS account or an account in your organization. For - // example, if there's an issue with Amazon Elastic Compute Cloud in a Region that - // you use, AWS Health provides information about the event and the affected - // resources in the account. + // * Account-specific events are specific to + // either your AWS account or an account in your organization. For example, if + // there's an issue with Amazon Elastic Compute Cloud in a Region that you use, AWS + // Health provides information about the event and the affected resources in the + // account. // - // You can determine if an event is public or - // account-specific by using the eventScopeCode parameter. For more information, - // see eventScopeCode + // You can determine if an event is public or account-specific by using + // the eventScopeCode parameter. For more information, see eventScopeCode // (https://docs.aws.amazon.com/health/latest/APIReference/API_Event.html#AWSHealth-Type-Event-eventScopeCode). 
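Illustrative sketch (not part of the generated diff): consumer code that switches on the renamed EventScopeCode constants from the health/types hunks above to decide how to interpret affectedAccounts. The package name and function are hypothetical; the import path follows the service/health/types layout this patch touches.

package healthscope

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/health/types"
)

// describeScope maps the renamed EventScopeCode constants onto the behavior
// documented above for the affectedAccounts value.
func describeScope(ev types.Event) string {
	switch ev.EventScopeCode {
	case types.EventScopeCodePublic:
		// Public event: affectedAccounts is always empty.
		return "public AWS service event"
	case types.EventScopeCodeAccountSpecific:
		// Account-specific event: affectedAccounts lists the impacted accounts.
		return "account-specific event"
	case types.EventScopeCodeNone:
		// NONE: the eventArn supplied in the request is invalid or does not exist.
		return "invalid or unknown eventArn"
	default:
		return fmt.Sprintf("unrecognized event scope %q", ev.EventScopeCode)
	}
}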
Event *Event diff --git a/service/iam/api_op_CreateOpenIDConnectProvider.go b/service/iam/api_op_CreateOpenIDConnectProvider.go index dbea5db6b04..21a0fc66d06 100644 --- a/service/iam/api_op_CreateOpenIDConnectProvider.go +++ b/service/iam/api_op_CreateOpenIDConnectProvider.go @@ -16,19 +16,19 @@ import ( // Such a policy establishes a trust relationship between AWS and the OIDC // provider. When you create the IAM OIDC provider, you specify the following: // +// * +// The URL of the OIDC identity provider (IdP) to trust // -// * The URL of the OIDC identity provider (IdP) to trust +// * A list of client IDs +// (also known as audiences) that identify the application or applications that are +// allowed to authenticate using the OIDC provider // -// * A list of client -// IDs (also known as audiences) that identify the application or applications that -// are allowed to authenticate using the OIDC provider +// * A list of thumbprints of one +// or more server certificates that the IdP uses // -// * A list of thumbprints -// of one or more server certificates that the IdP uses -// -// You get all of this -// information from the OIDC IdP that you want to use to access AWS. The trust for -// the OIDC provider is derived from the IAM provider that this operation creates. +// You get all of this information +// from the OIDC IdP that you want to use to access AWS. The trust for the OIDC +// provider is derived from the IAM provider that this operation creates. // Therefore, it is best to limit access to the CreateOpenIDConnectProvider // operation to highly privileged users. func (c *Client) CreateOpenIDConnectProvider(ctx context.Context, params *CreateOpenIDConnectProviderInput, optFns ...func(*Options)) (*CreateOpenIDConnectProviderOutput, error) { diff --git a/service/iam/api_op_CreatePolicy.go b/service/iam/api_op_CreatePolicy.go index 703e8e6b745..8831add7d6e 100644 --- a/service/iam/api_op_CreatePolicy.go +++ b/service/iam/api_op_CreatePolicy.go @@ -45,15 +45,15 @@ type CreatePolicyInput struct { // to validate this parameter is a string of characters consisting of the // following: // - // * Any printable ASCII character ranging from the space character + // * Any printable ASCII character ranging from the space character // (\u0020) through the end of the ASCII character range // - // * The printable + // * The printable // characters in the Basic Latin and Latin-1 Supplement character set (through // \u00FF) // - // * The special characters tab (\u0009), line feed (\u000A), and - // carriage return (\u000D) + // * The special characters tab (\u0009), line feed (\u000A), and carriage + // return (\u000D) // // This member is required. 
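Illustrative sketch (not part of the generated diff): a minimal client-side check of the character set the PolicyDocument comment above spells out, \u0020 through \u00FF plus tab, line feed, and carriage return. The helper name is hypothetical and this is not an SDK API.

package policycheck

// policyDocumentCharsetOK reports whether every rune in doc falls inside the
// documented set: \u0020 through \u00FF, plus tab (\u0009), line feed (\u000A),
// and carriage return (\u000D).
func policyDocumentCharsetOK(doc string) bool {
	for _, r := range doc {
		switch {
		case r == '\u0009' || r == '\u000A' || r == '\u000D':
			// allowed whitespace control characters
		case r >= '\u0020' && r <= '\u00FF':
			// printable ASCII plus Basic Latin and Latin-1 Supplement
		default:
			return false
		}
	}
	return true
}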
PolicyDocument *string diff --git a/service/iam/api_op_CreatePolicyVersion.go b/service/iam/api_op_CreatePolicyVersion.go index 9b04511fae9..a5f77cab803 100644 --- a/service/iam/api_op_CreatePolicyVersion.go +++ b/service/iam/api_op_CreatePolicyVersion.go @@ -55,15 +55,15 @@ type CreatePolicyVersionInput struct { // (http://wikipedia.org/wiki/regex) used to validate this parameter is a string of // characters consisting of the following: // - // * Any printable ASCII character - // ranging from the space character (\u0020) through the end of the ASCII character + // * Any printable ASCII character ranging + // from the space character (\u0020) through the end of the ASCII character // range // - // * The printable characters in the Basic Latin and Latin-1 Supplement + // * The printable characters in the Basic Latin and Latin-1 Supplement // character set (through \u00FF) // - // * The special characters tab (\u0009), line - // feed (\u000A), and carriage return (\u000D) + // * The special characters tab (\u0009), line feed + // (\u000A), and carriage return (\u000D) // // This member is required. PolicyDocument *string diff --git a/service/iam/api_op_CreateRole.go b/service/iam/api_op_CreateRole.go index ddb330eb9dc..3b17d6d6d1f 100644 --- a/service/iam/api_op_CreateRole.go +++ b/service/iam/api_op_CreateRole.go @@ -43,14 +43,14 @@ type CreateRoleInput struct { // pattern (http://wikipedia.org/wiki/regex) used to validate this parameter is a // string of characters consisting of the following: // - // * Any printable ASCII + // * Any printable ASCII // character ranging from the space character (\u0020) through the end of the ASCII // character range // - // * The printable characters in the Basic Latin and Latin-1 + // * The printable characters in the Basic Latin and Latin-1 // Supplement character set (through \u00FF) // - // * The special characters tab + // * The special characters tab // (\u0009), line feed (\u000A), and carriage return (\u000D) // // Upon success, the diff --git a/service/iam/api_op_DeletePolicy.go b/service/iam/api_op_DeletePolicy.go index 08dc4b23904..a9ab306816f 100644 --- a/service/iam/api_op_DeletePolicy.go +++ b/service/iam/api_op_DeletePolicy.go @@ -15,23 +15,23 @@ import ( // attached to. In addition, you must delete all the policy's versions. The // following steps describe the process for deleting a managed policy: // -// * -// Detach the policy from all users, groups, and roles that the policy is attached -// to, using the DetachUserPolicy, DetachGroupPolicy, or DetachRolePolicy API +// * Detach +// the policy from all users, groups, and roles that the policy is attached to, +// using the DetachUserPolicy, DetachGroupPolicy, or DetachRolePolicy API // operations. To list all the users, groups, and roles that a policy is attached // to, use ListEntitiesForPolicy. // -// * Delete all versions of the policy using +// * Delete all versions of the policy using // DeletePolicyVersion. To list the policy's versions, use ListPolicyVersions. You // cannot use DeletePolicyVersion to delete the version that is marked as the // default version. You delete the policy's default version in the next step of the // process. // -// * Delete the policy (this automatically deletes the policy's -// default version) using this API. +// * Delete the policy (this automatically deletes the policy's default +// version) using this API. 
// -// For information about managed policies, see -// Managed Policies and Inline Policies +// For information about managed policies, see Managed +// Policies and Inline Policies // (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) // in the IAM User Guide. func (c *Client) DeletePolicy(ctx context.Context, params *DeletePolicyInput, optFns ...func(*Options)) (*DeletePolicyOutput, error) { diff --git a/service/iam/api_op_DeleteUser.go b/service/iam/api_op_DeleteUser.go index 659e1c01f6f..ad987376735 100644 --- a/service/iam/api_op_DeleteUser.go +++ b/service/iam/api_op_DeleteUser.go @@ -16,30 +16,29 @@ import ( // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_manage.html#id_users_deleting_cli). // Before attempting to delete a user, remove the following items: // -// * Password +// * Password // (DeleteLoginProfile) // -// * Access keys (DeleteAccessKey) +// * Access keys (DeleteAccessKey) // -// * Signing -// certificate (DeleteSigningCertificate) +// * Signing certificate +// (DeleteSigningCertificate) // -// * SSH public key -// (DeleteSSHPublicKey) +// * SSH public key (DeleteSSHPublicKey) // -// * Git credentials (DeleteServiceSpecificCredential) +// * Git +// credentials (DeleteServiceSpecificCredential) // +// * Multi-factor authentication +// (MFA) device (DeactivateMFADevice, DeleteVirtualMFADevice) // -// * Multi-factor authentication (MFA) device (DeactivateMFADevice, -// DeleteVirtualMFADevice) +// * Inline policies +// (DeleteUserPolicy) // -// * Inline policies (DeleteUserPolicy) +// * Attached managed policies (DetachUserPolicy) // -// * -// Attached managed policies (DetachUserPolicy) -// -// * Group memberships -// (RemoveUserFromGroup) +// * Group +// memberships (RemoveUserFromGroup) func (c *Client) DeleteUser(ctx context.Context, params *DeleteUserInput, optFns ...func(*Options)) (*DeleteUserOutput, error) { if params == nil { params = &DeleteUserInput{} diff --git a/service/iam/api_op_GenerateOrganizationsAccessReport.go b/service/iam/api_op_GenerateOrganizationsAccessReport.go index c56663791b1..750a70d3f2b 100644 --- a/service/iam/api_op_GenerateOrganizationsAccessReport.go +++ b/service/iam/api_op_GenerateOrganizationsAccessReport.go @@ -49,35 +49,35 @@ import ( // specifying the optional AWS Organizations policy ID. The type of entity that you // specify determines the data returned in the report. // -// * Root – When you -// specify the organizations root as the entity, the resulting report lists all of -// the services allowed by SCPs that are attached to your root. For each service, -// the report includes data for all accounts in your organization except the master +// * Root – When you specify +// the organizations root as the entity, the resulting report lists all of the +// services allowed by SCPs that are attached to your root. For each service, the +// report includes data for all accounts in your organization except the master // account, because the master account is not limited by SCPs. // -// * OU – When you +// * OU – When you // specify an organizational unit (OU) as the entity, the resulting report lists // all of the services allowed by SCPs that are attached to the OU and its parents. // For each service, the report includes data for all accounts in the OU or its // children. This data excludes the master account, because the master account is // not limited by SCPs. 
// -// * Master account – When you specify the master -// account, the resulting report lists all AWS services, because the master account -// is not limited by SCPs. For each service, the report includes data for only the -// master account. +// * Master account – When you specify the master account, +// the resulting report lists all AWS services, because the master account is not +// limited by SCPs. For each service, the report includes data for only the master +// account. // -// * Account – When you specify another account as the entity, -// the resulting report lists all of the services allowed by SCPs that are attached -// to the account and its parents. For each service, the report includes data for -// only the specified account. +// * Account – When you specify another account as the entity, the +// resulting report lists all of the services allowed by SCPs that are attached to +// the account and its parents. For each service, the report includes data for only +// the specified account. // // To generate a service last accessed data report for // policies, specify an entity path and the optional AWS Organizations policy ID. // The type of entity that you specify determines the data returned for each // service. // -// * Root – When you specify the root entity and a policy ID, the +// * Root – When you specify the root entity and a policy ID, the // resulting report lists all of the services that are allowed by the specified // SCP. For each service, the report includes data for all accounts in your // organization to which the SCP applies. This data excludes the master account, @@ -85,7 +85,7 @@ import ( // any entities in the organization, then the report will return a list of services // with no data. // -// * OU – When you specify an OU entity and a policy ID, the +// * OU – When you specify an OU entity and a policy ID, the // resulting report lists all of the services that are allowed by the specified // SCP. For each service, the report includes data for all accounts in the OU or // its children to which the SCP applies. This means that other accounts outside @@ -94,14 +94,14 @@ import ( // If the SCP is not attached to the OU or one of its children, the report will // return a list of services with no data. // -// * Master account – When you specify -// the master account, the resulting report lists all AWS services, because the -// master account is not limited by SCPs. If you specify a policy ID in the CLI or -// API, the policy is ignored. For each service, the report includes data for only -// the master account. +// * Master account – When you specify the +// master account, the resulting report lists all AWS services, because the master +// account is not limited by SCPs. If you specify a policy ID in the CLI or API, +// the policy is ignored. For each service, the report includes data for only the +// master account. // -// * Account – When you specify another account entity and -// a policy ID, the resulting report lists all of the services that are allowed by +// * Account – When you specify another account entity and a +// policy ID, the resulting report lists all of the services that are allowed by // the specified SCP. For each service, the report includes data for only the // specified account. This means that other accounts in the organization that are // affected by the SCP might not be included in the data. 
If the SCP is not diff --git a/service/iam/api_op_GenerateServiceLastAccessedDetails.go b/service/iam/api_op_GenerateServiceLastAccessedDetails.go index 87d9285174e..68b4ecc30ad 100644 --- a/service/iam/api_op_GenerateServiceLastAccessedDetails.go +++ b/service/iam/api_op_GenerateServiceLastAccessedDetails.go @@ -30,21 +30,21 @@ import ( // a JobId. Use this parameter in the following operations to retrieve the // following details from your report: // -// * GetServiceLastAccessedDetails – Use -// this operation for users, groups, roles, or policies to list every AWS service -// that the resource could access using permissions policies. For each service, the +// * GetServiceLastAccessedDetails – Use this +// operation for users, groups, roles, or policies to list every AWS service that +// the resource could access using permissions policies. For each service, the // response includes information about the most recent access attempt. The JobId // returned by GenerateServiceLastAccessedDetail must be used by the same role // within a session, or by the same user when used to call // GetServiceLastAccessedDetail. // -// * GetServiceLastAccessedDetailsWithEntities – -// Use this operation for groups and policies to list information about the -// associated entities (users or roles) that attempted to access a specific AWS -// service. +// * GetServiceLastAccessedDetailsWithEntities – Use +// this operation for groups and policies to list information about the associated +// entities (users or roles) that attempted to access a specific AWS service. // -// To check the status of the GenerateServiceLastAccessedDetails request, -// use the JobId parameter in the same operations and test the JobStatus response +// To +// check the status of the GenerateServiceLastAccessedDetails request, use the +// JobId parameter in the same operations and test the JobStatus response // parameter. For additional information about the permissions policies that allow // an identity (user, group, or role) to access specific services, use the // ListPoliciesGrantingServiceAccess operation. Service last accessed data does not diff --git a/service/iam/api_op_GetContextKeysForCustomPolicy.go b/service/iam/api_op_GetContextKeysForCustomPolicy.go index 5bf693f6e90..af8ca523a7a 100644 --- a/service/iam/api_op_GetContextKeysForCustomPolicy.go +++ b/service/iam/api_op_GetContextKeysForCustomPolicy.go @@ -43,15 +43,15 @@ type GetContextKeysForCustomPolicyInput struct { // (http://wikipedia.org/wiki/regex) used to validate this parameter is a string of // characters consisting of the following: // - // * Any printable ASCII character - // ranging from the space character (\u0020) through the end of the ASCII character + // * Any printable ASCII character ranging + // from the space character (\u0020) through the end of the ASCII character // range // - // * The printable characters in the Basic Latin and Latin-1 Supplement + // * The printable characters in the Basic Latin and Latin-1 Supplement // character set (through \u00FF) // - // * The special characters tab (\u0009), line - // feed (\u000A), and carriage return (\u000D) + // * The special characters tab (\u0009), line feed + // (\u000A), and carriage return (\u000D) // // This member is required. 
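Illustrative sketch (not part of the generated diff): the generate-then-fetch flow described in the GenerateServiceLastAccessedDetails comment above, polling GetServiceLastAccessedDetails with the returned JobId until JobStatus is no longer in progress. The Arn input field, the IN_PROGRESS literal, and the polling interval are assumptions not shown in these hunks.

package lastaccessed

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go-v2/service/iam"
)

// fetchServiceLastAccessed kicks off a service last accessed report for the
// given entity and polls for the result using the JobId from the first call.
func fetchServiceLastAccessed(ctx context.Context, c *iam.Client, entityArn string) (*iam.GetServiceLastAccessedDetailsOutput, error) {
	gen, err := c.GenerateServiceLastAccessedDetails(ctx, &iam.GenerateServiceLastAccessedDetailsInput{
		Arn: &entityArn, // assumed field name for the user, group, role, or policy ARN
	})
	if err != nil {
		return nil, err
	}
	for {
		out, err := c.GetServiceLastAccessedDetails(ctx, &iam.GetServiceLastAccessedDetailsInput{
			JobId: gen.JobId,
		})
		if err != nil {
			return nil, err
		}
		if out.JobStatus != "IN_PROGRESS" { // assumed status literal; COMPLETED or FAILED ends the loop
			return out, nil
		}
		time.Sleep(2 * time.Second) // arbitrary backoff for the sketch
	}
}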
PolicyInputList []*string diff --git a/service/iam/api_op_GetContextKeysForPrincipalPolicy.go b/service/iam/api_op_GetContextKeysForPrincipalPolicy.go index 0582679e4a1..3a5965f4d08 100644 --- a/service/iam/api_op_GetContextKeysForPrincipalPolicy.go +++ b/service/iam/api_op_GetContextKeysForPrincipalPolicy.go @@ -61,15 +61,15 @@ type GetContextKeysForPrincipalPolicyInput struct { // used to validate this parameter is a string of characters consisting of the // following: // - // * Any printable ASCII character ranging from the space character + // * Any printable ASCII character ranging from the space character // (\u0020) through the end of the ASCII character range // - // * The printable + // * The printable // characters in the Basic Latin and Latin-1 Supplement character set (through // \u00FF) // - // * The special characters tab (\u0009), line feed (\u000A), and - // carriage return (\u000D) + // * The special characters tab (\u0009), line feed (\u000A), and carriage + // return (\u000D) PolicyInputList []*string } diff --git a/service/iam/api_op_GetServiceLastAccessedDetails.go b/service/iam/api_op_GetServiceLastAccessedDetails.go index 5ebdf675913..5af798f127e 100644 --- a/service/iam/api_op_GetServiceLastAccessedDetails.go +++ b/service/iam/api_op_GetServiceLastAccessedDetails.go @@ -34,22 +34,22 @@ import ( // the date and time of the last attempt. It also returns the ARN of the following // entity, depending on the resource ARN that you used to generate the report: // +// * +// User – Returns the user ARN that you used to generate the report // -// * User – Returns the user ARN that you used to generate the report -// -// * Group -// – Returns the ARN of the group member (user) that last attempted to access the +// * Group – +// Returns the ARN of the group member (user) that last attempted to access the // service // -// * Role – Returns the role ARN that you used to generate the -// report +// * Role – Returns the role ARN that you used to generate the report // -// * Policy – Returns the ARN of the user or role that last used the -// policy to attempt to access the service +// * +// Policy – Returns the ARN of the user or role that last used the policy to +// attempt to access the service // -// By default, the list is sorted by -// service namespace. If you specified ACTION_LEVEL granularity when you generated -// the report, this operation returns service and action last accessed data. This +// By default, the list is sorted by service +// namespace. If you specified ACTION_LEVEL granularity when you generated the +// report, this operation returns service and action last accessed data. This // includes the most recent access attempt for each tracked action within a // service. Otherwise, this operation returns only service data. For more // information about service and action last accessed data, see Reducing diff --git a/service/iam/api_op_GetServiceLastAccessedDetailsWithEntities.go b/service/iam/api_op_GetServiceLastAccessedDetailsWithEntities.go index ae016a16418..41d314086a7 100644 --- a/service/iam/api_op_GetServiceLastAccessedDetailsWithEntities.go +++ b/service/iam/api_op_GetServiceLastAccessedDetailsWithEntities.go @@ -18,19 +18,19 @@ import ( // of your report job and a list of entities that could have used group or policy // permissions to access the specified service. // -// * Group – For a group report, -// this operation returns a list of users in the group that could have used the -// group’s policies in an attempt to access the service. 
+// * Group – For a group report, this +// operation returns a list of users in the group that could have used the group’s +// policies in an attempt to access the service. // -// * Policy – For a -// policy report, this operation returns a list of entities (users or roles) that -// could have used the policy in an attempt to access the service. +// * Policy – For a policy report, +// this operation returns a list of entities (users or roles) that could have used +// the policy in an attempt to access the service. // -// You can also -// use this operation for user or role reports to retrieve details about those -// entities. If the operation fails, the GetServiceLastAccessedDetailsWithEntities -// operation returns the reason that it failed. By default, the list of associated -// entities is sorted by date, with the most recent access listed first. +// You can also use this operation +// for user or role reports to retrieve details about those entities. If the +// operation fails, the GetServiceLastAccessedDetailsWithEntities operation returns +// the reason that it failed. By default, the list of associated entities is sorted +// by date, with the most recent access listed first. func (c *Client) GetServiceLastAccessedDetailsWithEntities(ctx context.Context, params *GetServiceLastAccessedDetailsWithEntitiesInput, optFns ...func(*Options)) (*GetServiceLastAccessedDetailsWithEntitiesOutput, error) { if params == nil { params = &GetServiceLastAccessedDetailsWithEntitiesInput{} diff --git a/service/iam/api_op_ListPoliciesGrantingServiceAccess.go b/service/iam/api_op_ListPoliciesGrantingServiceAccess.go index 559625e5086..b984add2d1f 100644 --- a/service/iam/api_op_ListPoliciesGrantingServiceAccess.go +++ b/service/iam/api_op_ListPoliciesGrantingServiceAccess.go @@ -22,24 +22,23 @@ import ( // in the IAM User Guide. The list of policies returned by the operation depends on // the ARN of the identity that you provide. // -// * User – The list of policies +// * User – The list of policies // includes the managed and inline policies that are attached to the user directly. // The list also includes any additional managed and inline policies that are // attached to the group to which the user belongs. // -// * Group – The list of -// policies includes only the managed and inline policies that are attached to the -// group directly. Policies that are attached to the group’s user are not -// included. +// * Group – The list of policies +// includes only the managed and inline policies that are attached to the group +// directly. Policies that are attached to the group’s user are not included. // -// * Role – The list of policies includes only the managed and -// inline policies that are attached to the role. +// * +// Role – The list of policies includes only the managed and inline policies that +// are attached to the role. // -// For each managed policy, this -// operation returns the ARN and policy name. For each inline policy, it returns -// the policy name and the entity to which it is attached. Inline policies do not -// have an ARN. For more information about these policy types, see Managed Policies -// and Inline Policies +// For each managed policy, this operation returns the +// ARN and policy name. For each inline policy, it returns the policy name and the +// entity to which it is attached. Inline policies do not have an ARN. 
For more +// information about these policy types, see Managed Policies and Inline Policies // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html) // in the IAM User Guide. Policies that are attached to users and roles as // permissions boundaries are not returned. To view which managed policy is diff --git a/service/iam/api_op_PutGroupPolicy.go b/service/iam/api_op_PutGroupPolicy.go index ba195fa7c99..8551a5cd080 100644 --- a/service/iam/api_op_PutGroupPolicy.go +++ b/service/iam/api_op_PutGroupPolicy.go @@ -56,15 +56,15 @@ type PutGroupPolicyInput struct { // (http://wikipedia.org/wiki/regex) used to validate this parameter is a string of // characters consisting of the following: // - // * Any printable ASCII character - // ranging from the space character (\u0020) through the end of the ASCII character + // * Any printable ASCII character ranging + // from the space character (\u0020) through the end of the ASCII character // range // - // * The printable characters in the Basic Latin and Latin-1 Supplement + // * The printable characters in the Basic Latin and Latin-1 Supplement // character set (through \u00FF) // - // * The special characters tab (\u0009), line - // feed (\u000A), and carriage return (\u000D) + // * The special characters tab (\u0009), line feed + // (\u000A), and carriage return (\u000D) // // This member is required. PolicyDocument *string diff --git a/service/iam/api_op_PutRolePolicy.go b/service/iam/api_op_PutRolePolicy.go index f6bcd16d650..b3787037ac1 100644 --- a/service/iam/api_op_PutRolePolicy.go +++ b/service/iam/api_op_PutRolePolicy.go @@ -53,15 +53,15 @@ type PutRolePolicyInput struct { // (http://wikipedia.org/wiki/regex) used to validate this parameter is a string of // characters consisting of the following: // - // * Any printable ASCII character - // ranging from the space character (\u0020) through the end of the ASCII character + // * Any printable ASCII character ranging + // from the space character (\u0020) through the end of the ASCII character // range // - // * The printable characters in the Basic Latin and Latin-1 Supplement + // * The printable characters in the Basic Latin and Latin-1 Supplement // character set (through \u00FF) // - // * The special characters tab (\u0009), line - // feed (\u000A), and carriage return (\u000D) + // * The special characters tab (\u0009), line feed + // (\u000A), and carriage return (\u000D) // // This member is required. 
PolicyDocument *string diff --git a/service/iam/api_op_PutUserPolicy.go b/service/iam/api_op_PutUserPolicy.go index a0bf5e73ce2..acb0bce4193 100644 --- a/service/iam/api_op_PutUserPolicy.go +++ b/service/iam/api_op_PutUserPolicy.go @@ -48,15 +48,15 @@ type PutUserPolicyInput struct { // (http://wikipedia.org/wiki/regex) used to validate this parameter is a string of // characters consisting of the following: // - // * Any printable ASCII character - // ranging from the space character (\u0020) through the end of the ASCII character + // * Any printable ASCII character ranging + // from the space character (\u0020) through the end of the ASCII character // range // - // * The printable characters in the Basic Latin and Latin-1 Supplement + // * The printable characters in the Basic Latin and Latin-1 Supplement // character set (through \u00FF) // - // * The special characters tab (\u0009), line - // feed (\u000A), and carriage return (\u000D) + // * The special characters tab (\u0009), line feed + // (\u000A), and carriage return (\u000D) // // This member is required. PolicyDocument *string diff --git a/service/iam/api_op_SimulateCustomPolicy.go b/service/iam/api_op_SimulateCustomPolicy.go index 605c17adaa6..a799a04c1d5 100644 --- a/service/iam/api_op_SimulateCustomPolicy.go +++ b/service/iam/api_op_SimulateCustomPolicy.go @@ -62,15 +62,15 @@ type SimulateCustomPolicyInput struct { // (http://wikipedia.org/wiki/regex) used to validate this parameter is a string of // characters consisting of the following: // - // * Any printable ASCII character - // ranging from the space character (\u0020) through the end of the ASCII character + // * Any printable ASCII character ranging + // from the space character (\u0020) through the end of the ASCII character // range // - // * The printable characters in the Basic Latin and Latin-1 Supplement + // * The printable characters in the Basic Latin and Latin-1 Supplement // character set (through \u00FF) // - // * The special characters tab (\u0009), line - // feed (\u000A), and carriage return (\u000D) + // * The special characters tab (\u0009), line feed + // (\u000A), and carriage return (\u000D) // // This member is required. PolicyInputList []*string @@ -113,14 +113,14 @@ type SimulateCustomPolicyInput struct { // pattern (http://wikipedia.org/wiki/regex) used to validate this parameter is a // string of characters consisting of the following: // - // * Any printable ASCII + // * Any printable ASCII // character ranging from the space character (\u0020) through the end of the ASCII // character range // - // * The printable characters in the Basic Latin and Latin-1 + // * The printable characters in the Basic Latin and Latin-1 // Supplement character set (through \u00FF) // - // * The special characters tab + // * The special characters tab // (\u0009), line feed (\u000A), and carriage return (\u000D) PermissionsBoundaryPolicyInputList []*string @@ -155,23 +155,23 @@ type SimulateCustomPolicyInput struct { // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-supported-platforms.html) // in the Amazon EC2 User Guide. 
// - // * EC2-Classic-InstanceStore instance, image, + // * EC2-Classic-InstanceStore instance, image, // security-group // - // * EC2-Classic-EBS instance, image, security-group, volume + // * EC2-Classic-EBS instance, image, security-group, volume // + // * + // EC2-VPC-InstanceStore instance, image, security-group, network-interface // - // * EC2-VPC-InstanceStore instance, image, security-group, network-interface + // * + // EC2-VPC-InstanceStore-Subnet instance, image, security-group, network-interface, + // subnet // + // * EC2-VPC-EBS instance, image, security-group, network-interface, + // volume // - // * EC2-VPC-InstanceStore-Subnet instance, image, security-group, - // network-interface, subnet - // - // * EC2-VPC-EBS instance, image, security-group, - // network-interface, volume - // - // * EC2-VPC-EBS-Subnet instance, image, - // security-group, network-interface, subnet, volume + // * EC2-VPC-EBS-Subnet instance, image, security-group, network-interface, + // subnet, volume ResourceHandlingOption *string // An ARN representing the AWS account ID that specifies the owner of any simulated @@ -194,15 +194,15 @@ type SimulateCustomPolicyInput struct { // (http://wikipedia.org/wiki/regex) used to validate this parameter is a string of // characters consisting of the following: // - // * Any printable ASCII character - // ranging from the space character (\u0020) through the end of the ASCII character + // * Any printable ASCII character ranging + // from the space character (\u0020) through the end of the ASCII character // range // - // * The printable characters in the Basic Latin and Latin-1 Supplement + // * The printable characters in the Basic Latin and Latin-1 Supplement // character set (through \u00FF) // - // * The special characters tab (\u0009), line - // feed (\u000A), and carriage return (\u000D) + // * The special characters tab (\u0009), line feed + // (\u000A), and carriage return (\u000D) ResourcePolicy *string } diff --git a/service/iam/api_op_SimulatePrincipalPolicy.go b/service/iam/api_op_SimulatePrincipalPolicy.go index f57db306f20..305fb652b34 100644 --- a/service/iam/api_op_SimulatePrincipalPolicy.go +++ b/service/iam/api_op_SimulatePrincipalPolicy.go @@ -118,32 +118,32 @@ type SimulatePrincipalPolicyInput struct { // (http://wikipedia.org/wiki/regex) used to validate this parameter is a string of // characters consisting of the following: // - // * Any printable ASCII character - // ranging from the space character (\u0020) through the end of the ASCII character + // * Any printable ASCII character ranging + // from the space character (\u0020) through the end of the ASCII character // range // - // * The printable characters in the Basic Latin and Latin-1 Supplement + // * The printable characters in the Basic Latin and Latin-1 Supplement // character set (through \u00FF) // - // * The special characters tab (\u0009), line - // feed (\u000A), and carriage return (\u000D) + // * The special characters tab (\u0009), line feed + // (\u000A), and carriage return (\u000D) PermissionsBoundaryPolicyInputList []*string // An optional list of additional policy documents to include in the simulation. // Each document is specified as a string containing the complete, valid JSON text // of an IAM policy. 
The regex pattern (http://wikipedia.org/wiki/regex) used to - // validate this parameter is a string of characters consisting of the following: + // validate this parameter is a string of characters consisting of the + // following: // + // * Any printable ASCII character ranging from the space character + // (\u0020) through the end of the ASCII character range // - // * Any printable ASCII character ranging from the space character (\u0020) - // through the end of the ASCII character range + // * The printable + // characters in the Basic Latin and Latin-1 Supplement character set (through + // \u00FF) // - // * The printable characters in - // the Basic Latin and Latin-1 Supplement character set (through \u00FF) - // - // * The - // special characters tab (\u0009), line feed (\u000A), and carriage return - // (\u000D) + // * The special characters tab (\u0009), line feed (\u000A), and carriage + // return (\u000D) PolicyInputList []*string // A list of ARNs of AWS resources to include in the simulation. If this parameter @@ -175,23 +175,23 @@ type SimulatePrincipalPolicyInput struct { // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-supported-platforms.html) // in the Amazon EC2 User Guide. // - // * EC2-Classic-InstanceStore instance, image, + // * EC2-Classic-InstanceStore instance, image, // security group // - // * EC2-Classic-EBS instance, image, security group, volume - // - // - // * EC2-VPC-InstanceStore instance, image, security group, network interface + // * EC2-Classic-EBS instance, image, security group, volume // + // * + // EC2-VPC-InstanceStore instance, image, security group, network interface // - // * EC2-VPC-InstanceStore-Subnet instance, image, security group, network - // interface, subnet + // * + // EC2-VPC-InstanceStore-Subnet instance, image, security group, network interface, + // subnet // - // * EC2-VPC-EBS instance, image, security group, network - // interface, volume + // * EC2-VPC-EBS instance, image, security group, network interface, + // volume // - // * EC2-VPC-EBS-Subnet instance, image, security group, - // network interface, subnet, volume + // * EC2-VPC-EBS-Subnet instance, image, security group, network interface, + // subnet, volume ResourceHandlingOption *string // An AWS account ID that specifies the owner of any simulated resource that does @@ -211,15 +211,15 @@ type SimulatePrincipalPolicyInput struct { // (http://wikipedia.org/wiki/regex) used to validate this parameter is a string of // characters consisting of the following: // - // * Any printable ASCII character - // ranging from the space character (\u0020) through the end of the ASCII character + // * Any printable ASCII character ranging + // from the space character (\u0020) through the end of the ASCII character // range // - // * The printable characters in the Basic Latin and Latin-1 Supplement + // * The printable characters in the Basic Latin and Latin-1 Supplement // character set (through \u00FF) // - // * The special characters tab (\u0009), line - // feed (\u000A), and carriage return (\u000D) + // * The special characters tab (\u0009), line feed + // (\u000A), and carriage return (\u000D) ResourcePolicy *string } diff --git a/service/iam/api_op_TagRole.go b/service/iam/api_op_TagRole.go index 38a332d1aab..fb7019a39cb 100644 --- a/service/iam/api_op_TagRole.go +++ b/service/iam/api_op_TagRole.go @@ -17,36 +17,35 @@ import ( // associated value. 
By assigning tags to your resources, you can do the // following: // -// * Administrative grouping and discovery - Attach tags to -// resources to aid in organization and search. For example, you could search for -// all resources with the key name Project and the value MyImportantProject. Or -// search for all resources with the key name Cost Center and the value 41200. +// * Administrative grouping and discovery - Attach tags to resources +// to aid in organization and search. For example, you could search for all +// resources with the key name Project and the value MyImportantProject. Or search +// for all resources with the key name Cost Center and the value 41200. // -// -// * Access control - Reference tags in IAM user-based and resource-based policies. -// You can use tags to restrict access to only an IAM user or role that has a -// specified tag attached. You can also restrict access to only those resources -// that have a certain tag attached. For examples of policies that show how to use -// tags to control access, see Control Access Using IAM Tags +// * Access +// control - Reference tags in IAM user-based and resource-based policies. You can +// use tags to restrict access to only an IAM user or role that has a specified tag +// attached. You can also restrict access to only those resources that have a +// certain tag attached. For examples of policies that show how to use tags to +// control access, see Control Access Using IAM Tags // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_tags.html) in the IAM // User Guide. // -// * Cost allocation - Use tags to help track which individuals -// and teams are using which AWS resources. +// * Cost allocation - Use tags to help track which individuals and +// teams are using which AWS resources. // -// * Make sure that you have no -// invalid tags and that you do not exceed the allowed number of tags per role. In -// either case, the entire request fails and no tags are added to the role. +// * Make sure that you have no invalid tags +// and that you do not exceed the allowed number of tags per role. In either case, +// the entire request fails and no tags are added to the role. // -// * -// AWS always interprets the tag Value as a single string. If you need to store an -// array, you can store comma-separated values in the string. However, you must -// interpret the value in your code. +// * AWS always +// interprets the tag Value as a single string. If you need to store an array, you +// can store comma-separated values in the string. However, you must interpret the +// value in your code. // -// For more information about tagging, see -// Tagging IAM Identities -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html) in the IAM User -// Guide. +// For more information about tagging, see Tagging IAM +// Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html) in +// the IAM User Guide. func (c *Client) TagRole(ctx context.Context, params *TagRoleInput, optFns ...func(*Options)) (*TagRoleOutput, error) { if params == nil { params = &TagRoleInput{} diff --git a/service/iam/api_op_TagUser.go b/service/iam/api_op_TagUser.go index bc018afa6d4..3c6cd7d73c6 100644 --- a/service/iam/api_op_TagUser.go +++ b/service/iam/api_op_TagUser.go @@ -16,13 +16,13 @@ import ( // name and an associated value. 
By assigning tags to your resources, you can do // the following: // -// * Administrative grouping and discovery - Attach tags to +// * Administrative grouping and discovery - Attach tags to // resources to aid in organization and search. For example, you could search for // all resources with the key name Project and the value MyImportantProject. Or // search for all resources with the key name Cost Center and the value 41200. // -// -// * Access control - Reference tags in IAM user-based and resource-based policies. +// * +// Access control - Reference tags in IAM user-based and resource-based policies. // You can use tags to restrict access to only an IAM requesting user or to a role // that has a specified tag attached. You can also restrict access to only those // resources that have a certain tag attached. For examples of policies that show @@ -30,22 +30,21 @@ import ( // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_tags.html) in the IAM // User Guide. // -// * Cost allocation - Use tags to help track which individuals -// and teams are using which AWS resources. +// * Cost allocation - Use tags to help track which individuals and +// teams are using which AWS resources. // -// * Make sure that you have no -// invalid tags and that you do not exceed the allowed number of tags per role. In -// either case, the entire request fails and no tags are added to the role. +// * Make sure that you have no invalid tags +// and that you do not exceed the allowed number of tags per role. In either case, +// the entire request fails and no tags are added to the role. // -// * -// AWS always interprets the tag Value as a single string. If you need to store an -// array, you can store comma-separated values in the string. However, you must -// interpret the value in your code. +// * AWS always +// interprets the tag Value as a single string. If you need to store an array, you +// can store comma-separated values in the string. However, you must interpret the +// value in your code. // -// For more information about tagging, see -// Tagging IAM Identities -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html) in the IAM User -// Guide. +// For more information about tagging, see Tagging IAM +// Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html) in +// the IAM User Guide. func (c *Client) TagUser(ctx context.Context, params *TagUserInput, optFns ...func(*Options)) (*TagUserOutput, error) { if params == nil { params = &TagUserInput{} diff --git a/service/iam/api_op_UpdateAccountPasswordPolicy.go b/service/iam/api_op_UpdateAccountPasswordPolicy.go index 30de85a1122..d7503bd2b77 100644 --- a/service/iam/api_op_UpdateAccountPasswordPolicy.go +++ b/service/iam/api_op_UpdateAccountPasswordPolicy.go @@ -12,8 +12,8 @@ import ( // Updates the password policy settings for the AWS account. // -// * This operation -// does not support partial updates. No parameters are required, but if you do not +// * This operation does +// not support partial updates. No parameters are required, but if you do not // specify a parameter, that parameter's value reverts to its default value. See // the Request Parameters section for each parameter's default value. Also note // that some parameters do not allow the default parameter to be explicitly set. 
diff --git a/service/iam/api_op_UpdateAssumeRolePolicy.go b/service/iam/api_op_UpdateAssumeRolePolicy.go index bc59b2222ac..6ac605d13f8 100644 --- a/service/iam/api_op_UpdateAssumeRolePolicy.go +++ b/service/iam/api_op_UpdateAssumeRolePolicy.go @@ -38,15 +38,15 @@ type UpdateAssumeRolePolicyInput struct { // to IAM. The regex pattern (http://wikipedia.org/wiki/regex) used to validate // this parameter is a string of characters consisting of the following: // - // * Any + // * Any // printable ASCII character ranging from the space character (\u0020) through the // end of the ASCII character range // - // * The printable characters in the Basic - // Latin and Latin-1 Supplement character set (through \u00FF) + // * The printable characters in the Basic Latin + // and Latin-1 Supplement character set (through \u00FF) // - // * The special - // characters tab (\u0009), line feed (\u000A), and carriage return (\u000D) + // * The special characters + // tab (\u0009), line feed (\u000A), and carriage return (\u000D) // // This member is required. PolicyDocument *string diff --git a/service/iam/api_op_UpdateLoginProfile.go b/service/iam/api_op_UpdateLoginProfile.go index d61606abe7f..c11f1b70053 100644 --- a/service/iam/api_op_UpdateLoginProfile.go +++ b/service/iam/api_op_UpdateLoginProfile.go @@ -44,15 +44,15 @@ type UpdateLoginProfileInput struct { // (http://wikipedia.org/wiki/regex) used to validate this parameter is a string of // characters consisting of the following: // - // * Any printable ASCII character - // ranging from the space character (\u0020) through the end of the ASCII character + // * Any printable ASCII character ranging + // from the space character (\u0020) through the end of the ASCII character // range // - // * The printable characters in the Basic Latin and Latin-1 Supplement + // * The printable characters in the Basic Latin and Latin-1 Supplement // character set (through \u00FF) // - // * The special characters tab (\u0009), line - // feed (\u000A), and carriage return (\u000D) + // * The special characters tab (\u0009), line feed + // (\u000A), and carriage return (\u000D) // // However, the format can be further // restricted by the account administrator by setting a password policy on the AWS diff --git a/service/iam/api_op_UploadSSHPublicKey.go b/service/iam/api_op_UploadSSHPublicKey.go index e122395c8e8..6bdff7d878e 100644 --- a/service/iam/api_op_UploadSSHPublicKey.go +++ b/service/iam/api_op_UploadSSHPublicKey.go @@ -41,15 +41,15 @@ type UploadSSHPublicKeyInput struct { // regex pattern (http://wikipedia.org/wiki/regex) used to validate this parameter // is a string of characters consisting of the following: // - // * Any printable - // ASCII character ranging from the space character (\u0020) through the end of the - // ASCII character range + // * Any printable ASCII + // character ranging from the space character (\u0020) through the end of the ASCII + // character range // - // * The printable characters in the Basic Latin and - // Latin-1 Supplement character set (through \u00FF) + // * The printable characters in the Basic Latin and Latin-1 + // Supplement character set (through \u00FF) // - // * The special characters - // tab (\u0009), line feed (\u000A), and carriage return (\u000D) + // * The special characters tab + // (\u0009), line feed (\u000A), and carriage return (\u000D) // // This member is required. 
SSHPublicKeyBody *string diff --git a/service/iam/api_op_UploadServerCertificate.go b/service/iam/api_op_UploadServerCertificate.go index 369d2d292b8..b4d9c307ca8 100644 --- a/service/iam/api_op_UploadServerCertificate.go +++ b/service/iam/api_op_UploadServerCertificate.go @@ -57,14 +57,14 @@ type UploadServerCertificateInput struct { // pattern (http://wikipedia.org/wiki/regex) used to validate this parameter is a // string of characters consisting of the following: // - // * Any printable ASCII + // * Any printable ASCII // character ranging from the space character (\u0020) through the end of the ASCII // character range // - // * The printable characters in the Basic Latin and Latin-1 + // * The printable characters in the Basic Latin and Latin-1 // Supplement character set (through \u00FF) // - // * The special characters tab + // * The special characters tab // (\u0009), line feed (\u000A), and carriage return (\u000D) // // This member is required. @@ -74,15 +74,15 @@ type UploadServerCertificateInput struct { // (http://wikipedia.org/wiki/regex) used to validate this parameter is a string of // characters consisting of the following: // - // * Any printable ASCII character - // ranging from the space character (\u0020) through the end of the ASCII character + // * Any printable ASCII character ranging + // from the space character (\u0020) through the end of the ASCII character // range // - // * The printable characters in the Basic Latin and Latin-1 Supplement + // * The printable characters in the Basic Latin and Latin-1 Supplement // character set (through \u00FF) // - // * The special characters tab (\u0009), line - // feed (\u000A), and carriage return (\u000D) + // * The special characters tab (\u0009), line feed + // (\u000A), and carriage return (\u000D) // // This member is required. PrivateKey *string @@ -101,15 +101,15 @@ type UploadServerCertificateInput struct { // (http://wikipedia.org/wiki/regex) used to validate this parameter is a string of // characters consisting of the following: // - // * Any printable ASCII character - // ranging from the space character (\u0020) through the end of the ASCII character + // * Any printable ASCII character ranging + // from the space character (\u0020) through the end of the ASCII character // range // - // * The printable characters in the Basic Latin and Latin-1 Supplement + // * The printable characters in the Basic Latin and Latin-1 Supplement // character set (through \u00FF) // - // * The special characters tab (\u0009), line - // feed (\u000A), and carriage return (\u000D) + // * The special characters tab (\u0009), line feed + // (\u000A), and carriage return (\u000D) CertificateChain *string // The path for the server certificate. 
For more information about paths, see IAM diff --git a/service/iam/api_op_UploadSigningCertificate.go b/service/iam/api_op_UploadSigningCertificate.go index 6255604c426..4916bdacbbe 100644 --- a/service/iam/api_op_UploadSigningCertificate.go +++ b/service/iam/api_op_UploadSigningCertificate.go @@ -48,15 +48,15 @@ type UploadSigningCertificateInput struct { // (http://wikipedia.org/wiki/regex) used to validate this parameter is a string of // characters consisting of the following: // - // * Any printable ASCII character - // ranging from the space character (\u0020) through the end of the ASCII character + // * Any printable ASCII character ranging + // from the space character (\u0020) through the end of the ASCII character // range // - // * The printable characters in the Basic Latin and Latin-1 Supplement + // * The printable characters in the Basic Latin and Latin-1 Supplement // character set (through \u00FF) // - // * The special characters tab (\u0009), line - // feed (\u000A), and carriage return (\u000D) + // * The special characters tab (\u0009), line feed + // (\u000A), and carriage return (\u000D) // // This member is required. CertificateBody *string diff --git a/service/iam/types/enums.go b/service/iam/types/enums.go index a993c4d1ded..e7cb1c47b35 100644 --- a/service/iam/types/enums.go +++ b/service/iam/types/enums.go @@ -6,8 +6,8 @@ type AccessAdvisorUsageGranularityType string // Enum values for AccessAdvisorUsageGranularityType const ( - AccessAdvisorUsageGranularityTypeService_level AccessAdvisorUsageGranularityType = "SERVICE_LEVEL" - AccessAdvisorUsageGranularityTypeAction_level AccessAdvisorUsageGranularityType = "ACTION_LEVEL" + AccessAdvisorUsageGranularityTypeServiceLevel AccessAdvisorUsageGranularityType = "SERVICE_LEVEL" + AccessAdvisorUsageGranularityTypeActionLevel AccessAdvisorUsageGranularityType = "ACTION_LEVEL" ) // Values returns all known values for AccessAdvisorUsageGranularityType. 
Note that @@ -45,18 +45,18 @@ type ContextKeyTypeEnum string // Enum values for ContextKeyTypeEnum const ( - ContextKeyTypeEnumString ContextKeyTypeEnum = "string" - ContextKeyTypeEnumString_list ContextKeyTypeEnum = "stringList" - ContextKeyTypeEnumNumeric ContextKeyTypeEnum = "numeric" - ContextKeyTypeEnumNumeric_list ContextKeyTypeEnum = "numericList" - ContextKeyTypeEnumBoolean ContextKeyTypeEnum = "boolean" - ContextKeyTypeEnumBoolean_list ContextKeyTypeEnum = "booleanList" - ContextKeyTypeEnumIp ContextKeyTypeEnum = "ip" - ContextKeyTypeEnumIp_list ContextKeyTypeEnum = "ipList" - ContextKeyTypeEnumBinary ContextKeyTypeEnum = "binary" - ContextKeyTypeEnumBinary_list ContextKeyTypeEnum = "binaryList" - ContextKeyTypeEnumDate ContextKeyTypeEnum = "date" - ContextKeyTypeEnumDate_list ContextKeyTypeEnum = "dateList" + ContextKeyTypeEnumString ContextKeyTypeEnum = "string" + ContextKeyTypeEnumStringList ContextKeyTypeEnum = "stringList" + ContextKeyTypeEnumNumeric ContextKeyTypeEnum = "numeric" + ContextKeyTypeEnumNumericList ContextKeyTypeEnum = "numericList" + ContextKeyTypeEnumBoolean ContextKeyTypeEnum = "boolean" + ContextKeyTypeEnumBooleanList ContextKeyTypeEnum = "booleanList" + ContextKeyTypeEnumIp ContextKeyTypeEnum = "ip" + ContextKeyTypeEnumIpList ContextKeyTypeEnum = "ipList" + ContextKeyTypeEnumBinary ContextKeyTypeEnum = "binary" + ContextKeyTypeEnumBinaryList ContextKeyTypeEnum = "binaryList" + ContextKeyTypeEnumDate ContextKeyTypeEnum = "date" + ContextKeyTypeEnumDateList ContextKeyTypeEnum = "dateList" ) // Values returns all known values for ContextKeyTypeEnum. Note that this can be @@ -83,10 +83,10 @@ type DeletionTaskStatusType string // Enum values for DeletionTaskStatusType const ( - DeletionTaskStatusTypeSucceeded DeletionTaskStatusType = "SUCCEEDED" - DeletionTaskStatusTypeIn_progress DeletionTaskStatusType = "IN_PROGRESS" - DeletionTaskStatusTypeFailed DeletionTaskStatusType = "FAILED" - DeletionTaskStatusTypeNot_started DeletionTaskStatusType = "NOT_STARTED" + DeletionTaskStatusTypeSucceeded DeletionTaskStatusType = "SUCCEEDED" + DeletionTaskStatusTypeInProgress DeletionTaskStatusType = "IN_PROGRESS" + DeletionTaskStatusTypeFailed DeletionTaskStatusType = "FAILED" + DeletionTaskStatusTypeNotStarted DeletionTaskStatusType = "NOT_STARTED" ) // Values returns all known values for DeletionTaskStatusType. Note that this can @@ -165,9 +165,9 @@ type JobStatusType string // Enum values for JobStatusType const ( - JobStatusTypeIn_progress JobStatusType = "IN_PROGRESS" - JobStatusTypeCompleted JobStatusType = "COMPLETED" - JobStatusTypeFailed JobStatusType = "FAILED" + JobStatusTypeInProgress JobStatusType = "IN_PROGRESS" + JobStatusTypeCompleted JobStatusType = "COMPLETED" + JobStatusTypeFailed JobStatusType = "FAILED" ) // Values returns all known values for JobStatusType. 
Note that this can be @@ -202,9 +202,9 @@ type PolicyEvaluationDecisionType string // Enum values for PolicyEvaluationDecisionType const ( - PolicyEvaluationDecisionTypeAllowed PolicyEvaluationDecisionType = "allowed" - PolicyEvaluationDecisionTypeExplicit_deny PolicyEvaluationDecisionType = "explicitDeny" - PolicyEvaluationDecisionTypeImplicit_deny PolicyEvaluationDecisionType = "implicitDeny" + PolicyEvaluationDecisionTypeAllowed PolicyEvaluationDecisionType = "allowed" + PolicyEvaluationDecisionTypeExplicitDeny PolicyEvaluationDecisionType = "explicitDeny" + PolicyEvaluationDecisionTypeImplicitDeny PolicyEvaluationDecisionType = "implicitDeny" ) // Values returns all known values for PolicyEvaluationDecisionType. Note that this @@ -262,13 +262,13 @@ type PolicySourceType string // Enum values for PolicySourceType const ( - PolicySourceTypeUser PolicySourceType = "user" - PolicySourceTypeGroup PolicySourceType = "group" - PolicySourceTypeRole PolicySourceType = "role" - PolicySourceTypeAws_managed PolicySourceType = "aws-managed" - PolicySourceTypeUser_managed PolicySourceType = "user-managed" - PolicySourceTypeResource PolicySourceType = "resource" - PolicySourceTypeNone PolicySourceType = "none" + PolicySourceTypeUser PolicySourceType = "user" + PolicySourceTypeGroup PolicySourceType = "group" + PolicySourceTypeRole PolicySourceType = "role" + PolicySourceTypeAwsManaged PolicySourceType = "aws-managed" + PolicySourceTypeUserManaged PolicySourceType = "user-managed" + PolicySourceTypeResource PolicySourceType = "resource" + PolicySourceTypeNone PolicySourceType = "none" ) // Values returns all known values for PolicySourceType. Note that this can be @@ -362,10 +362,10 @@ type SortKeyType string // Enum values for SortKeyType const ( - SortKeyTypeService_namespace_ascending SortKeyType = "SERVICE_NAMESPACE_ASCENDING" - SortKeyTypeService_namespace_descending SortKeyType = "SERVICE_NAMESPACE_DESCENDING" - SortKeyTypeLast_authenticated_time_ascending SortKeyType = "LAST_AUTHENTICATED_TIME_ASCENDING" - SortKeyTypeLast_authenticated_time_descending SortKeyType = "LAST_AUTHENTICATED_TIME_DESCENDING" + SortKeyTypeServiceNamespaceAscending SortKeyType = "SERVICE_NAMESPACE_ASCENDING" + SortKeyTypeServiceNamespaceDescending SortKeyType = "SERVICE_NAMESPACE_DESCENDING" + SortKeyTypeLastAuthenticatedTimeAscending SortKeyType = "LAST_AUTHENTICATED_TIME_ASCENDING" + SortKeyTypeLastAuthenticatedTimeDescending SortKeyType = "LAST_AUTHENTICATED_TIME_DESCENDING" ) // Values returns all known values for SortKeyType. Note that this can be expanded diff --git a/service/iam/types/types.go b/service/iam/types/types.go index c9da4a5553b..87ec671f3c4 100644 --- a/service/iam/types/types.go +++ b/service/iam/types/types.go @@ -101,14 +101,13 @@ type AccessKeyLastUsed struct { // (http://www.iso.org/iso/iso8601), when the access key was most recently used. // This field is null in the following situations: // - // * The user does not have an + // * The user does not have an // access key. // - // * An access key exists but has not been used since IAM began + // * An access key exists but has not been used since IAM began // tracking this information. // - // * There is no sign-in data associated with the - // user. + // * There is no sign-in data associated with the user. // // This member is required. LastUsedDate *time.Time @@ -116,16 +115,16 @@ type AccessKeyLastUsed struct { // The AWS Region where this access key was most recently used. 
The value for this // field is "N/A" in the following situations: // - // * The user does not have an - // access key. + // * The user does not have an access + // key. // - // * An access key exists but has not been used since IAM began - // tracking this information. + // * An access key exists but has not been used since IAM began tracking this + // information. // - // * There is no sign-in data associated with the - // user. + // * There is no sign-in data associated with the user. // - // For more information about AWS Regions, see Regions and Endpoints + // For more + // information about AWS Regions, see Regions and Endpoints // (https://docs.aws.amazon.com/general/latest/gr/rande.html) in the Amazon Web // Services General Reference. // @@ -135,14 +134,14 @@ type AccessKeyLastUsed struct { // The name of the AWS service with which this access key was most recently used. // The value of this field is "N/A" in the following situations: // - // * The user - // does not have an access key. + // * The user does + // not have an access key. // - // * An access key exists but has not been used - // since IAM started tracking this information. + // * An access key exists but has not been used since IAM + // started tracking this information. // - // * There is no sign-in data - // associated with the user. + // * There is no sign-in data associated with + // the user. // // This member is required. ServiceName *string @@ -379,12 +378,12 @@ type EvaluationResult struct { // Contains information about an IAM group entity. This data type is used as a // response element in the following operations: // -// * CreateGroup +// * CreateGroup // -// * -// GetGroup +// * GetGroup // -// * ListGroups +// * +// ListGroups type Group struct { // The Amazon Resource Name (ARN) specifying the group. For more information about @@ -462,15 +461,14 @@ type GroupDetail struct { // Contains information about an instance profile. This data type is used as a // response element in the following operations: // -// * CreateInstanceProfile +// * CreateInstanceProfile // +// * +// GetInstanceProfile // -// * GetInstanceProfile +// * ListInstanceProfiles // -// * ListInstanceProfiles -// -// * -// ListInstanceProfilesForRole +// * ListInstanceProfilesForRole type InstanceProfile struct { // The Amazon Resource Name (ARN) specifying the instance profile. For more @@ -1540,12 +1538,12 @@ type TrackedActionLastAccessed struct { // Contains information about an IAM user entity. This data type is used as a // response element in the following operations: // -// * CreateUser -// -// * GetUser +// * CreateUser // +// * GetUser // -// * ListUsers +// * +// ListUsers type User struct { // The Amazon Resource Name (ARN) that identifies the user. For more information @@ -1592,16 +1590,16 @@ type User struct { // value), then it indicates that they never signed in with a password. This can be // because: // - // * The user never had a password. + // * The user never had a password. // - // * A password exists but has - // not been used since IAM started tracking this information on October 20, - // 2014. + // * A password exists but has not been + // used since IAM started tracking this information on October 20, 2014. // - // A null value does not mean that the user never had a password. Also, if - // the user does not currently have a password but had one in the past, then this - // field contains the date and time the most recent password was used. This value - // is returned only in the GetUser and ListUsers operations. 
+ // A null + // value does not mean that the user never had a password. Also, if the user does + // not currently have a password but had one in the past, then this field contains + // the date and time the most recent password was used. This value is returned only + // in the GetUser and ListUsers operations. PasswordLastUsed *time.Time // The ARN of the policy used to set the permissions boundary for the user. For diff --git a/service/identitystore/types/enums.go b/service/identitystore/types/enums.go index ae9c949ba4d..4579ede05df 100644 --- a/service/identitystore/types/enums.go +++ b/service/identitystore/types/enums.go @@ -6,9 +6,9 @@ type ResourceType string // Enum values for ResourceType const ( - ResourceTypeGroup ResourceType = "GROUP" - ResourceTypeUser ResourceType = "USER" - ResourceTypeIdentity_store ResourceType = "IDENTITY_STORE" + ResourceTypeGroup ResourceType = "GROUP" + ResourceTypeUser ResourceType = "USER" + ResourceTypeIdentityStore ResourceType = "IDENTITY_STORE" ) // Values returns all known values for ResourceType. Note that this can be expanded diff --git a/service/imagebuilder/api_op_ListDistributionConfigurations.go b/service/imagebuilder/api_op_ListDistributionConfigurations.go index a640db966e8..5553ca2fa04 100644 --- a/service/imagebuilder/api_op_ListDistributionConfigurations.go +++ b/service/imagebuilder/api_op_ListDistributionConfigurations.go @@ -31,7 +31,7 @@ type ListDistributionConfigurationsInput struct { // The filters. // - // * name - The name of this distribution configuration. + // * name - The name of this distribution configuration. Filters []*types.Filter // The maximum items to return in a request. diff --git a/service/imagebuilder/types/enums.go b/service/imagebuilder/types/enums.go index a85e2347d7a..9fd4463b1fa 100644 --- a/service/imagebuilder/types/enums.go +++ b/service/imagebuilder/types/enums.go @@ -122,8 +122,8 @@ type PipelineExecutionStartCondition string // Enum values for PipelineExecutionStartCondition const ( - PipelineExecutionStartConditionExpression_match_only PipelineExecutionStartCondition = "EXPRESSION_MATCH_ONLY" - PipelineExecutionStartConditionExpression_match_and_dependency_updates_available PipelineExecutionStartCondition = "EXPRESSION_MATCH_AND_DEPENDENCY_UPDATES_AVAILABLE" + PipelineExecutionStartConditionExpressionMatchOnly PipelineExecutionStartCondition = "EXPRESSION_MATCH_ONLY" + PipelineExecutionStartConditionExpressionMatchAndDependencyUpdatesAvailable PipelineExecutionStartCondition = "EXPRESSION_MATCH_AND_DEPENDENCY_UPDATES_AVAILABLE" ) // Values returns all known values for PipelineExecutionStartCondition. 
Note that diff --git a/service/inspector/types/enums.go b/service/inspector/types/enums.go index decd6e62c87..3e2951ded53 100644 --- a/service/inspector/types/enums.go +++ b/service/inspector/types/enums.go @@ -6,14 +6,14 @@ type AccessDeniedErrorCode string // Enum values for AccessDeniedErrorCode const ( - AccessDeniedErrorCodeAccess_denied_to_assessment_target AccessDeniedErrorCode = "ACCESS_DENIED_TO_ASSESSMENT_TARGET" - AccessDeniedErrorCodeAccess_denied_to_assessment_template AccessDeniedErrorCode = "ACCESS_DENIED_TO_ASSESSMENT_TEMPLATE" - AccessDeniedErrorCodeAccess_denied_to_assessment_run AccessDeniedErrorCode = "ACCESS_DENIED_TO_ASSESSMENT_RUN" - AccessDeniedErrorCodeAccess_denied_to_finding AccessDeniedErrorCode = "ACCESS_DENIED_TO_FINDING" - AccessDeniedErrorCodeAccess_denied_to_resource_group AccessDeniedErrorCode = "ACCESS_DENIED_TO_RESOURCE_GROUP" - AccessDeniedErrorCodeAccess_denied_to_rules_package AccessDeniedErrorCode = "ACCESS_DENIED_TO_RULES_PACKAGE" - AccessDeniedErrorCodeAccess_denied_to_sns_topic AccessDeniedErrorCode = "ACCESS_DENIED_TO_SNS_TOPIC" - AccessDeniedErrorCodeAccess_denied_to_iam_role AccessDeniedErrorCode = "ACCESS_DENIED_TO_IAM_ROLE" + AccessDeniedErrorCodeAccessDeniedToAssessmentTarget AccessDeniedErrorCode = "ACCESS_DENIED_TO_ASSESSMENT_TARGET" + AccessDeniedErrorCodeAccessDeniedToAssessmentTemplate AccessDeniedErrorCode = "ACCESS_DENIED_TO_ASSESSMENT_TEMPLATE" + AccessDeniedErrorCodeAccessDeniedToAssessmentRun AccessDeniedErrorCode = "ACCESS_DENIED_TO_ASSESSMENT_RUN" + AccessDeniedErrorCodeAccessDeniedToFinding AccessDeniedErrorCode = "ACCESS_DENIED_TO_FINDING" + AccessDeniedErrorCodeAccessDeniedToResourceGroup AccessDeniedErrorCode = "ACCESS_DENIED_TO_RESOURCE_GROUP" + AccessDeniedErrorCodeAccessDeniedToRulesPackage AccessDeniedErrorCode = "ACCESS_DENIED_TO_RULES_PACKAGE" + AccessDeniedErrorCodeAccessDeniedToSnsTopic AccessDeniedErrorCode = "ACCESS_DENIED_TO_SNS_TOPIC" + AccessDeniedErrorCodeAccessDeniedToIamRole AccessDeniedErrorCode = "ACCESS_DENIED_TO_IAM_ROLE" ) // Values returns all known values for AccessDeniedErrorCode. Note that this can be @@ -82,10 +82,10 @@ type AssessmentRunNotificationSnsStatusCode string // Enum values for AssessmentRunNotificationSnsStatusCode const ( - AssessmentRunNotificationSnsStatusCodeSuccess AssessmentRunNotificationSnsStatusCode = "SUCCESS" - AssessmentRunNotificationSnsStatusCodeTopic_does_not_exist AssessmentRunNotificationSnsStatusCode = "TOPIC_DOES_NOT_EXIST" - AssessmentRunNotificationSnsStatusCodeAccess_denied AssessmentRunNotificationSnsStatusCode = "ACCESS_DENIED" - AssessmentRunNotificationSnsStatusCodeInternal_error AssessmentRunNotificationSnsStatusCode = "INTERNAL_ERROR" + AssessmentRunNotificationSnsStatusCodeSuccess AssessmentRunNotificationSnsStatusCode = "SUCCESS" + AssessmentRunNotificationSnsStatusCodeTopicDoesNotExist AssessmentRunNotificationSnsStatusCode = "TOPIC_DOES_NOT_EXIST" + AssessmentRunNotificationSnsStatusCodeAccessDenied AssessmentRunNotificationSnsStatusCode = "ACCESS_DENIED" + AssessmentRunNotificationSnsStatusCodeInternalError AssessmentRunNotificationSnsStatusCode = "INTERNAL_ERROR" ) // Values returns all known values for AssessmentRunNotificationSnsStatusCode. 
Note @@ -105,19 +105,19 @@ type AssessmentRunState string // Enum values for AssessmentRunState const ( - AssessmentRunStateCreated AssessmentRunState = "CREATED" - AssessmentRunStateStart_data_collection_pending AssessmentRunState = "START_DATA_COLLECTION_PENDING" - AssessmentRunStateStart_data_collection_in_progress AssessmentRunState = "START_DATA_COLLECTION_IN_PROGRESS" - AssessmentRunStateCollecting_data AssessmentRunState = "COLLECTING_DATA" - AssessmentRunStateStop_data_collection_pending AssessmentRunState = "STOP_DATA_COLLECTION_PENDING" - AssessmentRunStateData_collected AssessmentRunState = "DATA_COLLECTED" - AssessmentRunStateStart_evaluating_rules_pending AssessmentRunState = "START_EVALUATING_RULES_PENDING" - AssessmentRunStateEvaluating_rules AssessmentRunState = "EVALUATING_RULES" - AssessmentRunStateFailed AssessmentRunState = "FAILED" - AssessmentRunStateError AssessmentRunState = "ERROR" - AssessmentRunStateCompleted AssessmentRunState = "COMPLETED" - AssessmentRunStateCompleted_with_errors AssessmentRunState = "COMPLETED_WITH_ERRORS" - AssessmentRunStateCanceled AssessmentRunState = "CANCELED" + AssessmentRunStateCreated AssessmentRunState = "CREATED" + AssessmentRunStateStartDataCollectionPending AssessmentRunState = "START_DATA_COLLECTION_PENDING" + AssessmentRunStateStartDataCollectionInProgress AssessmentRunState = "START_DATA_COLLECTION_IN_PROGRESS" + AssessmentRunStateCollectingData AssessmentRunState = "COLLECTING_DATA" + AssessmentRunStateStopDataCollectionPending AssessmentRunState = "STOP_DATA_COLLECTION_PENDING" + AssessmentRunStateDataCollected AssessmentRunState = "DATA_COLLECTED" + AssessmentRunStateStartEvaluatingRulesPending AssessmentRunState = "START_EVALUATING_RULES_PENDING" + AssessmentRunStateEvaluatingRules AssessmentRunState = "EVALUATING_RULES" + AssessmentRunStateFailed AssessmentRunState = "FAILED" + AssessmentRunStateError AssessmentRunState = "ERROR" + AssessmentRunStateCompleted AssessmentRunState = "COMPLETED" + AssessmentRunStateCompletedWithErrors AssessmentRunState = "COMPLETED_WITH_ERRORS" + AssessmentRunStateCanceled AssessmentRunState = "CANCELED" ) // Values returns all known values for AssessmentRunState. Note that this can be @@ -145,7 +145,7 @@ type AssetType string // Enum values for AssetType const ( - AssetTypeEc2_instance AssetType = "ec2-instance" + AssetTypeEc2Instance AssetType = "ec2-instance" ) // Values returns all known values for AssetType. Note that this can be expanded in @@ -161,12 +161,12 @@ type FailedItemErrorCode string // Enum values for FailedItemErrorCode const ( - FailedItemErrorCodeInvalid_arn FailedItemErrorCode = "INVALID_ARN" - FailedItemErrorCodeDuplicate_arn FailedItemErrorCode = "DUPLICATE_ARN" - FailedItemErrorCodeItem_does_not_exist FailedItemErrorCode = "ITEM_DOES_NOT_EXIST" - FailedItemErrorCodeAccess_denied FailedItemErrorCode = "ACCESS_DENIED" - FailedItemErrorCodeLimit_exceeded FailedItemErrorCode = "LIMIT_EXCEEDED" - FailedItemErrorCodeInternal_error FailedItemErrorCode = "INTERNAL_ERROR" + FailedItemErrorCodeInvalidArn FailedItemErrorCode = "INVALID_ARN" + FailedItemErrorCodeDuplicateArn FailedItemErrorCode = "DUPLICATE_ARN" + FailedItemErrorCodeItemDoesNotExist FailedItemErrorCode = "ITEM_DOES_NOT_EXIST" + FailedItemErrorCodeAccessDenied FailedItemErrorCode = "ACCESS_DENIED" + FailedItemErrorCodeLimitExceeded FailedItemErrorCode = "LIMIT_EXCEEDED" + FailedItemErrorCodeInternalError FailedItemErrorCode = "INTERNAL_ERROR" ) // Values returns all known values for FailedItemErrorCode. 
Note that this can be @@ -187,11 +187,11 @@ type InspectorEvent string // Enum values for InspectorEvent const ( - InspectorEventAssessment_run_started InspectorEvent = "ASSESSMENT_RUN_STARTED" - InspectorEventAssessment_run_completed InspectorEvent = "ASSESSMENT_RUN_COMPLETED" - InspectorEventAssessment_run_state_changed InspectorEvent = "ASSESSMENT_RUN_STATE_CHANGED" - InspectorEventFinding_reported InspectorEvent = "FINDING_REPORTED" - InspectorEventOther InspectorEvent = "OTHER" + InspectorEventAssessmentRunStarted InspectorEvent = "ASSESSMENT_RUN_STARTED" + InspectorEventAssessmentRunCompleted InspectorEvent = "ASSESSMENT_RUN_COMPLETED" + InspectorEventAssessmentRunStateChanged InspectorEvent = "ASSESSMENT_RUN_STATE_CHANGED" + InspectorEventFindingReported InspectorEvent = "FINDING_REPORTED" + InspectorEventOther InspectorEvent = "OTHER" ) // Values returns all known values for InspectorEvent. Note that this can be @@ -211,8 +211,8 @@ type InvalidCrossAccountRoleErrorCode string // Enum values for InvalidCrossAccountRoleErrorCode const ( - InvalidCrossAccountRoleErrorCodeRole_does_not_exist_or_invalid_trust_relationship InvalidCrossAccountRoleErrorCode = "ROLE_DOES_NOT_EXIST_OR_INVALID_TRUST_RELATIONSHIP" - InvalidCrossAccountRoleErrorCodeRole_does_not_have_correct_policy InvalidCrossAccountRoleErrorCode = "ROLE_DOES_NOT_HAVE_CORRECT_POLICY" + InvalidCrossAccountRoleErrorCodeRoleDoesNotExistOrInvalidTrustRelationship InvalidCrossAccountRoleErrorCode = "ROLE_DOES_NOT_EXIST_OR_INVALID_TRUST_RELATIONSHIP" + InvalidCrossAccountRoleErrorCodeRoleDoesNotHaveCorrectPolicy InvalidCrossAccountRoleErrorCode = "ROLE_DOES_NOT_HAVE_CORRECT_POLICY" ) // Values returns all known values for InvalidCrossAccountRoleErrorCode. Note that @@ -230,60 +230,60 @@ type InvalidInputErrorCode string // Enum values for InvalidInputErrorCode const ( - InvalidInputErrorCodeInvalid_assessment_target_arn InvalidInputErrorCode = "INVALID_ASSESSMENT_TARGET_ARN" - InvalidInputErrorCodeInvalid_assessment_template_arn InvalidInputErrorCode = "INVALID_ASSESSMENT_TEMPLATE_ARN" - InvalidInputErrorCodeInvalid_assessment_run_arn InvalidInputErrorCode = "INVALID_ASSESSMENT_RUN_ARN" - InvalidInputErrorCodeInvalid_finding_arn InvalidInputErrorCode = "INVALID_FINDING_ARN" - InvalidInputErrorCodeInvalid_resource_group_arn InvalidInputErrorCode = "INVALID_RESOURCE_GROUP_ARN" - InvalidInputErrorCodeInvalid_rules_package_arn InvalidInputErrorCode = "INVALID_RULES_PACKAGE_ARN" - InvalidInputErrorCodeInvalid_resource_arn InvalidInputErrorCode = "INVALID_RESOURCE_ARN" - InvalidInputErrorCodeInvalid_sns_topic_arn InvalidInputErrorCode = "INVALID_SNS_TOPIC_ARN" - InvalidInputErrorCodeInvalid_iam_role_arn InvalidInputErrorCode = "INVALID_IAM_ROLE_ARN" - InvalidInputErrorCodeInvalid_assessment_target_name InvalidInputErrorCode = "INVALID_ASSESSMENT_TARGET_NAME" - InvalidInputErrorCodeInvalid_assessment_target_name_pattern InvalidInputErrorCode = "INVALID_ASSESSMENT_TARGET_NAME_PATTERN" - InvalidInputErrorCodeInvalid_assessment_template_name InvalidInputErrorCode = "INVALID_ASSESSMENT_TEMPLATE_NAME" - InvalidInputErrorCodeInvalid_assessment_template_name_pattern InvalidInputErrorCode = "INVALID_ASSESSMENT_TEMPLATE_NAME_PATTERN" - InvalidInputErrorCodeInvalid_assessment_template_duration InvalidInputErrorCode = "INVALID_ASSESSMENT_TEMPLATE_DURATION" - InvalidInputErrorCodeInvalid_assessment_template_duration_range InvalidInputErrorCode = "INVALID_ASSESSMENT_TEMPLATE_DURATION_RANGE" - InvalidInputErrorCodeInvalid_assessment_run_duration_range 
InvalidInputErrorCode = "INVALID_ASSESSMENT_RUN_DURATION_RANGE" - InvalidInputErrorCodeInvalid_assessment_run_start_time_range InvalidInputErrorCode = "INVALID_ASSESSMENT_RUN_START_TIME_RANGE" - InvalidInputErrorCodeInvalid_assessment_run_completion_time_range InvalidInputErrorCode = "INVALID_ASSESSMENT_RUN_COMPLETION_TIME_RANGE" - InvalidInputErrorCodeInvalid_assessment_run_state_change_time_range InvalidInputErrorCode = "INVALID_ASSESSMENT_RUN_STATE_CHANGE_TIME_RANGE" - InvalidInputErrorCodeInvalid_assessment_run_state InvalidInputErrorCode = "INVALID_ASSESSMENT_RUN_STATE" - InvalidInputErrorCodeInvalid_tag InvalidInputErrorCode = "INVALID_TAG" - InvalidInputErrorCodeInvalid_tag_key InvalidInputErrorCode = "INVALID_TAG_KEY" - InvalidInputErrorCodeInvalid_tag_value InvalidInputErrorCode = "INVALID_TAG_VALUE" - InvalidInputErrorCodeInvalid_resource_group_tag_key InvalidInputErrorCode = "INVALID_RESOURCE_GROUP_TAG_KEY" - InvalidInputErrorCodeInvalid_resource_group_tag_value InvalidInputErrorCode = "INVALID_RESOURCE_GROUP_TAG_VALUE" - InvalidInputErrorCodeInvalid_attribute InvalidInputErrorCode = "INVALID_ATTRIBUTE" - InvalidInputErrorCodeInvalid_user_attribute InvalidInputErrorCode = "INVALID_USER_ATTRIBUTE" - InvalidInputErrorCodeInvalid_user_attribute_key InvalidInputErrorCode = "INVALID_USER_ATTRIBUTE_KEY" - InvalidInputErrorCodeInvalid_user_attribute_value InvalidInputErrorCode = "INVALID_USER_ATTRIBUTE_VALUE" - InvalidInputErrorCodeInvalid_pagination_token InvalidInputErrorCode = "INVALID_PAGINATION_TOKEN" - InvalidInputErrorCodeInvalid_max_results InvalidInputErrorCode = "INVALID_MAX_RESULTS" - InvalidInputErrorCodeInvalid_agent_id InvalidInputErrorCode = "INVALID_AGENT_ID" - InvalidInputErrorCodeInvalid_auto_scaling_group InvalidInputErrorCode = "INVALID_AUTO_SCALING_GROUP" - InvalidInputErrorCodeInvalid_rule_name InvalidInputErrorCode = "INVALID_RULE_NAME" - InvalidInputErrorCodeInvalid_severity InvalidInputErrorCode = "INVALID_SEVERITY" - InvalidInputErrorCodeInvalid_locale InvalidInputErrorCode = "INVALID_LOCALE" - InvalidInputErrorCodeInvalid_event InvalidInputErrorCode = "INVALID_EVENT" - InvalidInputErrorCodeAssessment_target_name_already_taken InvalidInputErrorCode = "ASSESSMENT_TARGET_NAME_ALREADY_TAKEN" - InvalidInputErrorCodeAssessment_template_name_already_taken InvalidInputErrorCode = "ASSESSMENT_TEMPLATE_NAME_ALREADY_TAKEN" - InvalidInputErrorCodeInvalid_number_of_assessment_target_arns InvalidInputErrorCode = "INVALID_NUMBER_OF_ASSESSMENT_TARGET_ARNS" - InvalidInputErrorCodeInvalid_number_of_assessment_template_arns InvalidInputErrorCode = "INVALID_NUMBER_OF_ASSESSMENT_TEMPLATE_ARNS" - InvalidInputErrorCodeInvalid_number_of_assessment_run_arns InvalidInputErrorCode = "INVALID_NUMBER_OF_ASSESSMENT_RUN_ARNS" - InvalidInputErrorCodeInvalid_number_of_finding_arns InvalidInputErrorCode = "INVALID_NUMBER_OF_FINDING_ARNS" - InvalidInputErrorCodeInvalid_number_of_resource_group_arns InvalidInputErrorCode = "INVALID_NUMBER_OF_RESOURCE_GROUP_ARNS" - InvalidInputErrorCodeInvalid_number_of_rules_package_arns InvalidInputErrorCode = "INVALID_NUMBER_OF_RULES_PACKAGE_ARNS" - InvalidInputErrorCodeInvalid_number_of_assessment_run_states InvalidInputErrorCode = "INVALID_NUMBER_OF_ASSESSMENT_RUN_STATES" - InvalidInputErrorCodeInvalid_number_of_tags InvalidInputErrorCode = "INVALID_NUMBER_OF_TAGS" - InvalidInputErrorCodeInvalid_number_of_resource_group_tags InvalidInputErrorCode = "INVALID_NUMBER_OF_RESOURCE_GROUP_TAGS" - InvalidInputErrorCodeInvalid_number_of_attributes 
InvalidInputErrorCode = "INVALID_NUMBER_OF_ATTRIBUTES" - InvalidInputErrorCodeInvalid_number_of_user_attributes InvalidInputErrorCode = "INVALID_NUMBER_OF_USER_ATTRIBUTES" - InvalidInputErrorCodeInvalid_number_of_agent_ids InvalidInputErrorCode = "INVALID_NUMBER_OF_AGENT_IDS" - InvalidInputErrorCodeInvalid_number_of_auto_scaling_groups InvalidInputErrorCode = "INVALID_NUMBER_OF_AUTO_SCALING_GROUPS" - InvalidInputErrorCodeInvalid_number_of_rule_names InvalidInputErrorCode = "INVALID_NUMBER_OF_RULE_NAMES" - InvalidInputErrorCodeInvalid_number_of_severities InvalidInputErrorCode = "INVALID_NUMBER_OF_SEVERITIES" + InvalidInputErrorCodeInvalidAssessmentTargetArn InvalidInputErrorCode = "INVALID_ASSESSMENT_TARGET_ARN" + InvalidInputErrorCodeInvalidAssessmentTemplateArn InvalidInputErrorCode = "INVALID_ASSESSMENT_TEMPLATE_ARN" + InvalidInputErrorCodeInvalidAssessmentRunArn InvalidInputErrorCode = "INVALID_ASSESSMENT_RUN_ARN" + InvalidInputErrorCodeInvalidFindingArn InvalidInputErrorCode = "INVALID_FINDING_ARN" + InvalidInputErrorCodeInvalidResourceGroupArn InvalidInputErrorCode = "INVALID_RESOURCE_GROUP_ARN" + InvalidInputErrorCodeInvalidRulesPackageArn InvalidInputErrorCode = "INVALID_RULES_PACKAGE_ARN" + InvalidInputErrorCodeInvalidResourceArn InvalidInputErrorCode = "INVALID_RESOURCE_ARN" + InvalidInputErrorCodeInvalidSnsTopicArn InvalidInputErrorCode = "INVALID_SNS_TOPIC_ARN" + InvalidInputErrorCodeInvalidIamRoleArn InvalidInputErrorCode = "INVALID_IAM_ROLE_ARN" + InvalidInputErrorCodeInvalidAssessmentTargetName InvalidInputErrorCode = "INVALID_ASSESSMENT_TARGET_NAME" + InvalidInputErrorCodeInvalidAssessmentTargetNamePattern InvalidInputErrorCode = "INVALID_ASSESSMENT_TARGET_NAME_PATTERN" + InvalidInputErrorCodeInvalidAssessmentTemplateName InvalidInputErrorCode = "INVALID_ASSESSMENT_TEMPLATE_NAME" + InvalidInputErrorCodeInvalidAssessmentTemplateNamePattern InvalidInputErrorCode = "INVALID_ASSESSMENT_TEMPLATE_NAME_PATTERN" + InvalidInputErrorCodeInvalidAssessmentTemplateDuration InvalidInputErrorCode = "INVALID_ASSESSMENT_TEMPLATE_DURATION" + InvalidInputErrorCodeInvalidAssessmentTemplateDurationRange InvalidInputErrorCode = "INVALID_ASSESSMENT_TEMPLATE_DURATION_RANGE" + InvalidInputErrorCodeInvalidAssessmentRunDurationRange InvalidInputErrorCode = "INVALID_ASSESSMENT_RUN_DURATION_RANGE" + InvalidInputErrorCodeInvalidAssessmentRunStartTimeRange InvalidInputErrorCode = "INVALID_ASSESSMENT_RUN_START_TIME_RANGE" + InvalidInputErrorCodeInvalidAssessmentRunCompletionTimeRange InvalidInputErrorCode = "INVALID_ASSESSMENT_RUN_COMPLETION_TIME_RANGE" + InvalidInputErrorCodeInvalidAssessmentRunStateChangeTimeRange InvalidInputErrorCode = "INVALID_ASSESSMENT_RUN_STATE_CHANGE_TIME_RANGE" + InvalidInputErrorCodeInvalidAssessmentRunState InvalidInputErrorCode = "INVALID_ASSESSMENT_RUN_STATE" + InvalidInputErrorCodeInvalidTag InvalidInputErrorCode = "INVALID_TAG" + InvalidInputErrorCodeInvalidTagKey InvalidInputErrorCode = "INVALID_TAG_KEY" + InvalidInputErrorCodeInvalidTagValue InvalidInputErrorCode = "INVALID_TAG_VALUE" + InvalidInputErrorCodeInvalidResourceGroupTagKey InvalidInputErrorCode = "INVALID_RESOURCE_GROUP_TAG_KEY" + InvalidInputErrorCodeInvalidResourceGroupTagValue InvalidInputErrorCode = "INVALID_RESOURCE_GROUP_TAG_VALUE" + InvalidInputErrorCodeInvalidAttribute InvalidInputErrorCode = "INVALID_ATTRIBUTE" + InvalidInputErrorCodeInvalidUserAttribute InvalidInputErrorCode = "INVALID_USER_ATTRIBUTE" + InvalidInputErrorCodeInvalidUserAttributeKey InvalidInputErrorCode = "INVALID_USER_ATTRIBUTE_KEY" 
+ InvalidInputErrorCodeInvalidUserAttributeValue InvalidInputErrorCode = "INVALID_USER_ATTRIBUTE_VALUE" + InvalidInputErrorCodeInvalidPaginationToken InvalidInputErrorCode = "INVALID_PAGINATION_TOKEN" + InvalidInputErrorCodeInvalidMaxResults InvalidInputErrorCode = "INVALID_MAX_RESULTS" + InvalidInputErrorCodeInvalidAgentId InvalidInputErrorCode = "INVALID_AGENT_ID" + InvalidInputErrorCodeInvalidAutoScalingGroup InvalidInputErrorCode = "INVALID_AUTO_SCALING_GROUP" + InvalidInputErrorCodeInvalidRuleName InvalidInputErrorCode = "INVALID_RULE_NAME" + InvalidInputErrorCodeInvalidSeverity InvalidInputErrorCode = "INVALID_SEVERITY" + InvalidInputErrorCodeInvalidLocale InvalidInputErrorCode = "INVALID_LOCALE" + InvalidInputErrorCodeInvalidEvent InvalidInputErrorCode = "INVALID_EVENT" + InvalidInputErrorCodeAssessmentTargetNameAlreadyTaken InvalidInputErrorCode = "ASSESSMENT_TARGET_NAME_ALREADY_TAKEN" + InvalidInputErrorCodeAssessmentTemplateNameAlreadyTaken InvalidInputErrorCode = "ASSESSMENT_TEMPLATE_NAME_ALREADY_TAKEN" + InvalidInputErrorCodeInvalidNumberOfAssessmentTargetArns InvalidInputErrorCode = "INVALID_NUMBER_OF_ASSESSMENT_TARGET_ARNS" + InvalidInputErrorCodeInvalidNumberOfAssessmentTemplateArns InvalidInputErrorCode = "INVALID_NUMBER_OF_ASSESSMENT_TEMPLATE_ARNS" + InvalidInputErrorCodeInvalidNumberOfAssessmentRunArns InvalidInputErrorCode = "INVALID_NUMBER_OF_ASSESSMENT_RUN_ARNS" + InvalidInputErrorCodeInvalidNumberOfFindingArns InvalidInputErrorCode = "INVALID_NUMBER_OF_FINDING_ARNS" + InvalidInputErrorCodeInvalidNumberOfResourceGroupArns InvalidInputErrorCode = "INVALID_NUMBER_OF_RESOURCE_GROUP_ARNS" + InvalidInputErrorCodeInvalidNumberOfRulesPackageArns InvalidInputErrorCode = "INVALID_NUMBER_OF_RULES_PACKAGE_ARNS" + InvalidInputErrorCodeInvalidNumberOfAssessmentRunStates InvalidInputErrorCode = "INVALID_NUMBER_OF_ASSESSMENT_RUN_STATES" + InvalidInputErrorCodeInvalidNumberOfTags InvalidInputErrorCode = "INVALID_NUMBER_OF_TAGS" + InvalidInputErrorCodeInvalidNumberOfResourceGroupTags InvalidInputErrorCode = "INVALID_NUMBER_OF_RESOURCE_GROUP_TAGS" + InvalidInputErrorCodeInvalidNumberOfAttributes InvalidInputErrorCode = "INVALID_NUMBER_OF_ATTRIBUTES" + InvalidInputErrorCodeInvalidNumberOfUserAttributes InvalidInputErrorCode = "INVALID_NUMBER_OF_USER_ATTRIBUTES" + InvalidInputErrorCodeInvalidNumberOfAgentIds InvalidInputErrorCode = "INVALID_NUMBER_OF_AGENT_IDS" + InvalidInputErrorCodeInvalidNumberOfAutoScalingGroups InvalidInputErrorCode = "INVALID_NUMBER_OF_AUTO_SCALING_GROUPS" + InvalidInputErrorCodeInvalidNumberOfRuleNames InvalidInputErrorCode = "INVALID_NUMBER_OF_RULE_NAMES" + InvalidInputErrorCodeInvalidNumberOfSeverities InvalidInputErrorCode = "INVALID_NUMBER_OF_SEVERITIES" ) // Values returns all known values for InvalidInputErrorCode. 
Note that this can be @@ -352,11 +352,11 @@ type LimitExceededErrorCode string // Enum values for LimitExceededErrorCode const ( - LimitExceededErrorCodeAssessment_target_limit_exceeded LimitExceededErrorCode = "ASSESSMENT_TARGET_LIMIT_EXCEEDED" - LimitExceededErrorCodeAssessment_template_limit_exceeded LimitExceededErrorCode = "ASSESSMENT_TEMPLATE_LIMIT_EXCEEDED" - LimitExceededErrorCodeAssessment_run_limit_exceeded LimitExceededErrorCode = "ASSESSMENT_RUN_LIMIT_EXCEEDED" - LimitExceededErrorCodeResource_group_limit_exceeded LimitExceededErrorCode = "RESOURCE_GROUP_LIMIT_EXCEEDED" - LimitExceededErrorCodeEvent_subscription_limit_exceeded LimitExceededErrorCode = "EVENT_SUBSCRIPTION_LIMIT_EXCEEDED" + LimitExceededErrorCodeAssessmentTargetLimitExceeded LimitExceededErrorCode = "ASSESSMENT_TARGET_LIMIT_EXCEEDED" + LimitExceededErrorCodeAssessmentTemplateLimitExceeded LimitExceededErrorCode = "ASSESSMENT_TEMPLATE_LIMIT_EXCEEDED" + LimitExceededErrorCodeAssessmentRunLimitExceeded LimitExceededErrorCode = "ASSESSMENT_RUN_LIMIT_EXCEEDED" + LimitExceededErrorCodeResourceGroupLimitExceeded LimitExceededErrorCode = "RESOURCE_GROUP_LIMIT_EXCEEDED" + LimitExceededErrorCodeEventSubscriptionLimitExceeded LimitExceededErrorCode = "EVENT_SUBSCRIPTION_LIMIT_EXCEEDED" ) // Values returns all known values for LimitExceededErrorCode. Note that this can @@ -376,7 +376,7 @@ type Locale string // Enum values for Locale const ( - LocaleEn_us Locale = "EN_US" + LocaleEnUs Locale = "EN_US" ) // Values returns all known values for Locale. Note that this can be expanded in @@ -392,14 +392,14 @@ type NoSuchEntityErrorCode string // Enum values for NoSuchEntityErrorCode const ( - NoSuchEntityErrorCodeAssessment_target_does_not_exist NoSuchEntityErrorCode = "ASSESSMENT_TARGET_DOES_NOT_EXIST" - NoSuchEntityErrorCodeAssessment_template_does_not_exist NoSuchEntityErrorCode = "ASSESSMENT_TEMPLATE_DOES_NOT_EXIST" - NoSuchEntityErrorCodeAssessment_run_does_not_exist NoSuchEntityErrorCode = "ASSESSMENT_RUN_DOES_NOT_EXIST" - NoSuchEntityErrorCodeFinding_does_not_exist NoSuchEntityErrorCode = "FINDING_DOES_NOT_EXIST" - NoSuchEntityErrorCodeResource_group_does_not_exist NoSuchEntityErrorCode = "RESOURCE_GROUP_DOES_NOT_EXIST" - NoSuchEntityErrorCodeRules_package_does_not_exist NoSuchEntityErrorCode = "RULES_PACKAGE_DOES_NOT_EXIST" - NoSuchEntityErrorCodeSns_topic_does_not_exist NoSuchEntityErrorCode = "SNS_TOPIC_DOES_NOT_EXIST" - NoSuchEntityErrorCodeIam_role_does_not_exist NoSuchEntityErrorCode = "IAM_ROLE_DOES_NOT_EXIST" + NoSuchEntityErrorCodeAssessmentTargetDoesNotExist NoSuchEntityErrorCode = "ASSESSMENT_TARGET_DOES_NOT_EXIST" + NoSuchEntityErrorCodeAssessmentTemplateDoesNotExist NoSuchEntityErrorCode = "ASSESSMENT_TEMPLATE_DOES_NOT_EXIST" + NoSuchEntityErrorCodeAssessmentRunDoesNotExist NoSuchEntityErrorCode = "ASSESSMENT_RUN_DOES_NOT_EXIST" + NoSuchEntityErrorCodeFindingDoesNotExist NoSuchEntityErrorCode = "FINDING_DOES_NOT_EXIST" + NoSuchEntityErrorCodeResourceGroupDoesNotExist NoSuchEntityErrorCode = "RESOURCE_GROUP_DOES_NOT_EXIST" + NoSuchEntityErrorCodeRulesPackageDoesNotExist NoSuchEntityErrorCode = "RULES_PACKAGE_DOES_NOT_EXIST" + NoSuchEntityErrorCodeSnsTopicDoesNotExist NoSuchEntityErrorCode = "SNS_TOPIC_DOES_NOT_EXIST" + NoSuchEntityErrorCodeIamRoleDoesNotExist NoSuchEntityErrorCode = "IAM_ROLE_DOES_NOT_EXIST" ) // Values returns all known values for NoSuchEntityErrorCode. 
Note that this can be @@ -422,8 +422,8 @@ type PreviewStatus string // Enum values for PreviewStatus const ( - PreviewStatusWork_in_progress PreviewStatus = "WORK_IN_PROGRESS" - PreviewStatusCompleted PreviewStatus = "COMPLETED" + PreviewStatusWorkInProgress PreviewStatus = "WORK_IN_PROGRESS" + PreviewStatusCompleted PreviewStatus = "COMPLETED" ) // Values returns all known values for PreviewStatus. Note that this can be @@ -458,9 +458,9 @@ type ReportStatus string // Enum values for ReportStatus const ( - ReportStatusWork_in_progress ReportStatus = "WORK_IN_PROGRESS" - ReportStatusFailed ReportStatus = "FAILED" - ReportStatusCompleted ReportStatus = "COMPLETED" + ReportStatusWorkInProgress ReportStatus = "WORK_IN_PROGRESS" + ReportStatusFailed ReportStatus = "FAILED" + ReportStatusCompleted ReportStatus = "COMPLETED" ) // Values returns all known values for ReportStatus. Note that this can be expanded @@ -496,8 +496,8 @@ type ScopeType string // Enum values for ScopeType const ( - ScopeTypeInstance_id ScopeType = "INSTANCE_ID" - ScopeTypeRules_package_arn ScopeType = "RULES_PACKAGE_ARN" + ScopeTypeInstanceId ScopeType = "INSTANCE_ID" + ScopeTypeRulesPackageArn ScopeType = "RULES_PACKAGE_ARN" ) // Values returns all known values for ScopeType. Note that this can be expanded in @@ -538,8 +538,8 @@ type StopAction string // Enum values for StopAction const ( - StopActionStart_evaluation StopAction = "START_EVALUATION" - StopActionSkip_evaluation StopAction = "SKIP_EVALUATION" + StopActionStartEvaluation StopAction = "START_EVALUATION" + StopActionSkipEvaluation StopAction = "SKIP_EVALUATION" ) // Values returns all known values for StopAction. Note that this can be expanded diff --git a/service/iot/api_op_AssociateTargetsWithJob.go b/service/iot/api_op_AssociateTargetsWithJob.go index d2cf1e9ec2f..246ce441473 100644 --- a/service/iot/api_op_AssociateTargetsWithJob.go +++ b/service/iot/api_op_AssociateTargetsWithJob.go @@ -12,14 +12,14 @@ import ( // Associates a group with a continuous job. The following criteria must be met: // -// -// * The job must have been created with the targetSelection field set to +// * +// The job must have been created with the targetSelection field set to // "CONTINUOUS". // -// * The job status must currently be "IN_PROGRESS". +// * The job status must currently be "IN_PROGRESS". // -// * The -// total number of targets associated with a job must not exceed 100. +// * The total +// number of targets associated with a job must not exceed 100. func (c *Client) AssociateTargetsWithJob(ctx context.Context, params *AssociateTargetsWithJobInput, optFns ...func(*Options)) (*AssociateTargetsWithJobOutput, error) { if params == nil { params = &AssociateTargetsWithJobInput{} diff --git a/service/iot/api_op_DescribeEndpoint.go b/service/iot/api_op_DescribeEndpoint.go index 4e830adb2e9..78d5a004855 100644 --- a/service/iot/api_op_DescribeEndpoint.go +++ b/service/iot/api_op_DescribeEndpoint.go @@ -31,21 +31,21 @@ type DescribeEndpointInput struct { // The endpoint type. Valid endpoint types include: // - // * iot:Data - Returns a + // * iot:Data - Returns a // VeriSign signed data endpoint. // - // * iot:Data-ATS - Returns an ATS signed data + // * iot:Data-ATS - Returns an ATS signed data // endpoint. // - // * iot:CredentialProvider - Returns an AWS IoT credentials - // provider API endpoint. + // * iot:CredentialProvider - Returns an AWS IoT credentials provider + // API endpoint. // - // * iot:Jobs - Returns an AWS IoT device management - // Jobs API endpoint. 
+ // * iot:Jobs - Returns an AWS IoT device management Jobs API + // endpoint. // - // We strongly recommend that customers use the newer - // iot:Data-ATS endpoint type to avoid issues related to the widespread distrust of - // Symantec certificate authorities. + // We strongly recommend that customers use the newer iot:Data-ATS + // endpoint type to avoid issues related to the widespread distrust of Symantec + // certificate authorities. EndpointType *string } diff --git a/service/iot/api_op_DescribeIndex.go b/service/iot/api_op_DescribeIndex.go index 925966eaccf..f8f88a9123f 100644 --- a/service/iot/api_op_DescribeIndex.go +++ b/service/iot/api_op_DescribeIndex.go @@ -46,16 +46,16 @@ type DescribeIndexOutput struct { // Contains a value that specifies the type of indexing performed. Valid values // are: // - // * REGISTRY – Your thing index contains only registry data. + // * REGISTRY – Your thing index contains only registry data. // - // * - // REGISTRY_AND_SHADOW - Your thing index contains registry data and shadow data. + // * + // REGISTRY_AND_SHADOW - Your thing index contains registry data and shadow + // data. // + // * REGISTRY_AND_CONNECTIVITY_STATUS - Your thing index contains registry + // data and thing connectivity status data. // - // * REGISTRY_AND_CONNECTIVITY_STATUS - Your thing index contains registry data and - // thing connectivity status data. - // - // * + // * // REGISTRY_AND_SHADOW_AND_CONNECTIVITY_STATUS - Your thing index contains registry // data, shadow data, and thing connectivity status data. Schema *string diff --git a/service/iot/types/enums.go b/service/iot/types/enums.go index bc9db6b473a..6a55a63756f 100644 --- a/service/iot/types/enums.go +++ b/service/iot/types/enums.go @@ -60,12 +60,12 @@ type AuditCheckRunStatus string // Enum values for AuditCheckRunStatus const ( - AuditCheckRunStatusIn_progress AuditCheckRunStatus = "IN_PROGRESS" - AuditCheckRunStatusWaiting_for_data_collection AuditCheckRunStatus = "WAITING_FOR_DATA_COLLECTION" - AuditCheckRunStatusCanceled AuditCheckRunStatus = "CANCELED" - AuditCheckRunStatusCompleted_compliant AuditCheckRunStatus = "COMPLETED_COMPLIANT" - AuditCheckRunStatusCompleted_non_compliant AuditCheckRunStatus = "COMPLETED_NON_COMPLIANT" - AuditCheckRunStatusFailed AuditCheckRunStatus = "FAILED" + AuditCheckRunStatusInProgress AuditCheckRunStatus = "IN_PROGRESS" + AuditCheckRunStatusWaitingForDataCollection AuditCheckRunStatus = "WAITING_FOR_DATA_COLLECTION" + AuditCheckRunStatusCanceled AuditCheckRunStatus = "CANCELED" + AuditCheckRunStatusCompletedCompliant AuditCheckRunStatus = "COMPLETED_COMPLIANT" + AuditCheckRunStatusCompletedNonCompliant AuditCheckRunStatus = "COMPLETED_NON_COMPLIANT" + AuditCheckRunStatusFailed AuditCheckRunStatus = "FAILED" ) // Values returns all known values for AuditCheckRunStatus. 
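Editorial sketch, not part of the generated patch: the DescribeEndpoint documentation above recommends the ATS-signed data endpoint. Requesting it with the v2 client looks roughly like this, with region and credentials coming from the default config chain:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/iot"
)

func main() {
	ctx := context.Background()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := iot.NewFromConfig(cfg)

	// Ask for the ATS-signed data endpoint, as the documentation above recommends.
	out, err := client.DescribeEndpoint(ctx, &iot.DescribeEndpointInput{
		EndpointType: aws.String("iot:Data-ATS"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.ToString(out.EndpointAddress))
}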
Note that this can be @@ -130,12 +130,12 @@ type AuditMitigationActionsExecutionStatus string // Enum values for AuditMitigationActionsExecutionStatus const ( - AuditMitigationActionsExecutionStatusIn_progress AuditMitigationActionsExecutionStatus = "IN_PROGRESS" - AuditMitigationActionsExecutionStatusCompleted AuditMitigationActionsExecutionStatus = "COMPLETED" - AuditMitigationActionsExecutionStatusFailed AuditMitigationActionsExecutionStatus = "FAILED" - AuditMitigationActionsExecutionStatusCanceled AuditMitigationActionsExecutionStatus = "CANCELED" - AuditMitigationActionsExecutionStatusSkipped AuditMitigationActionsExecutionStatus = "SKIPPED" - AuditMitigationActionsExecutionStatusPending AuditMitigationActionsExecutionStatus = "PENDING" + AuditMitigationActionsExecutionStatusInProgress AuditMitigationActionsExecutionStatus = "IN_PROGRESS" + AuditMitigationActionsExecutionStatusCompleted AuditMitigationActionsExecutionStatus = "COMPLETED" + AuditMitigationActionsExecutionStatusFailed AuditMitigationActionsExecutionStatus = "FAILED" + AuditMitigationActionsExecutionStatusCanceled AuditMitigationActionsExecutionStatus = "CANCELED" + AuditMitigationActionsExecutionStatusSkipped AuditMitigationActionsExecutionStatus = "SKIPPED" + AuditMitigationActionsExecutionStatusPending AuditMitigationActionsExecutionStatus = "PENDING" ) // Values returns all known values for AuditMitigationActionsExecutionStatus. Note @@ -157,10 +157,10 @@ type AuditMitigationActionsTaskStatus string // Enum values for AuditMitigationActionsTaskStatus const ( - AuditMitigationActionsTaskStatusIn_progress AuditMitigationActionsTaskStatus = "IN_PROGRESS" - AuditMitigationActionsTaskStatusCompleted AuditMitigationActionsTaskStatus = "COMPLETED" - AuditMitigationActionsTaskStatusFailed AuditMitigationActionsTaskStatus = "FAILED" - AuditMitigationActionsTaskStatusCanceled AuditMitigationActionsTaskStatus = "CANCELED" + AuditMitigationActionsTaskStatusInProgress AuditMitigationActionsTaskStatus = "IN_PROGRESS" + AuditMitigationActionsTaskStatusCompleted AuditMitigationActionsTaskStatus = "COMPLETED" + AuditMitigationActionsTaskStatusFailed AuditMitigationActionsTaskStatus = "FAILED" + AuditMitigationActionsTaskStatusCanceled AuditMitigationActionsTaskStatus = "CANCELED" ) // Values returns all known values for AuditMitigationActionsTaskStatus. Note that @@ -196,10 +196,10 @@ type AuditTaskStatus string // Enum values for AuditTaskStatus const ( - AuditTaskStatusIn_progress AuditTaskStatus = "IN_PROGRESS" - AuditTaskStatusCompleted AuditTaskStatus = "COMPLETED" - AuditTaskStatusFailed AuditTaskStatus = "FAILED" - AuditTaskStatusCanceled AuditTaskStatus = "CANCELED" + AuditTaskStatusInProgress AuditTaskStatus = "IN_PROGRESS" + AuditTaskStatusCompleted AuditTaskStatus = "COMPLETED" + AuditTaskStatusFailed AuditTaskStatus = "FAILED" + AuditTaskStatusCanceled AuditTaskStatus = "CANCELED" ) // Values returns all known values for AuditTaskStatus. Note that this can be @@ -218,8 +218,8 @@ type AuditTaskType string // Enum values for AuditTaskType const ( - AuditTaskTypeOn_demand_audit_task AuditTaskType = "ON_DEMAND_AUDIT_TASK" - AuditTaskTypeScheduled_audit_task AuditTaskType = "SCHEDULED_AUDIT_TASK" + AuditTaskTypeOnDemandAuditTask AuditTaskType = "ON_DEMAND_AUDIT_TASK" + AuditTaskTypeScheduledAuditTask AuditTaskType = "SCHEDULED_AUDIT_TASK" ) // Values returns all known values for AuditTaskType. 
Note that this can be @@ -236,9 +236,9 @@ type AuthDecision string // Enum values for AuthDecision const ( - AuthDecisionAllowed AuthDecision = "ALLOWED" - AuthDecisionExplicit_deny AuthDecision = "EXPLICIT_DENY" - AuthDecisionImplicit_deny AuthDecision = "IMPLICIT_DENY" + AuthDecisionAllowed AuthDecision = "ALLOWED" + AuthDecisionExplicitDeny AuthDecision = "EXPLICIT_DENY" + AuthDecisionImplicitDeny AuthDecision = "IMPLICIT_DENY" ) // Values returns all known values for AuthDecision. Note that this can be expanded @@ -309,10 +309,10 @@ type AwsJobAbortCriteriaFailureType string // Enum values for AwsJobAbortCriteriaFailureType const ( - AwsJobAbortCriteriaFailureTypeFailed AwsJobAbortCriteriaFailureType = "FAILED" - AwsJobAbortCriteriaFailureTypeRejected AwsJobAbortCriteriaFailureType = "REJECTED" - AwsJobAbortCriteriaFailureTypeTimed_out AwsJobAbortCriteriaFailureType = "TIMED_OUT" - AwsJobAbortCriteriaFailureTypeAll AwsJobAbortCriteriaFailureType = "ALL" + AwsJobAbortCriteriaFailureTypeFailed AwsJobAbortCriteriaFailureType = "FAILED" + AwsJobAbortCriteriaFailureTypeRejected AwsJobAbortCriteriaFailureType = "REJECTED" + AwsJobAbortCriteriaFailureTypeTimedOut AwsJobAbortCriteriaFailureType = "TIMED_OUT" + AwsJobAbortCriteriaFailureTypeAll AwsJobAbortCriteriaFailureType = "ALL" ) // Values returns all known values for AwsJobAbortCriteriaFailureType. Note that @@ -396,8 +396,8 @@ type CertificateMode string // Enum values for CertificateMode const ( - CertificateModeDefault CertificateMode = "DEFAULT" - CertificateModeSni_only CertificateMode = "SNI_ONLY" + CertificateModeDefault CertificateMode = "DEFAULT" + CertificateModeSniOnly CertificateMode = "SNI_ONLY" ) // Values returns all known values for CertificateMode. Note that this can be @@ -414,12 +414,12 @@ type CertificateStatus string // Enum values for CertificateStatus const ( - CertificateStatusActive CertificateStatus = "ACTIVE" - CertificateStatusInactive CertificateStatus = "INACTIVE" - CertificateStatusRevoked CertificateStatus = "REVOKED" - CertificateStatusPending_transfer CertificateStatus = "PENDING_TRANSFER" - CertificateStatusRegister_inactive CertificateStatus = "REGISTER_INACTIVE" - CertificateStatusPending_activation CertificateStatus = "PENDING_ACTIVATION" + CertificateStatusActive CertificateStatus = "ACTIVE" + CertificateStatusInactive CertificateStatus = "INACTIVE" + CertificateStatusRevoked CertificateStatus = "REVOKED" + CertificateStatusPendingTransfer CertificateStatus = "PENDING_TRANSFER" + CertificateStatusRegisterInactive CertificateStatus = "REGISTER_INACTIVE" + CertificateStatusPendingActivation CertificateStatus = "PENDING_ACTIVATION" ) // Values returns all known values for CertificateStatus. 
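Editorial sketch, not part of the generated patch: code that branches on certificate state simply picks up the new CamelCase identifiers above. This example treats ACTIVE as the only connectable state, which is this sketch's assumption rather than something stated in the diff:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/iot/types"
)

// isUsable is a rough helper that reports whether a device certificate in the
// given status can be used to connect, using the renamed CertificateStatus
// constants from the hunk above.
func isUsable(status types.CertificateStatus) bool {
	switch status {
	case types.CertificateStatusActive:
		return true
	case types.CertificateStatusInactive,
		types.CertificateStatusRevoked,
		types.CertificateStatusPendingTransfer,
		types.CertificateStatusRegisterInactive,
		types.CertificateStatusPendingActivation:
		return false
	default:
		return false
	}
}

func main() {
	fmt.Println(isUsable(types.CertificateStatusPendingTransfer)) // false
}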
Note that this can be @@ -440,14 +440,14 @@ type ComparisonOperator string // Enum values for ComparisonOperator const ( - ComparisonOperatorLess_than ComparisonOperator = "less-than" - ComparisonOperatorLess_than_equals ComparisonOperator = "less-than-equals" - ComparisonOperatorGreater_than ComparisonOperator = "greater-than" - ComparisonOperatorGreater_than_equals ComparisonOperator = "greater-than-equals" - ComparisonOperatorIn_cidr_set ComparisonOperator = "in-cidr-set" - ComparisonOperatorNot_in_cidr_set ComparisonOperator = "not-in-cidr-set" - ComparisonOperatorIn_port_set ComparisonOperator = "in-port-set" - ComparisonOperatorNot_in_port_set ComparisonOperator = "not-in-port-set" + ComparisonOperatorLessThan ComparisonOperator = "less-than" + ComparisonOperatorLessThanEquals ComparisonOperator = "less-than-equals" + ComparisonOperatorGreaterThan ComparisonOperator = "greater-than" + ComparisonOperatorGreaterThanEquals ComparisonOperator = "greater-than-equals" + ComparisonOperatorInCidrSet ComparisonOperator = "in-cidr-set" + ComparisonOperatorNotInCidrSet ComparisonOperator = "not-in-cidr-set" + ComparisonOperatorInPortSet ComparisonOperator = "in-port-set" + ComparisonOperatorNotInPortSet ComparisonOperator = "not-in-port-set" ) // Values returns all known values for ComparisonOperator. Note that this can be @@ -515,7 +515,7 @@ type DimensionType string // Enum values for DimensionType const ( - DimensionTypeTopic_filter DimensionType = "TOPIC_FILTER" + DimensionTypeTopicFilter DimensionType = "TOPIC_FILTER" ) // Values returns all known values for DimensionType. Note that this can be @@ -531,8 +531,8 @@ type DimensionValueOperator string // Enum values for DimensionValueOperator const ( - DimensionValueOperatorIn DimensionValueOperator = "IN" - DimensionValueOperatorNot_in DimensionValueOperator = "NOT_IN" + DimensionValueOperatorIn DimensionValueOperator = "IN" + DimensionValueOperatorNotIn DimensionValueOperator = "NOT_IN" ) // Values returns all known values for DimensionValueOperator. Note that this can @@ -567,9 +567,9 @@ type DomainType string // Enum values for DomainType const ( - DomainTypeEndpoint DomainType = "ENDPOINT" - DomainTypeAws_managed DomainType = "AWS_MANAGED" - DomainTypeCustomer_managed DomainType = "CUSTOMER_MANAGED" + DomainTypeEndpoint DomainType = "ENDPOINT" + DomainTypeAwsManaged DomainType = "AWS_MANAGED" + DomainTypeCustomerManaged DomainType = "CUSTOMER_MANAGED" ) // Values returns all known values for DomainType. 
Note that this can be expanded @@ -625,17 +625,17 @@ type EventType string // Enum values for EventType const ( - EventTypeThing EventType = "THING" - EventTypeThing_group EventType = "THING_GROUP" - EventTypeThing_type EventType = "THING_TYPE" - EventTypeThing_group_membership EventType = "THING_GROUP_MEMBERSHIP" - EventTypeThing_group_hierarchy EventType = "THING_GROUP_HIERARCHY" - EventTypeThing_type_association EventType = "THING_TYPE_ASSOCIATION" - EventTypeJob EventType = "JOB" - EventTypeJob_execution EventType = "JOB_EXECUTION" - EventTypePolicy EventType = "POLICY" - EventTypeCertificate EventType = "CERTIFICATE" - EventTypeCa_certificate EventType = "CA_CERTIFICATE" + EventTypeThing EventType = "THING" + EventTypeThingGroup EventType = "THING_GROUP" + EventTypeThingType EventType = "THING_TYPE" + EventTypeThingGroupMembership EventType = "THING_GROUP_MEMBERSHIP" + EventTypeThingGroupHierarchy EventType = "THING_GROUP_HIERARCHY" + EventTypeThingTypeAssociation EventType = "THING_TYPE_ASSOCIATION" + EventTypeJob EventType = "JOB" + EventTypeJobExecution EventType = "JOB_EXECUTION" + EventTypePolicy EventType = "POLICY" + EventTypeCertificate EventType = "CERTIFICATE" + EventTypeCaCertificate EventType = "CA_CERTIFICATE" ) // Values returns all known values for EventType. Note that this can be expanded in @@ -701,10 +701,10 @@ type JobExecutionFailureType string // Enum values for JobExecutionFailureType const ( - JobExecutionFailureTypeFailed JobExecutionFailureType = "FAILED" - JobExecutionFailureTypeRejected JobExecutionFailureType = "REJECTED" - JobExecutionFailureTypeTimed_out JobExecutionFailureType = "TIMED_OUT" - JobExecutionFailureTypeAll JobExecutionFailureType = "ALL" + JobExecutionFailureTypeFailed JobExecutionFailureType = "FAILED" + JobExecutionFailureTypeRejected JobExecutionFailureType = "REJECTED" + JobExecutionFailureTypeTimedOut JobExecutionFailureType = "TIMED_OUT" + JobExecutionFailureTypeAll JobExecutionFailureType = "ALL" ) // Values returns all known values for JobExecutionFailureType. Note that this can @@ -723,14 +723,14 @@ type JobExecutionStatus string // Enum values for JobExecutionStatus const ( - JobExecutionStatusQueued JobExecutionStatus = "QUEUED" - JobExecutionStatusIn_progress JobExecutionStatus = "IN_PROGRESS" - JobExecutionStatusSucceeded JobExecutionStatus = "SUCCEEDED" - JobExecutionStatusFailed JobExecutionStatus = "FAILED" - JobExecutionStatusTimed_out JobExecutionStatus = "TIMED_OUT" - JobExecutionStatusRejected JobExecutionStatus = "REJECTED" - JobExecutionStatusRemoved JobExecutionStatus = "REMOVED" - JobExecutionStatusCanceled JobExecutionStatus = "CANCELED" + JobExecutionStatusQueued JobExecutionStatus = "QUEUED" + JobExecutionStatusInProgress JobExecutionStatus = "IN_PROGRESS" + JobExecutionStatusSucceeded JobExecutionStatus = "SUCCEEDED" + JobExecutionStatusFailed JobExecutionStatus = "FAILED" + JobExecutionStatusTimedOut JobExecutionStatus = "TIMED_OUT" + JobExecutionStatusRejected JobExecutionStatus = "REJECTED" + JobExecutionStatusRemoved JobExecutionStatus = "REMOVED" + JobExecutionStatusCanceled JobExecutionStatus = "CANCELED" ) // Values returns all known values for JobExecutionStatus. 
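Editorial sketch, not part of the generated patch: the same rename pattern applies to job executions. A small helper that classifies the renamed JobExecutionStatus values into terminal and non-terminal states (the classification itself is this example's assumption):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/iot/types"
)

// isTerminal reports whether a job execution has reached a state that will not
// change again, using the renamed JobExecutionStatus constants above.
func isTerminal(status types.JobExecutionStatus) bool {
	switch status {
	case types.JobExecutionStatusSucceeded,
		types.JobExecutionStatusFailed,
		types.JobExecutionStatusTimedOut,
		types.JobExecutionStatusRejected,
		types.JobExecutionStatusRemoved,
		types.JobExecutionStatusCanceled:
		return true
	default: // QUEUED, IN_PROGRESS
		return false
	}
}

func main() {
	fmt.Println(isTerminal(types.JobExecutionStatusInProgress)) // false
}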
Note that this can be @@ -753,10 +753,10 @@ type JobStatus string // Enum values for JobStatus const ( - JobStatusIn_progress JobStatus = "IN_PROGRESS" - JobStatusCanceled JobStatus = "CANCELED" - JobStatusCompleted JobStatus = "COMPLETED" - JobStatusDeletion_in_progress JobStatus = "DELETION_IN_PROGRESS" + JobStatusInProgress JobStatus = "IN_PROGRESS" + JobStatusCanceled JobStatus = "CANCELED" + JobStatusCompleted JobStatus = "COMPLETED" + JobStatusDeletionInProgress JobStatus = "DELETION_IN_PROGRESS" ) // Values returns all known values for JobStatus. Note that this can be expanded in @@ -799,8 +799,8 @@ type LogTargetType string // Enum values for LogTargetType const ( - LogTargetTypeDefault LogTargetType = "DEFAULT" - LogTargetTypeThing_group LogTargetType = "THING_GROUP" + LogTargetTypeDefault LogTargetType = "DEFAULT" + LogTargetTypeThingGroup LogTargetType = "THING_GROUP" ) // Values returns all known values for LogTargetType. Note that this can be @@ -835,12 +835,12 @@ type MitigationActionType string // Enum values for MitigationActionType const ( - MitigationActionTypeUpdate_device_certificate MitigationActionType = "UPDATE_DEVICE_CERTIFICATE" - MitigationActionTypeUpdate_ca_certificate MitigationActionType = "UPDATE_CA_CERTIFICATE" - MitigationActionTypeAdd_things_to_thing_group MitigationActionType = "ADD_THINGS_TO_THING_GROUP" - MitigationActionTypeReplace_default_policy_version MitigationActionType = "REPLACE_DEFAULT_POLICY_VERSION" - MitigationActionTypeEnable_iot_logging MitigationActionType = "ENABLE_IOT_LOGGING" - MitigationActionTypePublish_finding_to_sns MitigationActionType = "PUBLISH_FINDING_TO_SNS" + MitigationActionTypeUpdateDeviceCertificate MitigationActionType = "UPDATE_DEVICE_CERTIFICATE" + MitigationActionTypeUpdateCaCertificate MitigationActionType = "UPDATE_CA_CERTIFICATE" + MitigationActionTypeAddThingsToThingGroup MitigationActionType = "ADD_THINGS_TO_THING_GROUP" + MitigationActionTypeReplaceDefaultPolicyVersion MitigationActionType = "REPLACE_DEFAULT_POLICY_VERSION" + MitigationActionTypeEnableIotLogging MitigationActionType = "ENABLE_IOT_LOGGING" + MitigationActionTypePublishFindingToSns MitigationActionType = "PUBLISH_FINDING_TO_SNS" ) // Values returns all known values for MitigationActionType. Note that this can be @@ -861,10 +861,10 @@ type OTAUpdateStatus string // Enum values for OTAUpdateStatus const ( - OTAUpdateStatusCreate_pending OTAUpdateStatus = "CREATE_PENDING" - OTAUpdateStatusCreate_in_progress OTAUpdateStatus = "CREATE_IN_PROGRESS" - OTAUpdateStatusCreate_complete OTAUpdateStatus = "CREATE_COMPLETE" - OTAUpdateStatusCreate_failed OTAUpdateStatus = "CREATE_FAILED" + OTAUpdateStatusCreatePending OTAUpdateStatus = "CREATE_PENDING" + OTAUpdateStatusCreateInProgress OTAUpdateStatus = "CREATE_IN_PROGRESS" + OTAUpdateStatusCreateComplete OTAUpdateStatus = "CREATE_COMPLETE" + OTAUpdateStatusCreateFailed OTAUpdateStatus = "CREATE_FAILED" ) // Values returns all known values for OTAUpdateStatus. Note that this can be @@ -883,7 +883,7 @@ type PolicyTemplateName string // Enum values for PolicyTemplateName const ( - PolicyTemplateNameBlank_policy PolicyTemplateName = "BLANK_POLICY" + PolicyTemplateNameBlankPolicy PolicyTemplateName = "BLANK_POLICY" ) // Values returns all known values for PolicyTemplateName. 
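Editorial sketch, not part of the generated patch: the four CREATE_* statuses above describe OTA update creation progress. A sketch that folds them into a done/succeeded pair, using the renamed constants:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/iot/types"
)

// creationFinished reports whether OTA update creation has finished and, if so,
// whether it succeeded, based on the OTAUpdateStatus constants above.
func creationFinished(status types.OTAUpdateStatus) (done, ok bool) {
	switch status {
	case types.OTAUpdateStatusCreateComplete:
		return true, true
	case types.OTAUpdateStatusCreateFailed:
		return true, false
	default: // CREATE_PENDING, CREATE_IN_PROGRESS
		return false, false
	}
}

func main() {
	done, ok := creationFinished(types.OTAUpdateStatusCreateInProgress)
	fmt.Println(done, ok) // false false
}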
Note that this can be @@ -935,14 +935,14 @@ type ResourceType string // Enum values for ResourceType const ( - ResourceTypeDevice_certificate ResourceType = "DEVICE_CERTIFICATE" - ResourceTypeCa_certificate ResourceType = "CA_CERTIFICATE" - ResourceTypeIot_policy ResourceType = "IOT_POLICY" - ResourceTypeCognito_identity_pool ResourceType = "COGNITO_IDENTITY_POOL" - ResourceTypeClient_id ResourceType = "CLIENT_ID" - ResourceTypeAccount_settings ResourceType = "ACCOUNT_SETTINGS" - ResourceTypeRole_alias ResourceType = "ROLE_ALIAS" - ResourceTypeIam_role ResourceType = "IAM_ROLE" + ResourceTypeDeviceCertificate ResourceType = "DEVICE_CERTIFICATE" + ResourceTypeCaCertificate ResourceType = "CA_CERTIFICATE" + ResourceTypeIotPolicy ResourceType = "IOT_POLICY" + ResourceTypeCognitoIdentityPool ResourceType = "COGNITO_IDENTITY_POOL" + ResourceTypeClientId ResourceType = "CLIENT_ID" + ResourceTypeAccountSettings ResourceType = "ACCOUNT_SETTINGS" + ResourceTypeRoleAlias ResourceType = "ROLE_ALIAS" + ResourceTypeIamRole ResourceType = "IAM_ROLE" ) // Values returns all known values for ResourceType. Note that this can be expanded @@ -983,9 +983,9 @@ type ServiceType string // Enum values for ServiceType const ( - ServiceTypeData ServiceType = "DATA" - ServiceTypeCredential_provider ServiceType = "CREDENTIAL_PROVIDER" - ServiceTypeJobs ServiceType = "JOBS" + ServiceTypeData ServiceType = "DATA" + ServiceTypeCredentialProvider ServiceType = "CREDENTIAL_PROVIDER" + ServiceTypeJobs ServiceType = "JOBS" ) // Values returns all known values for ServiceType. Note that this can be expanded @@ -1082,9 +1082,9 @@ type ThingIndexingMode string // Enum values for ThingIndexingMode const ( - ThingIndexingModeOff ThingIndexingMode = "OFF" - ThingIndexingModeRegistry ThingIndexingMode = "REGISTRY" - ThingIndexingModeRegistry_and_shadow ThingIndexingMode = "REGISTRY_AND_SHADOW" + ThingIndexingModeOff ThingIndexingMode = "OFF" + ThingIndexingModeRegistry ThingIndexingMode = "REGISTRY" + ThingIndexingModeRegistryAndShadow ThingIndexingMode = "REGISTRY_AND_SHADOW" ) // Values returns all known values for ThingIndexingMode. Note that this can be @@ -1102,10 +1102,10 @@ type TopicRuleDestinationStatus string // Enum values for TopicRuleDestinationStatus const ( - TopicRuleDestinationStatusEnabled TopicRuleDestinationStatus = "ENABLED" - TopicRuleDestinationStatusIn_progress TopicRuleDestinationStatus = "IN_PROGRESS" - TopicRuleDestinationStatusDisabled TopicRuleDestinationStatus = "DISABLED" - TopicRuleDestinationStatusError TopicRuleDestinationStatus = "ERROR" + TopicRuleDestinationStatusEnabled TopicRuleDestinationStatus = "ENABLED" + TopicRuleDestinationStatusInProgress TopicRuleDestinationStatus = "IN_PROGRESS" + TopicRuleDestinationStatusDisabled TopicRuleDestinationStatus = "DISABLED" + TopicRuleDestinationStatusError TopicRuleDestinationStatus = "ERROR" ) // Values returns all known values for TopicRuleDestinationStatus. Note that this @@ -1124,9 +1124,9 @@ type ViolationEventType string // Enum values for ViolationEventType const ( - ViolationEventTypeIn_alarm ViolationEventType = "in-alarm" - ViolationEventTypeAlarm_cleared ViolationEventType = "alarm-cleared" - ViolationEventTypeAlarm_invalidated ViolationEventType = "alarm-invalidated" + ViolationEventTypeInAlarm ViolationEventType = "in-alarm" + ViolationEventTypeAlarmCleared ViolationEventType = "alarm-cleared" + ViolationEventTypeAlarmInvalidated ViolationEventType = "alarm-invalidated" ) // Values returns all known values for ViolationEventType. 
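Editorial sketch, not part of the generated patch: the renamed ThingIndexingMode constants are typically passed through UpdateIndexingConfiguration. The operation name and its input field names below are assumed from the service's public API and are not shown in this diff:

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/iot"
	"github.com/aws/aws-sdk-go-v2/service/iot/types"
)

func main() {
	ctx := context.Background()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := iot.NewFromConfig(cfg)

	// Index both registry and shadow data, using the renamed constant above.
	_, err = client.UpdateIndexingConfiguration(ctx, &iot.UpdateIndexingConfigurationInput{
		ThingIndexingConfiguration: &types.ThingIndexingConfiguration{
			ThingIndexingMode: types.ThingIndexingModeRegistryAndShadow,
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}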
Note that this can be diff --git a/service/iot/types/types.go b/service/iot/types/types.go index 5af2ff3f791..162ea7c0598 100644 --- a/service/iot/types/types.go +++ b/service/iot/types/types.go @@ -982,11 +982,11 @@ type Destination struct { // dbc123defghijk.iot.us-west-2.amazonaws.com), a customer managed domain, or a // default endpoint. // -// * Data +// * Data // -// * Jobs +// * Jobs // -// * CredentialProvider +// * CredentialProvider // // The domain // configuration feature is in public preview and is subject to change. @@ -2722,13 +2722,13 @@ type ThingIndexingConfiguration struct { // Thing indexing mode. Valid values are: // - // * REGISTRY – Your thing index - // contains registry data only. + // * REGISTRY – Your thing index contains + // registry data only. // - // * REGISTRY_AND_SHADOW - Your thing index - // contains registry and shadow data. + // * REGISTRY_AND_SHADOW - Your thing index contains registry + // and shadow data. // - // * OFF - Thing indexing is disabled. + // * OFF - Thing indexing is disabled. // // This member is required. ThingIndexingMode ThingIndexingMode @@ -2742,11 +2742,11 @@ type ThingIndexingConfiguration struct { // Thing connectivity indexing mode. Valid values are: // - // * STATUS – Your thing - // index contains connectivity status. To enable thing connectivity indexing, + // * STATUS – Your thing index + // contains connectivity status. To enable thing connectivity indexing, // thingIndexMode must not be set to OFF. // - // * OFF - Thing connectivity status + // * OFF - Thing connectivity status // indexing is disabled. ThingConnectivityIndexingMode ThingConnectivityIndexingMode } diff --git a/service/iotanalytics/api_op_BatchPutMessage.go b/service/iotanalytics/api_op_BatchPutMessage.go index df6f961fbbf..c3d10f35202 100644 --- a/service/iotanalytics/api_op_BatchPutMessage.go +++ b/service/iotanalytics/api_op_BatchPutMessage.go @@ -38,23 +38,23 @@ type BatchPutMessageInput struct { // "string", "payload": "string"}'. Note that the field names of message payloads // (data) that you send to AWS IoT Analytics: // - // * Must contain only alphanumeric + // * Must contain only alphanumeric // characters and undescores (_); no other special characters are allowed. // - // * - // Must begin with an alphabetic character or single underscore (_). + // * Must + // begin with an alphabetic character or single underscore (_). // - // * Cannot - // contain hyphens (-). + // * Cannot contain + // hyphens (-). // - // * In regular expression terms: + // * In regular expression terms: // "^[A-Za-z_]([A-Za-z0-9]*|[A-Za-z0-9][A-Za-z0-9_]*)$". // - // * Cannot be greater - // than 255 characters. + // * Cannot be greater than + // 255 characters. // - // * Are case-insensitive. (Fields named "foo" and "FOO" - // in the same payload are considered duplicates.) + // * Are case-insensitive. (Fields named "foo" and "FOO" in the + // same payload are considered duplicates.) 
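Editorial sketch, not part of the generated patch: the BatchPutMessage documentation above quotes a regular expression for payload field names. A self-contained check of candidate names against that same pattern, using the example names from the documentation text:

package main

import (
	"fmt"
	"regexp"
)

// fieldName mirrors the pattern quoted in the BatchPutMessage documentation
// above for message payload field names.
var fieldName = regexp.MustCompile(`^[A-Za-z_]([A-Za-z0-9]*|[A-Za-z0-9][A-Za-z0-9_]*)$`)

func main() {
	for _, name := range []string{"temp_01", "_temp_01", "temp-01", "01_temp"} {
		fmt.Printf("%-8s valid=%v\n", name, fieldName.MatchString(name))
	}
	// Output:
	// temp_01  valid=true
	// _temp_01 valid=true
	// temp-01  valid=false
	// 01_temp  valid=false
}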
// // For example, {"temp_01": 29} or // {"_temp_01": 29} are valid, but {"temp-01": 29}, {"01_temp": 29} or diff --git a/service/iotanalytics/types/enums.go b/service/iotanalytics/types/enums.go index de138ad1b7e..3e4374b002e 100644 --- a/service/iotanalytics/types/enums.go +++ b/service/iotanalytics/types/enums.go @@ -26,8 +26,8 @@ type ComputeType string // Enum values for ComputeType const ( - ComputeTypeAcu_1 ComputeType = "ACU_1" - ComputeTypeAcu_2 ComputeType = "ACU_2" + ComputeTypeAcu1 ComputeType = "ACU_1" + ComputeTypeAcu2 ComputeType = "ACU_2" ) // Values returns all known values for ComputeType. Note that this can be expanded diff --git a/service/iotevents/types/types.go b/service/iotevents/types/types.go index e56252c9c38..2793d528c9d 100644 --- a/service/iotevents/types/types.go +++ b/service/iotevents/types/types.go @@ -76,11 +76,11 @@ type Action struct { // in the AWS IoT SiteWise API Reference. For parameters that are string data type, // you can specify the following options: // -// * Use a string. For example, the +// * Use a string. For example, the // timeInSeconds value can be '1586400675'. // -// * Use an expression. For example, -// the timeInSeconds value can be +// * Use an expression. For example, the +// timeInSeconds value can be // '${$input.TemperatureInput.sensorData.timestamp/1000}'. For more information, // see Expressions // (https://docs.aws.amazon.com/iotevents/latest/developerguide/iotevents-expressions.html) @@ -104,12 +104,12 @@ type AssetPropertyTimestamp struct { // in the AWS IoT SiteWise API Reference. For parameters that are string data type, // you can specify the following options: // -// * Use a string. For example, the -// quality value can be 'GOOD'. +// * Use a string. For example, the quality +// value can be 'GOOD'. // -// * Use an expression. For example, the quality -// value can be $input.TemperatureInput.sensorData.quality . For more information, -// see Expressions +// * Use an expression. For example, the quality value can be +// $input.TemperatureInput.sensorData.quality . For more information, see +// Expressions // (https://docs.aws.amazon.com/iotevents/latest/developerguide/iotevents-expressions.html) // in the AWS IoT Events Developer Guide. type AssetPropertyValue struct { @@ -138,10 +138,10 @@ type AssetPropertyValue struct { // in the AWS IoT SiteWise API Reference. For parameters that are string data type, // you can specify the following options: // -// * Use a string. For example, the +// * Use a string. For example, the // doubleValue value can be '47.9'. // -// * Use an expression. For example, the +// * Use an expression. For example, the // doubleValue value can be $input.TemperatureInput.sensorData.temperature. For // more information, see Expressions // (https://docs.aws.amazon.com/iotevents/latest/developerguide/iotevents-expressions.html) @@ -354,28 +354,27 @@ type DynamoDBAction struct { // The data type for the hash key (also called the partition key). You can specify // the following values: // - // * STRING - The hash key is a string. + // * STRING - The hash key is a string. // - // * NUMBER - - // The hash key is a number. + // * NUMBER - The hash + // key is a number. // - // If you don't specify hashKeyType, the default value - // is STRING. + // If you don't specify hashKeyType, the default value is STRING. HashKeyType *string // The type of operation to perform. You can specify the following values: // - // * + // * // INSERT - Insert data as a new item into the DynamoDB table. 
This item uses the // specified hash key as a partition key. If you specified a range key, the item // uses the range key as a sort key. // - // * UPDATE - Update an existing item of the + // * UPDATE - Update an existing item of the // DynamoDB table with new data. This item's partition key must match the specified // hash key. If you specified a range key, the range key must match the item's sort // key. // - // * DELETE - Delete an existing item of the DynamoDB table. This item's + // * DELETE - Delete an existing item of the DynamoDB table. This item's // partition key must match the specified hash key. If you specified a range key, // the range key must match the item's sort key. // @@ -400,13 +399,12 @@ type DynamoDBAction struct { // The data type for the range key (also called the sort key), You can specify the // following values: // - // * STRING - The range key is a string. + // * STRING - The range key is a string. // - // * NUMBER - The - // range key is number. + // * NUMBER - The range + // key is number. // - // If you don't specify rangeKeyField, the default value is - // STRING. + // If you don't specify rangeKeyField, the default value is STRING. RangeKeyType *string // The value of the range key (also called the sort key). @@ -572,11 +570,11 @@ type IotEventsAction struct { // property in AWS IoT SiteWise. For parameters that are string data type, you can // specify the following options: // -// * Use a string. For example, the -// propertyAlias value can be '/company/windfarm/3/turbine/7/temperature'. +// * Use a string. For example, the propertyAlias +// value can be '/company/windfarm/3/turbine/7/temperature'. // -// * -// Use an expression. For example, the propertyAlias value can be +// * Use an expression. +// For example, the propertyAlias value can be // 'company/windfarm/${$input.TemperatureInput.sensorData.windfarmID}/turbine/${$input.TemperatureInput.sensorData.turbineID}/temperature'. // For more information, see Expressions // (https://docs.aws.amazon.com/iotevents/latest/developerguide/iotevents-expressions.html) diff --git a/service/iotjobsdataplane/types/enums.go b/service/iotjobsdataplane/types/enums.go index 9af84220078..946f56ee26a 100644 --- a/service/iotjobsdataplane/types/enums.go +++ b/service/iotjobsdataplane/types/enums.go @@ -6,14 +6,14 @@ type JobExecutionStatus string // Enum values for JobExecutionStatus const ( - JobExecutionStatusQueued JobExecutionStatus = "QUEUED" - JobExecutionStatusIn_progress JobExecutionStatus = "IN_PROGRESS" - JobExecutionStatusSucceeded JobExecutionStatus = "SUCCEEDED" - JobExecutionStatusFailed JobExecutionStatus = "FAILED" - JobExecutionStatusTimed_out JobExecutionStatus = "TIMED_OUT" - JobExecutionStatusRejected JobExecutionStatus = "REJECTED" - JobExecutionStatusRemoved JobExecutionStatus = "REMOVED" - JobExecutionStatusCanceled JobExecutionStatus = "CANCELED" + JobExecutionStatusQueued JobExecutionStatus = "QUEUED" + JobExecutionStatusInProgress JobExecutionStatus = "IN_PROGRESS" + JobExecutionStatusSucceeded JobExecutionStatus = "SUCCEEDED" + JobExecutionStatusFailed JobExecutionStatus = "FAILED" + JobExecutionStatusTimedOut JobExecutionStatus = "TIMED_OUT" + JobExecutionStatusRejected JobExecutionStatus = "REJECTED" + JobExecutionStatusRemoved JobExecutionStatus = "REMOVED" + JobExecutionStatusCanceled JobExecutionStatus = "CANCELED" ) // Values returns all known values for JobExecutionStatus. 
Note that this can be diff --git a/service/iotsitewise/api_op_BatchPutAssetPropertyValue.go b/service/iotsitewise/api_op_BatchPutAssetPropertyValue.go index 6c2b6ab902c..f6f9f8b4268 100644 --- a/service/iotsitewise/api_op_BatchPutAssetPropertyValue.go +++ b/service/iotsitewise/api_op_BatchPutAssetPropertyValue.go @@ -19,10 +19,10 @@ import ( // the AWS IoT SiteWise User Guide. To identify an asset property, you must specify // one of the following: // -// * The assetId and propertyId of an asset property. +// * The assetId and propertyId of an asset property. // -// -// * A propertyAlias, which is a data stream alias (for example, +// * A +// propertyAlias, which is a data stream alias (for example, // /company/windfarm/3/turbine/7/temperature). To define an asset property's alias, // see UpdateAssetProperty // (https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_UpdateAssetProperty.html). diff --git a/service/iotsitewise/api_op_CreatePortal.go b/service/iotsitewise/api_op_CreatePortal.go index 8d6f858728d..fe478f9760f 100644 --- a/service/iotsitewise/api_op_CreatePortal.go +++ b/service/iotsitewise/api_op_CreatePortal.go @@ -65,21 +65,20 @@ type CreatePortalInput struct { // The service to use to authenticate users to the portal. Choose from the // following options: // - // * SSO – The portal uses AWS Single Sign-On to - // authenticate users and manage user permissions. Before you can create a portal - // that uses AWS SSO, you must enable AWS SSO. For more information, see Enabling - // AWS SSO + // * SSO – The portal uses AWS Single Sign-On to authenticate + // users and manage user permissions. Before you can create a portal that uses AWS + // SSO, you must enable AWS SSO. For more information, see Enabling AWS SSO // (https://docs.aws.amazon.com/iot-sitewise/latest/userguide/monitor-get-started.html#mon-gs-sso) // in the AWS IoT SiteWise User Guide. This option is only available in AWS Regions // other than the China Regions. // - // * IAM – The portal uses AWS Identity and - // Access Management (IAM) to authenticate users and manage user permissions. IAM - // users must have the iotsitewise:CreatePresignedPortalUrl permission to sign in - // to the portal. This option is only available in the China Regions. + // * IAM – The portal uses AWS Identity and Access + // Management (IAM) to authenticate users and manage user permissions. IAM users + // must have the iotsitewise:CreatePresignedPortalUrl permission to sign in to the + // portal. This option is only available in the China Regions. // - // You can't - // change this value after you create a portal. Default: SSO + // You can't change + // this value after you create a portal. Default: SSO PortalAuthMode types.AuthMode // A description for the portal. diff --git a/service/iotsitewise/api_op_DescribeGatewayCapabilityConfiguration.go b/service/iotsitewise/api_op_DescribeGatewayCapabilityConfiguration.go index 04347454489..2c4d7856891 100644 --- a/service/iotsitewise/api_op_DescribeGatewayCapabilityConfiguration.go +++ b/service/iotsitewise/api_op_DescribeGatewayCapabilityConfiguration.go @@ -68,13 +68,13 @@ type DescribeGatewayCapabilityConfigurationOutput struct { // The synchronization status of the capability configuration. The sync status can // be one of the following: // - // * IN_SYNC – The gateway is running the capability + // * IN_SYNC – The gateway is running the capability // configuration. 
// - // * OUT_OF_SYNC – The gateway hasn't received the capability + // * OUT_OF_SYNC – The gateway hasn't received the capability // configuration. // - // * SYNC_FAILED – The gateway rejected the capability + // * SYNC_FAILED – The gateway rejected the capability // configuration. // // This member is required. diff --git a/service/iotsitewise/api_op_GetAssetPropertyAggregates.go b/service/iotsitewise/api_op_GetAssetPropertyAggregates.go index ba3df5cb6bf..89adbd43451 100644 --- a/service/iotsitewise/api_op_GetAssetPropertyAggregates.go +++ b/service/iotsitewise/api_op_GetAssetPropertyAggregates.go @@ -19,10 +19,10 @@ import ( // in the AWS IoT SiteWise User Guide. To identify an asset property, you must // specify one of the following: // -// * The assetId and propertyId of an asset +// * The assetId and propertyId of an asset // property. // -// * A propertyAlias, which is a data stream alias (for example, +// * A propertyAlias, which is a data stream alias (for example, // /company/windfarm/3/turbine/7/temperature). To define an asset property's alias, // see UpdateAssetProperty // (https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_UpdateAssetProperty.html). diff --git a/service/iotsitewise/api_op_GetAssetPropertyValue.go b/service/iotsitewise/api_op_GetAssetPropertyValue.go index 9ffb8e086dc..7bbf85219fa 100644 --- a/service/iotsitewise/api_op_GetAssetPropertyValue.go +++ b/service/iotsitewise/api_op_GetAssetPropertyValue.go @@ -18,10 +18,10 @@ import ( // in the AWS IoT SiteWise User Guide. To identify an asset property, you must // specify one of the following: // -// * The assetId and propertyId of an asset +// * The assetId and propertyId of an asset // property. // -// * A propertyAlias, which is a data stream alias (for example, +// * A propertyAlias, which is a data stream alias (for example, // /company/windfarm/3/turbine/7/temperature). To define an asset property's alias, // see UpdateAssetProperty // (https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_UpdateAssetProperty.html). diff --git a/service/iotsitewise/api_op_GetAssetPropertyValueHistory.go b/service/iotsitewise/api_op_GetAssetPropertyValueHistory.go index 3075f51d990..7735cb822b0 100644 --- a/service/iotsitewise/api_op_GetAssetPropertyValueHistory.go +++ b/service/iotsitewise/api_op_GetAssetPropertyValueHistory.go @@ -19,10 +19,10 @@ import ( // in the AWS IoT SiteWise User Guide. To identify an asset property, you must // specify one of the following: // -// * The assetId and propertyId of an asset +// * The assetId and propertyId of an asset // property. // -// * A propertyAlias, which is a data stream alias (for example, +// * A propertyAlias, which is a data stream alias (for example, // /company/windfarm/3/turbine/7/temperature). To define an asset property's alias, // see UpdateAssetProperty // (https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_UpdateAssetProperty.html). diff --git a/service/iotsitewise/api_op_ListAssets.go b/service/iotsitewise/api_op_ListAssets.go index 37a3f89266a..a708d034db3 100644 --- a/service/iotsitewise/api_op_ListAssets.go +++ b/service/iotsitewise/api_op_ListAssets.go @@ -15,13 +15,13 @@ import ( // Retrieves a paginated list of asset summaries. You can use this operation to do // the following: // -// * List assets based on a specific asset model. +// * List assets based on a specific asset model. // -// * List -// top-level assets. +// * List top-level +// assets. // -// You can't use this operation to list all assets. 
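Editorial sketch, not part of the generated patch: the GetAssetPropertyValue family documented above identifies a property either by assetId plus propertyId or by a propertyAlias. A sketch of the alias form, reusing the alias quoted in the documentation:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/iotsitewise"
)

func main() {
	ctx := context.Background()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := iotsitewise.NewFromConfig(cfg)

	// Identify the property by its data stream alias rather than by
	// assetId and propertyId.
	out, err := client.GetAssetPropertyValue(ctx, &iotsitewise.GetAssetPropertyValueInput{
		PropertyAlias: aws.String("/company/windfarm/3/turbine/7/temperature"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", out.PropertyValue)
}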
To retrieve -// summaries for all of your assets, use ListAssetModels +// You can't use this operation to list all assets. To retrieve summaries +// for all of your assets, use ListAssetModels // (https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_ListAssetModels.html) // to get all of your asset model IDs. Then, use ListAssets to get all assets for // each asset model. @@ -49,14 +49,13 @@ type ListAssetsInput struct { // The filter for the requested list of assets. Choose one of the following // options: // - // * ALL – The list includes all assets for a given asset model ID. - // The assetModelId parameter is required if you filter by ALL. + // * ALL – The list includes all assets for a given asset model ID. The + // assetModelId parameter is required if you filter by ALL. // - // * TOP_LEVEL – - // The list includes only top-level assets in the asset hierarchy tree. + // * TOP_LEVEL – The list + // includes only top-level assets in the asset hierarchy tree. // - // Default: - // ALL + // Default: ALL Filter types.ListAssetsFilter // The maximum number of results to be returned per paginated request. Default: 50 diff --git a/service/iotsitewise/api_op_ListAssociatedAssets.go b/service/iotsitewise/api_op_ListAssociatedAssets.go index 7c75208ddc9..473f75c04ee 100644 --- a/service/iotsitewise/api_op_ListAssociatedAssets.go +++ b/service/iotsitewise/api_op_ListAssociatedAssets.go @@ -15,10 +15,10 @@ import ( // Retrieves a paginated list of associated assets. You can use this operation to // do the following: // -// * List child assets associated to a parent asset by a +// * List child assets associated to a parent asset by a // hierarchy that you specify. // -// * List an asset's parent asset. +// * List an asset's parent asset. func (c *Client) ListAssociatedAssets(ctx context.Context, params *ListAssociatedAssetsInput, optFns ...func(*Options)) (*ListAssociatedAssetsOutput, error) { if params == nil { params = &ListAssociatedAssetsInput{} @@ -60,11 +60,11 @@ type ListAssociatedAssetsInput struct { // The direction to list associated assets. Choose one of the following options: // - // - // * CHILD – The list includes all child assets associated to the asset. The + // * + // CHILD – The list includes all child assets associated to the asset. The // hierarchyId parameter is required if you choose CHILD. // - // * PARENT – The list + // * PARENT – The list // includes the asset's parent asset. // // Default: CHILD diff --git a/service/iotsitewise/api_op_UpdateGatewayCapabilityConfiguration.go b/service/iotsitewise/api_op_UpdateGatewayCapabilityConfiguration.go index cd1ecf2c77a..00a4646aa37 100644 --- a/service/iotsitewise/api_op_UpdateGatewayCapabilityConfiguration.go +++ b/service/iotsitewise/api_op_UpdateGatewayCapabilityConfiguration.go @@ -68,13 +68,13 @@ type UpdateGatewayCapabilityConfigurationOutput struct { // The synchronization status of the capability configuration. The sync status can // be one of the following: // - // * IN_SYNC – The gateway is running the capability + // * IN_SYNC – The gateway is running the capability // configuration. // - // * OUT_OF_SYNC – The gateway hasn't received the capability + // * OUT_OF_SYNC – The gateway hasn't received the capability // configuration. // - // * SYNC_FAILED – The gateway rejected the capability + // * SYNC_FAILED – The gateway rejected the capability // configuration. 
// // After you update a capability configuration, its sync status is diff --git a/service/iotsitewise/api_op_UpdatePortal.go b/service/iotsitewise/api_op_UpdatePortal.go index cf6f1918362..b6d95899979 100644 --- a/service/iotsitewise/api_op_UpdatePortal.go +++ b/service/iotsitewise/api_op_UpdatePortal.go @@ -66,11 +66,11 @@ type UpdatePortalInput struct { // Contains an image that is one of the following: // - // * An image file. Choose - // this option to upload a new image. + // * An image file. Choose this + // option to upload a new image. // - // * The ID of an existing image. Choose - // this option to keep an existing image. + // * The ID of an existing image. Choose this option + // to keep an existing image. PortalLogoImage *types.Image } diff --git a/service/iotsitewise/types/enums.go b/service/iotsitewise/types/enums.go index 6d3220acf44..21bc6ffdca5 100644 --- a/service/iotsitewise/types/enums.go +++ b/service/iotsitewise/types/enums.go @@ -6,12 +6,12 @@ type AggregateType string // Enum values for AggregateType const ( - AggregateTypeAverage AggregateType = "AVERAGE" - AggregateTypeCount AggregateType = "COUNT" - AggregateTypeMaximum AggregateType = "MAXIMUM" - AggregateTypeMinimum AggregateType = "MINIMUM" - AggregateTypeSum AggregateType = "SUM" - AggregateTypeStandard_deviation AggregateType = "STANDARD_DEVIATION" + AggregateTypeAverage AggregateType = "AVERAGE" + AggregateTypeCount AggregateType = "COUNT" + AggregateTypeMaximum AggregateType = "MAXIMUM" + AggregateTypeMinimum AggregateType = "MINIMUM" + AggregateTypeSum AggregateType = "SUM" + AggregateTypeStandardDeviation AggregateType = "STANDARD_DEVIATION" ) // Values returns all known values for AggregateType. Note that this can be @@ -32,7 +32,7 @@ type AssetErrorCode string // Enum values for AssetErrorCode const ( - AssetErrorCodeInternal_failure AssetErrorCode = "INTERNAL_FAILURE" + AssetErrorCodeInternalFailure AssetErrorCode = "INTERNAL_FAILURE" ) // Values returns all known values for AssetErrorCode. Note that this can be @@ -149,9 +149,9 @@ type CapabilitySyncStatus string // Enum values for CapabilitySyncStatus const ( - CapabilitySyncStatusIn_sync CapabilitySyncStatus = "IN_SYNC" - CapabilitySyncStatusOut_of_sync CapabilitySyncStatus = "OUT_OF_SYNC" - CapabilitySyncStatusSync_failed CapabilitySyncStatus = "SYNC_FAILED" + CapabilitySyncStatusInSync CapabilitySyncStatus = "IN_SYNC" + CapabilitySyncStatusOutOfSync CapabilitySyncStatus = "OUT_OF_SYNC" + CapabilitySyncStatusSyncFailed CapabilitySyncStatus = "SYNC_FAILED" ) // Values returns all known values for CapabilitySyncStatus. Note that this can be @@ -169,8 +169,8 @@ type ErrorCode string // Enum values for ErrorCode const ( - ErrorCodeValidation_error ErrorCode = "VALIDATION_ERROR" - ErrorCodeInternal_failure ErrorCode = "INTERNAL_FAILURE" + ErrorCodeValidationError ErrorCode = "VALIDATION_ERROR" + ErrorCodeInternalFailure ErrorCode = "INTERNAL_FAILURE" ) // Values returns all known values for ErrorCode. Note that this can be expanded in @@ -223,8 +223,8 @@ type ListAssetsFilter string // Enum values for ListAssetsFilter const ( - ListAssetsFilterAll ListAssetsFilter = "ALL" - ListAssetsFilterTop_level ListAssetsFilter = "TOP_LEVEL" + ListAssetsFilterAll ListAssetsFilter = "ALL" + ListAssetsFilterTopLevel ListAssetsFilter = "TOP_LEVEL" ) // Values returns all known values for ListAssetsFilter. 
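Editorial sketch, not part of the generated patch: the ListAssetsFilterTopLevel constant introduced above corresponds to the TOP_LEVEL filter described in the ListAssets documentation earlier, which needs no assetModelId. Output field names here are assumed from the service's API shapes:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/iotsitewise"
	"github.com/aws/aws-sdk-go-v2/service/iotsitewise/types"
)

func main() {
	ctx := context.Background()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := iotsitewise.NewFromConfig(cfg)

	// List only assets at the top of the hierarchy.
	out, err := client.ListAssets(ctx, &iotsitewise.ListAssetsInput{
		Filter: types.ListAssetsFilterTopLevel,
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, asset := range out.AssetSummaries {
		fmt.Println(aws.ToString(asset.Name))
	}
}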
Note that this can be @@ -261,9 +261,9 @@ type MonitorErrorCode string // Enum values for MonitorErrorCode const ( - MonitorErrorCodeInternal_failure MonitorErrorCode = "INTERNAL_FAILURE" - MonitorErrorCodeValidation_error MonitorErrorCode = "VALIDATION_ERROR" - MonitorErrorCodeLimit_exceeded MonitorErrorCode = "LIMIT_EXCEEDED" + MonitorErrorCodeInternalFailure MonitorErrorCode = "INTERNAL_FAILURE" + MonitorErrorCodeValidationError MonitorErrorCode = "VALIDATION_ERROR" + MonitorErrorCodeLimitExceeded MonitorErrorCode = "LIMIT_EXCEEDED" ) // Values returns all known values for MonitorErrorCode. Note that this can be diff --git a/service/iotsitewise/types/types.go b/service/iotsitewise/types/types.go index a430080d193..1d57201af35 100644 --- a/service/iotsitewise/types/types.go +++ b/service/iotsitewise/types/types.go @@ -540,13 +540,13 @@ type GatewayCapabilitySummary struct { // The synchronization status of the capability configuration. The sync status can // be one of the following: // - // * IN_SYNC – The gateway is running the capability + // * IN_SYNC – The gateway is running the capability // configuration. // - // * OUT_OF_SYNC – The gateway hasn't received the capability + // * OUT_OF_SYNC – The gateway hasn't received the capability // configuration. // - // * SYNC_FAILED – The gateway rejected the capability + // * SYNC_FAILED – The gateway rejected the capability // configuration. // // This member is required. @@ -657,11 +657,11 @@ type Identity struct { // Contains an image that is one of the following: // -// * An image file. Choose -// this option to upload a new image. +// * An image file. Choose this +// option to upload a new image. // -// * The ID of an existing image. Choose -// this option to keep an existing image. +// * The ID of an existing image. Choose this option +// to keep an existing image. type Image struct { // Contains an image file. diff --git a/service/iotthingsgraph/api_op_GetEntities.go b/service/iotthingsgraph/api_op_GetEntities.go index 01e3cb9cc32..8690db426ea 100644 --- a/service/iotthingsgraph/api_op_GetEntities.go +++ b/service/iotthingsgraph/api_op_GetEntities.go @@ -14,28 +14,28 @@ import ( // Gets definitions of the specified entities. Uses the latest version of the // user's namespace by default. This API returns the following TDM entities. // -// * +// * // Properties // -// * States +// * States // -// * Events +// * Events // -// * Actions +// * Actions // -// * Capabilities +// * Capabilities // -// * -// Mappings +// * Mappings // -// * Devices +// * +// Devices // -// * Device Models +// * Device Models // -// * Services +// * Services // -// This action -// doesn't return definitions for systems, flows, and deployments. +// This action doesn't return definitions for +// systems, flows, and deployments. 
func (c *Client) GetEntities(ctx context.Context, params *GetEntitiesInput, optFns ...func(*Options)) (*GetEntitiesOutput, error) { if params == nil { params = &GetEntitiesInput{} diff --git a/service/iotthingsgraph/types/enums.go b/service/iotthingsgraph/types/enums.go index 8b5f49b44bf..7093e823f32 100644 --- a/service/iotthingsgraph/types/enums.go +++ b/service/iotthingsgraph/types/enums.go @@ -40,10 +40,10 @@ type EntityFilterName string // Enum values for EntityFilterName const ( - EntityFilterNameName EntityFilterName = "NAME" - EntityFilterNameNamespace EntityFilterName = "NAMESPACE" - EntityFilterNameSemantic_type_path EntityFilterName = "SEMANTIC_TYPE_PATH" - EntityFilterNameReferenced_entity_id EntityFilterName = "REFERENCED_ENTITY_ID" + EntityFilterNameName EntityFilterName = "NAME" + EntityFilterNameNamespace EntityFilterName = "NAMESPACE" + EntityFilterNameSemanticTypePath EntityFilterName = "SEMANTIC_TYPE_PATH" + EntityFilterNameReferencedEntityId EntityFilterName = "REFERENCED_ENTITY_ID" ) // Values returns all known values for EntityFilterName. Note that this can be @@ -62,16 +62,16 @@ type EntityType string // Enum values for EntityType const ( - EntityTypeDevice EntityType = "DEVICE" - EntityTypeService EntityType = "SERVICE" - EntityTypeDevice_model EntityType = "DEVICE_MODEL" - EntityTypeCapability EntityType = "CAPABILITY" - EntityTypeState EntityType = "STATE" - EntityTypeAction EntityType = "ACTION" - EntityTypeEvent EntityType = "EVENT" - EntityTypeProperty EntityType = "PROPERTY" - EntityTypeMapping EntityType = "MAPPING" - EntityTypeEnum EntityType = "ENUM" + EntityTypeDevice EntityType = "DEVICE" + EntityTypeService EntityType = "SERVICE" + EntityTypeDeviceModel EntityType = "DEVICE_MODEL" + EntityTypeCapability EntityType = "CAPABILITY" + EntityTypeState EntityType = "STATE" + EntityTypeAction EntityType = "ACTION" + EntityTypeEvent EntityType = "EVENT" + EntityTypeProperty EntityType = "PROPERTY" + EntityTypeMapping EntityType = "MAPPING" + EntityTypeEnum EntityType = "ENUM" ) // Values returns all known values for EntityType. 
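Editorial sketch, not part of the generated patch: the renamed EntityType constants are typically supplied when searching or fetching TDM entities. This example uses SearchEntities; its input and output field names are assumptions here rather than something shown in this diff:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/iotthingsgraph"
	"github.com/aws/aws-sdk-go-v2/service/iotthingsgraph/types"
)

func main() {
	ctx := context.Background()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := iotthingsgraph.NewFromConfig(cfg)

	// Search for devices and device models using the renamed constants above.
	out, err := client.SearchEntities(ctx, &iotthingsgraph.SearchEntitiesInput{
		EntityTypes: []types.EntityType{
			types.EntityTypeDevice,
			types.EntityTypeDeviceModel,
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, d := range out.Descriptions {
		fmt.Println(aws.ToString(d.Id))
	}
}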
Note that this can be expanded @@ -96,23 +96,23 @@ type FlowExecutionEventType string // Enum values for FlowExecutionEventType const ( - FlowExecutionEventTypeExecution_started FlowExecutionEventType = "EXECUTION_STARTED" - FlowExecutionEventTypeExecution_failed FlowExecutionEventType = "EXECUTION_FAILED" - FlowExecutionEventTypeExecution_aborted FlowExecutionEventType = "EXECUTION_ABORTED" - FlowExecutionEventTypeExecution_succeeded FlowExecutionEventType = "EXECUTION_SUCCEEDED" - FlowExecutionEventTypeStep_started FlowExecutionEventType = "STEP_STARTED" - FlowExecutionEventTypeStep_failed FlowExecutionEventType = "STEP_FAILED" - FlowExecutionEventTypeStep_succeeded FlowExecutionEventType = "STEP_SUCCEEDED" - FlowExecutionEventTypeActivity_scheduled FlowExecutionEventType = "ACTIVITY_SCHEDULED" - FlowExecutionEventTypeActivity_started FlowExecutionEventType = "ACTIVITY_STARTED" - FlowExecutionEventTypeActivity_failed FlowExecutionEventType = "ACTIVITY_FAILED" - FlowExecutionEventTypeActivity_succeeded FlowExecutionEventType = "ACTIVITY_SUCCEEDED" - FlowExecutionEventTypeStart_flow_execution_task FlowExecutionEventType = "START_FLOW_EXECUTION_TASK" - FlowExecutionEventTypeSchedule_next_ready_steps_task FlowExecutionEventType = "SCHEDULE_NEXT_READY_STEPS_TASK" - FlowExecutionEventTypeThing_action_task FlowExecutionEventType = "THING_ACTION_TASK" - FlowExecutionEventTypeThing_action_task_failed FlowExecutionEventType = "THING_ACTION_TASK_FAILED" - FlowExecutionEventTypeThing_action_task_succeeded FlowExecutionEventType = "THING_ACTION_TASK_SUCCEEDED" - FlowExecutionEventTypeAcknowledge_task_message FlowExecutionEventType = "ACKNOWLEDGE_TASK_MESSAGE" + FlowExecutionEventTypeExecutionStarted FlowExecutionEventType = "EXECUTION_STARTED" + FlowExecutionEventTypeExecutionFailed FlowExecutionEventType = "EXECUTION_FAILED" + FlowExecutionEventTypeExecutionAborted FlowExecutionEventType = "EXECUTION_ABORTED" + FlowExecutionEventTypeExecutionSucceeded FlowExecutionEventType = "EXECUTION_SUCCEEDED" + FlowExecutionEventTypeStepStarted FlowExecutionEventType = "STEP_STARTED" + FlowExecutionEventTypeStepFailed FlowExecutionEventType = "STEP_FAILED" + FlowExecutionEventTypeStepSucceeded FlowExecutionEventType = "STEP_SUCCEEDED" + FlowExecutionEventTypeActivityScheduled FlowExecutionEventType = "ACTIVITY_SCHEDULED" + FlowExecutionEventTypeActivityStarted FlowExecutionEventType = "ACTIVITY_STARTED" + FlowExecutionEventTypeActivityFailed FlowExecutionEventType = "ACTIVITY_FAILED" + FlowExecutionEventTypeActivitySucceeded FlowExecutionEventType = "ACTIVITY_SUCCEEDED" + FlowExecutionEventTypeStartFlowExecutionTask FlowExecutionEventType = "START_FLOW_EXECUTION_TASK" + FlowExecutionEventTypeScheduleNextReadyStepsTask FlowExecutionEventType = "SCHEDULE_NEXT_READY_STEPS_TASK" + FlowExecutionEventTypeThingActionTask FlowExecutionEventType = "THING_ACTION_TASK" + FlowExecutionEventTypeThingActionTaskFailed FlowExecutionEventType = "THING_ACTION_TASK_FAILED" + FlowExecutionEventTypeThingActionTaskSucceeded FlowExecutionEventType = "THING_ACTION_TASK_SUCCEEDED" + FlowExecutionEventTypeAcknowledgeTaskMessage FlowExecutionEventType = "ACKNOWLEDGE_TASK_MESSAGE" ) // Values returns all known values for FlowExecutionEventType. 
Note that this can @@ -166,7 +166,7 @@ type FlowTemplateFilterName string // Enum values for FlowTemplateFilterName const ( - FlowTemplateFilterNameDevice_model_id FlowTemplateFilterName = "DEVICE_MODEL_ID" + FlowTemplateFilterNameDeviceModelId FlowTemplateFilterName = "DEVICE_MODEL_ID" ) // Values returns all known values for FlowTemplateFilterName. Note that this can @@ -182,9 +182,9 @@ type NamespaceDeletionStatus string // Enum values for NamespaceDeletionStatus const ( - NamespaceDeletionStatusIn_progress NamespaceDeletionStatus = "IN_PROGRESS" - NamespaceDeletionStatusSucceeded NamespaceDeletionStatus = "SUCCEEDED" - NamespaceDeletionStatusFailed NamespaceDeletionStatus = "FAILED" + NamespaceDeletionStatusInProgress NamespaceDeletionStatus = "IN_PROGRESS" + NamespaceDeletionStatusSucceeded NamespaceDeletionStatus = "SUCCEEDED" + NamespaceDeletionStatusFailed NamespaceDeletionStatus = "FAILED" ) // Values returns all known values for NamespaceDeletionStatus. Note that this can @@ -202,7 +202,7 @@ type NamespaceDeletionStatusErrorCodes string // Enum values for NamespaceDeletionStatusErrorCodes const ( - NamespaceDeletionStatusErrorCodesValidation_failed NamespaceDeletionStatusErrorCodes = "VALIDATION_FAILED" + NamespaceDeletionStatusErrorCodesValidationFailed NamespaceDeletionStatusErrorCodes = "VALIDATION_FAILED" ) // Values returns all known values for NamespaceDeletionStatusErrorCodes. Note that @@ -219,14 +219,14 @@ type SystemInstanceDeploymentStatus string // Enum values for SystemInstanceDeploymentStatus const ( - SystemInstanceDeploymentStatusNot_deployed SystemInstanceDeploymentStatus = "NOT_DEPLOYED" - SystemInstanceDeploymentStatusBootstrap SystemInstanceDeploymentStatus = "BOOTSTRAP" - SystemInstanceDeploymentStatusDeploy_in_progress SystemInstanceDeploymentStatus = "DEPLOY_IN_PROGRESS" - SystemInstanceDeploymentStatusDeployed_in_target SystemInstanceDeploymentStatus = "DEPLOYED_IN_TARGET" - SystemInstanceDeploymentStatusUndeploy_in_progress SystemInstanceDeploymentStatus = "UNDEPLOY_IN_PROGRESS" - SystemInstanceDeploymentStatusFailed SystemInstanceDeploymentStatus = "FAILED" - SystemInstanceDeploymentStatusPending_delete SystemInstanceDeploymentStatus = "PENDING_DELETE" - SystemInstanceDeploymentStatusDeleted_in_target SystemInstanceDeploymentStatus = "DELETED_IN_TARGET" + SystemInstanceDeploymentStatusNotDeployed SystemInstanceDeploymentStatus = "NOT_DEPLOYED" + SystemInstanceDeploymentStatusBootstrap SystemInstanceDeploymentStatus = "BOOTSTRAP" + SystemInstanceDeploymentStatusDeployInProgress SystemInstanceDeploymentStatus = "DEPLOY_IN_PROGRESS" + SystemInstanceDeploymentStatusDeployedInTarget SystemInstanceDeploymentStatus = "DEPLOYED_IN_TARGET" + SystemInstanceDeploymentStatusUndeployInProgress SystemInstanceDeploymentStatus = "UNDEPLOY_IN_PROGRESS" + SystemInstanceDeploymentStatusFailed SystemInstanceDeploymentStatus = "FAILED" + SystemInstanceDeploymentStatusPendingDelete SystemInstanceDeploymentStatus = "PENDING_DELETE" + SystemInstanceDeploymentStatusDeletedInTarget SystemInstanceDeploymentStatus = "DELETED_IN_TARGET" ) // Values returns all known values for SystemInstanceDeploymentStatus. 
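Another sketch outside the patch: calling code can group the SystemInstanceDeploymentStatus constants renamed above. The split into transitional and settled states below is this example's own assumption, not something the generated package defines.

package example

import "github.com/aws/aws-sdk-go-v2/service/iotthingsgraph/types"

// isTransitional reports whether a deployment is still moving between states.
// The classification is an assumption made for this illustration only.
func isTransitional(s types.SystemInstanceDeploymentStatus) bool {
	switch s {
	case types.SystemInstanceDeploymentStatusBootstrap,
		types.SystemInstanceDeploymentStatusDeployInProgress,
		types.SystemInstanceDeploymentStatusUndeployInProgress,
		types.SystemInstanceDeploymentStatusPendingDelete:
		return true
	default:
		// NOT_DEPLOYED, DEPLOYED_IN_TARGET, FAILED, and DELETED_IN_TARGET are
		// treated as settled here.
		return false
	}
}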
Note that @@ -250,9 +250,9 @@ type SystemInstanceFilterName string // Enum values for SystemInstanceFilterName const ( - SystemInstanceFilterNameSystem_template_id SystemInstanceFilterName = "SYSTEM_TEMPLATE_ID" - SystemInstanceFilterNameStatus SystemInstanceFilterName = "STATUS" - SystemInstanceFilterNameGreengrass_group_name SystemInstanceFilterName = "GREENGRASS_GROUP_NAME" + SystemInstanceFilterNameSystemTemplateId SystemInstanceFilterName = "SYSTEM_TEMPLATE_ID" + SystemInstanceFilterNameStatus SystemInstanceFilterName = "STATUS" + SystemInstanceFilterNameGreengrassGroupName SystemInstanceFilterName = "GREENGRASS_GROUP_NAME" ) // Values returns all known values for SystemInstanceFilterName. Note that this can @@ -270,7 +270,7 @@ type SystemTemplateFilterName string // Enum values for SystemTemplateFilterName const ( - SystemTemplateFilterNameFlow_template_id SystemTemplateFilterName = "FLOW_TEMPLATE_ID" + SystemTemplateFilterNameFlowTemplateId SystemTemplateFilterName = "FLOW_TEMPLATE_ID" ) // Values returns all known values for SystemTemplateFilterName. Note that this can @@ -286,9 +286,9 @@ type UploadStatus string // Enum values for UploadStatus const ( - UploadStatusIn_progress UploadStatus = "IN_PROGRESS" - UploadStatusSucceeded UploadStatus = "SUCCEEDED" - UploadStatusFailed UploadStatus = "FAILED" + UploadStatusInProgress UploadStatus = "IN_PROGRESS" + UploadStatusSucceeded UploadStatus = "SUCCEEDED" + UploadStatusFailed UploadStatus = "FAILED" ) // Values returns all known values for UploadStatus. Note that this can be expanded diff --git a/service/ivs/api_op_CreateChannel.go b/service/ivs/api_op_CreateChannel.go index 55f6c0997d0..dcbb1a181c3 100644 --- a/service/ivs/api_op_CreateChannel.go +++ b/service/ivs/api_op_CreateChannel.go @@ -45,14 +45,14 @@ type CreateChannelInput struct { // exceed the allowable resolution or bitrate, the stream probably will disconnect // immediately. Valid values: // - // * STANDARD: Multiple qualities are generated - // from the original input, to automatically give viewers the best experience for - // their devices and network conditions. Vertical resolution can be up to 1080 and + // * STANDARD: Multiple qualities are generated from + // the original input, to automatically give viewers the best experience for their + // devices and network conditions. Vertical resolution can be up to 1080 and // bitrate can be up to 8.5 Mbps. // - // * BASIC: Amazon IVS delivers the original - // input to viewers. The viewer’s video-quality choice is limited to the original - // input. Vertical resolution can be up to 480 and bitrate can be up to 1.5 + // * BASIC: Amazon IVS delivers the original input + // to viewers. The viewer’s video-quality choice is limited to the original input. + // Vertical resolution can be up to 480 and bitrate can be up to 1.5 // Mbps. // // Default: STANDARD. diff --git a/service/ivs/api_op_UpdateChannel.go b/service/ivs/api_op_UpdateChannel.go index 365c7d05b1a..e5ca5b3a472 100644 --- a/service/ivs/api_op_UpdateChannel.go +++ b/service/ivs/api_op_UpdateChannel.go @@ -49,14 +49,14 @@ type UpdateChannelInput struct { // exceed the allowable resolution or bitrate, the stream probably will disconnect // immediately. Valid values: // - // * STANDARD: Multiple qualities are generated - // from the original input, to automatically give viewers the best experience for - // their devices and network conditions. 
Vertical resolution can be up to 1080 and + // * STANDARD: Multiple qualities are generated from + // the original input, to automatically give viewers the best experience for their + // devices and network conditions. Vertical resolution can be up to 1080 and // bitrate can be up to 8.5 Mbps. // - // * BASIC: Amazon IVS delivers the original - // input to viewers. The viewer’s video-quality choice is limited to the original - // input. Vertical resolution can be up to 480 and bitrate can be up to 1.5 + // * BASIC: Amazon IVS delivers the original input + // to viewers. The viewer’s video-quality choice is limited to the original input. + // Vertical resolution can be up to 480 and bitrate can be up to 1.5 // Mbps. // // Default: STANDARD. diff --git a/service/ivs/doc.go b/service/ivs/doc.go index b72badac581..96975c0c907 100644 --- a/service/ivs/doc.go +++ b/service/ivs/doc.go @@ -14,61 +14,59 @@ // Endpoints The following are the Amazon IVS service endpoints (all HTTPS): Region // name: US West (Oregon) // -// * Region: us-west-2 +// * Region: us-west-2 // -// * Endpoint: +// * Endpoint: // ivs.us-west-2.amazonaws.com // // Region name: US East (Virginia) // -// * Region: +// * Region: // us-east-1 // -// * Endpoint: ivs.us-east-1.amazonaws.com +// * Endpoint: ivs.us-east-1.amazonaws.com // // Region name: EU West // (Dublin) // -// * Region: eu-west-1 +// * Region: eu-west-1 // -// * Endpoint: -// ivs.eu-west-1.amazonaws.com +// * Endpoint: ivs.eu-west-1.amazonaws.com // -// Allowed Header Values +// Allowed +// Header Values // -// * Accept: -// application/json +// * Accept: application/json // -// * Accept-Encoding: gzip, deflate +// * Accept-Encoding: gzip, deflate // -// * Content-Type: -// application/json +// * +// Content-Type: application/json // -// Resources The following resources contain information about -// your IVS live stream (see Getting Started with Amazon IVS +// Resources The following resources contain +// information about your IVS live stream (see Getting Started with Amazon IVS // (https://docs.aws.amazon.com/ivs/latest/userguide/GSIVS.html)): // -// * Channel — +// * Channel — // Stores configuration data related to your live stream. You first create a // channel and then use the channel’s stream key to start your live stream. See the // Channel endpoints for more information. // -// * Stream key — An identifier -// assigned by Amazon IVS when you create a channel, which is then used to -// authorize streaming. See the StreamKey endpoints for more information. Treat the -// stream key like a secret, since it allows anyone to stream to the channel. -// -// -// * Playback key pair — Video playback may be restricted using -// playback-authorization tokens, which use public-key encryption. A playback key -// pair is the public-private pair of keys used to sign and validate the -// playback-authorization token. See the PlaybackKeyPair endpoints for more -// information. -// -// Tagging A tag is a metadata label that you assign to an AWS -// resource. A tag comprises a key and a value, both set by you. For example, you -// might set a tag as topic:nature to label a particular video category. See -// Tagging AWS Resources +// * Stream key — An identifier assigned +// by Amazon IVS when you create a channel, which is then used to authorize +// streaming. See the StreamKey endpoints for more information. Treat the stream +// key like a secret, since it allows anyone to stream to the channel. 
+// +// * Playback +// key pair — Video playback may be restricted using playback-authorization tokens, +// which use public-key encryption. A playback key pair is the public-private pair +// of keys used to sign and validate the playback-authorization token. See the +// PlaybackKeyPair endpoints for more information. +// +// Tagging A tag is a metadata +// label that you assign to an AWS resource. A tag comprises a key and a value, +// both set by you. For example, you might set a tag as topic:nature to label a +// particular video category. See Tagging AWS Resources // (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) for more // information, including restrictions that apply to tags. Tags can help you // identify and organize your AWS resources. For example, you can use the same tag @@ -79,92 +77,91 @@ // ListTagsForResource. The following resources support tagging: Channels, Stream // Keys, and Playback Key Pairs. Channel Endpoints // -// * CreateChannel — Creates a -// new channel and an associated stream key to start streaming. +// * CreateChannel — Creates a new +// channel and an associated stream key to start streaming. // -// * GetChannel — -// Gets the channel configuration for the specified channel ARN (Amazon Resource +// * GetChannel — Gets +// the channel configuration for the specified channel ARN (Amazon Resource // Name). // -// * BatchGetChannel — Performs GetChannel on multiple ARNs +// * BatchGetChannel — Performs GetChannel on multiple ARNs // simultaneously. // -// * ListChannels — Gets summary information about all -// channels in your account, in the AWS region where the API request is processed. -// This list can be filtered to match a specified string. +// * ListChannels — Gets summary information about all channels in +// your account, in the AWS region where the API request is processed. This list +// can be filtered to match a specified string. // -// * UpdateChannel — -// Updates a channel's configuration. This does not affect an ongoing stream of -// this channel. You must stop and restart the stream for the changes to take -// effect. +// * UpdateChannel — Updates a +// channel's configuration. This does not affect an ongoing stream of this channel. +// You must stop and restart the stream for the changes to take effect. // -// * DeleteChannel — Deletes the specified channel. +// * +// DeleteChannel — Deletes the specified channel. // -// StreamKey -// Endpoints +// StreamKey Endpoints // -// * CreateStreamKey — Creates a stream key, used to initiate a -// stream, for the specified channel ARN. +// * +// CreateStreamKey — Creates a stream key, used to initiate a stream, for the +// specified channel ARN. // -// * GetStreamKey — Gets stream key -// information for the specified ARN. +// * GetStreamKey — Gets stream key information for the +// specified ARN. // -// * BatchGetStreamKey — Performs -// GetStreamKey on multiple ARNs simultaneously. +// * BatchGetStreamKey — Performs GetStreamKey on multiple ARNs +// simultaneously. // -// * ListStreamKeys — Gets -// summary information about stream keys for the specified channel. +// * ListStreamKeys — Gets summary information about stream keys +// for the specified channel. // -// * -// DeleteStreamKey — Deletes the stream key for the specified ARN, so it can no -// longer be used to stream. +// * DeleteStreamKey — Deletes the stream key for the +// specified ARN, so it can no longer be used to stream. 
// // Stream Endpoints // -// * GetStream — Gets information -// about the active (live) stream on a specified channel. +// * +// GetStream — Gets information about the active (live) stream on a specified +// channel. // -// * ListStreams — Gets -// summary information about live streams in your account, in the AWS region where -// the API request is processed. +// * ListStreams — Gets summary information about live streams in your +// account, in the AWS region where the API request is processed. // -// * StopStream — Disconnects the incoming RTMPS -// stream for the specified channel. Can be used in conjunction with -// DeleteStreamKey to prevent further streaming to a channel. +// * StopStream — +// Disconnects the incoming RTMPS stream for the specified channel. Can be used in +// conjunction with DeleteStreamKey to prevent further streaming to a channel. // -// * PutMetadata — -// Inserts metadata into an RTMPS stream for the specified channel. A maximum of 5 -// requests per second per channel is allowed, each with a maximum 1KB +// * +// PutMetadata — Inserts metadata into an RTMPS stream for the specified channel. A +// maximum of 5 requests per second per channel is allowed, each with a maximum 1KB // payload. // // PlaybackKeyPair Endpoints // -// * ImportPlaybackKeyPair — Imports the +// * ImportPlaybackKeyPair — Imports the // public portion of a new key pair and returns its arn and fingerprint. The // privateKey can then be used to generate viewer authorization tokens, to grant // viewers access to authorized channels. // -// * GetPlaybackKeyPair — Gets a -// specified playback authorization key pair and returns the arn and fingerprint. -// The privateKey held by the caller can be used to generate viewer authorization +// * GetPlaybackKeyPair — Gets a specified +// playback authorization key pair and returns the arn and fingerprint. The +// privateKey held by the caller can be used to generate viewer authorization // tokens, to grant viewers access to authorized channels. // -// * -// ListPlaybackKeyPairs — Gets summary information about playback key pairs. +// * ListPlaybackKeyPairs +// — Gets summary information about playback key pairs. // -// * -// DeletePlaybackKeyPair — Deletes a specified authorization key pair. This -// invalidates future viewer tokens generated using the key pair’s privateKey. +// * DeletePlaybackKeyPair — +// Deletes a specified authorization key pair. This invalidates future viewer +// tokens generated using the key pair’s privateKey. // -// AWS -// Tags Endpoints +// AWS Tags Endpoints // -// * TagResource — Adds or updates tags for the AWS resource -// with the specified ARN. +// * +// TagResource — Adds or updates tags for the AWS resource with the specified +// ARN. // -// * UntagResource — Removes tags from the resource -// with the specified ARN. +// * UntagResource — Removes tags from the resource with the specified +// ARN. // -// * ListTagsForResource — Gets information about AWS -// tags for the specified ARN. +// * ListTagsForResource — Gets information about AWS tags for the specified +// ARN. package ivs diff --git a/service/ivs/types/types.go b/service/ivs/types/types.go index a3ecde67798..86837f44424 100644 --- a/service/ivs/types/types.go +++ b/service/ivs/types/types.go @@ -48,14 +48,14 @@ type Channel struct { // exceed the allowable resolution or bitrate, the stream probably will disconnect // immediately. 
Valid values: // - // * STANDARD: Multiple qualities are generated - // from the original input, to automatically give viewers the best experience for - // their devices and network conditions. Vertical resolution can be up to 1080 and + // * STANDARD: Multiple qualities are generated from + // the original input, to automatically give viewers the best experience for their + // devices and network conditions. Vertical resolution can be up to 1080 and // bitrate can be up to 8.5 Mbps. // - // * BASIC: Amazon IVS delivers the original - // input to viewers. The viewer’s video-quality choice is limited to the original - // input. Vertical resolution can be up to 480 and bitrate can be up to 1.5 + // * BASIC: Amazon IVS delivers the original input + // to viewers. The viewer’s video-quality choice is limited to the original input. + // Vertical resolution can be up to 480 and bitrate can be up to 1.5 // Mbps. // // Default: STANDARD. diff --git a/service/kafka/api_op_UntagResource.go b/service/kafka/api_op_UntagResource.go index 3f8406b3965..0606326f8d3 100644 --- a/service/kafka/api_op_UntagResource.go +++ b/service/kafka/api_op_UntagResource.go @@ -37,20 +37,19 @@ type UntagResourceInput struct { // Tag keys must be unique for a given cluster. In addition, the following // restrictions apply: // - // * Each tag key must be unique. If you add a tag with a - // key that's already in use, your new tag overwrites the existing key-value - // pair. + // * Each tag key must be unique. If you add a tag with a key + // that's already in use, your new tag overwrites the existing key-value pair. // - // * You can't start a tag key with aws: because this prefix is reserved - // for use by AWS. AWS creates tags that begin with this prefix on your behalf, but - // you can't edit or delete them. + // * + // You can't start a tag key with aws: because this prefix is reserved for use by + // AWS. AWS creates tags that begin with this prefix on your behalf, but you can't + // edit or delete them. // - // * Tag keys must be between 1 and 128 Unicode - // characters in length. + // * Tag keys must be between 1 and 128 Unicode characters in + // length. // - // * Tag keys must consist of the following characters: - // Unicode letters, digits, white space, and the following special characters: _ . - // / = + - @. + // * Tag keys must consist of the following characters: Unicode letters, + // digits, white space, and the following special characters: _ . / = + - @. // // This member is required. TagKeys []*string diff --git a/service/kafka/types/enums.go b/service/kafka/types/enums.go index c4d9a5b0c24..ef90d705b2e 100644 --- a/service/kafka/types/enums.go +++ b/service/kafka/types/enums.go @@ -22,9 +22,9 @@ type ClientBroker string // Enum values for ClientBroker const ( - ClientBrokerTls ClientBroker = "TLS" - ClientBrokerTls_plaintext ClientBroker = "TLS_PLAINTEXT" - ClientBrokerPlaintext ClientBroker = "PLAINTEXT" + ClientBrokerTls ClientBroker = "TLS" + ClientBrokerTlsPlaintext ClientBroker = "TLS_PLAINTEXT" + ClientBrokerPlaintext ClientBroker = "PLAINTEXT" ) // Values returns all known values for ClientBroker. 
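A quick hedged check of what the kafka ClientBroker rename above does and does not change: only the Go-side identifier moves to CamelCase; the serialized value is identical, so comparisons against the raw wire string still hold. The import path is assumed from the service/kafka/types location in the diff.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/kafka/types"
)

func main() {
	// A value built from the raw wire string compares equal to the renamed
	// constant, so persisted or logged values keep working after the rename.
	old := types.ClientBroker("TLS_PLAINTEXT")
	fmt.Println(old == types.ClientBrokerTlsPlaintext) // true
}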
Note that this can be expanded @@ -66,9 +66,9 @@ type ConfigurationState string // Enum values for ConfigurationState const ( - ConfigurationStateActive ConfigurationState = "ACTIVE" - ConfigurationStateDeleting ConfigurationState = "DELETING" - ConfigurationStateDelete_failed ConfigurationState = "DELETE_FAILED" + ConfigurationStateActive ConfigurationState = "ACTIVE" + ConfigurationStateDeleting ConfigurationState = "DELETING" + ConfigurationStateDeleteFailed ConfigurationState = "DELETE_FAILED" ) // Values returns all known values for ConfigurationState. Note that this can be @@ -86,9 +86,9 @@ type EnhancedMonitoring string // Enum values for EnhancedMonitoring const ( - EnhancedMonitoringDefault EnhancedMonitoring = "DEFAULT" - EnhancedMonitoringPer_broker EnhancedMonitoring = "PER_BROKER" - EnhancedMonitoringPer_topic_per_broker EnhancedMonitoring = "PER_TOPIC_PER_BROKER" + EnhancedMonitoringDefault EnhancedMonitoring = "DEFAULT" + EnhancedMonitoringPerBroker EnhancedMonitoring = "PER_BROKER" + EnhancedMonitoringPerTopicPerBroker EnhancedMonitoring = "PER_TOPIC_PER_BROKER" ) // Values returns all known values for EnhancedMonitoring. Note that this can be diff --git a/service/kendra/api_op_BatchPutDocument.go b/service/kendra/api_op_BatchPutDocument.go index acdd308fc49..533518ead42 100644 --- a/service/kendra/api_op_BatchPutDocument.go +++ b/service/kendra/api_op_BatchPutDocument.go @@ -39,12 +39,12 @@ type BatchPutDocumentInput struct { // One or more documents to add to the index. Documents have the following file // size limits. // - // * 5 MB total size for inline documents + // * 5 MB total size for inline documents // - // * 50 MB total size - // for files from an S3 bucket + // * 50 MB total size for + // files from an S3 bucket // - // * 5 MB extracted text for any file + // * 5 MB extracted text for any file // // For more // information about file size and transaction per second quotas, see Quotas diff --git a/service/kendra/api_op_Query.go b/service/kendra/api_op_Query.go index 093d4a43c5b..c521320ebea 100644 --- a/service/kendra/api_op_Query.go +++ b/service/kendra/api_op_Query.go @@ -18,17 +18,16 @@ import ( // Kendra searches your index for text content and question and answer (FAQ) // content. By default the response contains three types of results. // -// * -// Relevant passages +// * Relevant +// passages // -// * Matching FAQs +// * Matching FAQs // -// * Relevant documents +// * Relevant documents // -// You can -// specify that the query return only one type of result using the -// QueryResultTypeConfig parameter. Each query returns the 100 most relevant -// results. +// You can specify that the query +// return only one type of result using the QueryResultTypeConfig parameter. Each +// query returns the 100 most relevant results. 
func (c *Client) Query(ctx context.Context, params *QueryInput, optFns ...func(*Options)) (*QueryOutput, error) { if params == nil { params = &QueryInput{} diff --git a/service/kendra/types/enums.go b/service/kendra/types/enums.go index eddbab717e5..33d278dac77 100644 --- a/service/kendra/types/enums.go +++ b/service/kendra/types/enums.go @@ -6,7 +6,7 @@ type AdditionalResultAttributeValueType string // Enum values for AdditionalResultAttributeValueType const ( - AdditionalResultAttributeValueTypeText_with_highlights_value AdditionalResultAttributeValueType = "TEXT_WITH_HIGHLIGHTS_VALUE" + AdditionalResultAttributeValueTypeTextWithHighlightsValue AdditionalResultAttributeValueType = "TEXT_WITH_HIGHLIGHTS_VALUE" ) // Values returns all known values for AdditionalResultAttributeValueType. Note @@ -23,11 +23,11 @@ type ContentType string // Enum values for ContentType const ( - ContentTypePdf ContentType = "PDF" - ContentTypeHtml ContentType = "HTML" - ContentTypeMs_word ContentType = "MS_WORD" - ContentTypePlain_text ContentType = "PLAIN_TEXT" - ContentTypePpt ContentType = "PPT" + ContentTypePdf ContentType = "PDF" + ContentTypeHtml ContentType = "HTML" + ContentTypeMsWord ContentType = "MS_WORD" + ContentTypePlainText ContentType = "PLAIN_TEXT" + ContentTypePpt ContentType = "PPT" ) // Values returns all known values for ContentType. Note that this can be expanded @@ -47,10 +47,10 @@ type DatabaseEngineType string // Enum values for DatabaseEngineType const ( - DatabaseEngineTypeRds_aurora_mysql DatabaseEngineType = "RDS_AURORA_MYSQL" - DatabaseEngineTypeRds_aurora_postgresql DatabaseEngineType = "RDS_AURORA_POSTGRESQL" - DatabaseEngineTypeRds_mysql DatabaseEngineType = "RDS_MYSQL" - DatabaseEngineTypeRds_postgresql DatabaseEngineType = "RDS_POSTGRESQL" + DatabaseEngineTypeRdsAuroraMysql DatabaseEngineType = "RDS_AURORA_MYSQL" + DatabaseEngineTypeRdsAuroraPostgresql DatabaseEngineType = "RDS_AURORA_POSTGRESQL" + DatabaseEngineTypeRdsMysql DatabaseEngineType = "RDS_MYSQL" + DatabaseEngineTypeRdsPostgresql DatabaseEngineType = "RDS_POSTGRESQL" ) // Values returns all known values for DatabaseEngineType. Note that this can be @@ -93,13 +93,13 @@ type DataSourceSyncJobStatus string // Enum values for DataSourceSyncJobStatus const ( - DataSourceSyncJobStatusFailed DataSourceSyncJobStatus = "FAILED" - DataSourceSyncJobStatusSucceeded DataSourceSyncJobStatus = "SUCCEEDED" - DataSourceSyncJobStatusSyncing DataSourceSyncJobStatus = "SYNCING" - DataSourceSyncJobStatusIncomplete DataSourceSyncJobStatus = "INCOMPLETE" - DataSourceSyncJobStatusStopping DataSourceSyncJobStatus = "STOPPING" - DataSourceSyncJobStatusAborted DataSourceSyncJobStatus = "ABORTED" - DataSourceSyncJobStatusSyncing_indexing DataSourceSyncJobStatus = "SYNCING_INDEXING" + DataSourceSyncJobStatusFailed DataSourceSyncJobStatus = "FAILED" + DataSourceSyncJobStatusSucceeded DataSourceSyncJobStatus = "SUCCEEDED" + DataSourceSyncJobStatusSyncing DataSourceSyncJobStatus = "SYNCING" + DataSourceSyncJobStatusIncomplete DataSourceSyncJobStatus = "INCOMPLETE" + DataSourceSyncJobStatusStopping DataSourceSyncJobStatus = "STOPPING" + DataSourceSyncJobStatusAborted DataSourceSyncJobStatus = "ABORTED" + DataSourceSyncJobStatusSyncingIndexing DataSourceSyncJobStatus = "SYNCING_INDEXING" ) // Values returns all known values for DataSourceSyncJobStatus. 
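Sketch under stated assumptions: the kendra ContentType constants renamed above (ContentTypeMsWord, ContentTypePlainText, and so on) are typically chosen per document. The extension-to-type mapping below is this example's own convention, not something the SDK provides.

package example

import (
	"strings"

	"github.com/aws/aws-sdk-go-v2/service/kendra/types"
)

// contentTypeFor maps a file extension to one of the generated kendra
// ContentType values. The mapping itself is an assumption for illustration.
func contentTypeFor(ext string) (types.ContentType, bool) {
	switch strings.ToLower(ext) {
	case ".pdf":
		return types.ContentTypePdf, true
	case ".html", ".htm":
		return types.ContentTypeHtml, true
	case ".doc", ".docx":
		return types.ContentTypeMsWord, true
	case ".txt":
		return types.ContentTypePlainText, true
	case ".ppt", ".pptx":
		return types.ContentTypePpt, true
	default:
		return "", false
	}
}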
Note that this can @@ -149,10 +149,10 @@ type DocumentAttributeValueType string // Enum values for DocumentAttributeValueType const ( - DocumentAttributeValueTypeString_value DocumentAttributeValueType = "STRING_VALUE" - DocumentAttributeValueTypeString_list_value DocumentAttributeValueType = "STRING_LIST_VALUE" - DocumentAttributeValueTypeLong_value DocumentAttributeValueType = "LONG_VALUE" - DocumentAttributeValueTypeDate_value DocumentAttributeValueType = "DATE_VALUE" + DocumentAttributeValueTypeStringValue DocumentAttributeValueType = "STRING_VALUE" + DocumentAttributeValueTypeStringListValue DocumentAttributeValueType = "STRING_LIST_VALUE" + DocumentAttributeValueTypeLongValue DocumentAttributeValueType = "LONG_VALUE" + DocumentAttributeValueTypeDateValue DocumentAttributeValueType = "DATE_VALUE" ) // Values returns all known values for DocumentAttributeValueType. Note that this @@ -171,8 +171,8 @@ type ErrorCode string // Enum values for ErrorCode const ( - ErrorCodeInternal_error ErrorCode = "InternalError" - ErrorCodeInvalid_request ErrorCode = "InvalidRequest" + ErrorCodeInternalError ErrorCode = "InternalError" + ErrorCodeInvalidRequest ErrorCode = "InvalidRequest" ) // Values returns all known values for ErrorCode. Note that this can be expanded in @@ -189,9 +189,9 @@ type FaqFileFormat string // Enum values for FaqFileFormat const ( - FaqFileFormatCsv FaqFileFormat = "CSV" - FaqFileFormatCsv_with_header FaqFileFormat = "CSV_WITH_HEADER" - FaqFileFormatJson FaqFileFormat = "JSON" + FaqFileFormatCsv FaqFileFormat = "CSV" + FaqFileFormatCsvWithHeader FaqFileFormat = "CSV_WITH_HEADER" + FaqFileFormatJson FaqFileFormat = "JSON" ) // Values returns all known values for FaqFileFormat. Note that this can be @@ -233,8 +233,8 @@ type IndexEdition string // Enum values for IndexEdition const ( - IndexEditionDeveloper_edition IndexEdition = "DEVELOPER_EDITION" - IndexEditionEnterprise_edition IndexEdition = "ENTERPRISE_EDITION" + IndexEditionDeveloperEdition IndexEdition = "DEVELOPER_EDITION" + IndexEditionEnterpriseEdition IndexEdition = "ENTERPRISE_EDITION" ) // Values returns all known values for IndexEdition. Note that this can be expanded @@ -251,12 +251,12 @@ type IndexStatus string // Enum values for IndexStatus const ( - IndexStatusCreating IndexStatus = "CREATING" - IndexStatusActive IndexStatus = "ACTIVE" - IndexStatusDeleting IndexStatus = "DELETING" - IndexStatusFailed IndexStatus = "FAILED" - IndexStatusUpdating IndexStatus = "UPDATING" - IndexStatusSystem_updating IndexStatus = "SYSTEM_UPDATING" + IndexStatusCreating IndexStatus = "CREATING" + IndexStatusActive IndexStatus = "ACTIVE" + IndexStatusDeleting IndexStatus = "DELETING" + IndexStatusFailed IndexStatus = "FAILED" + IndexStatusUpdating IndexStatus = "UPDATING" + IndexStatusSystemUpdating IndexStatus = "SYSTEM_UPDATING" ) // Values returns all known values for IndexStatus. Note that this can be expanded @@ -313,8 +313,8 @@ type QueryIdentifiersEnclosingOption string // Enum values for QueryIdentifiersEnclosingOption const ( - QueryIdentifiersEnclosingOptionDouble_quotes QueryIdentifiersEnclosingOption = "DOUBLE_QUOTES" - QueryIdentifiersEnclosingOptionNone QueryIdentifiersEnclosingOption = "NONE" + QueryIdentifiersEnclosingOptionDoubleQuotes QueryIdentifiersEnclosingOption = "DOUBLE_QUOTES" + QueryIdentifiersEnclosingOptionNone QueryIdentifiersEnclosingOption = "NONE" ) // Values returns all known values for QueryIdentifiersEnclosingOption. 
Note that @@ -332,9 +332,9 @@ type QueryResultType string // Enum values for QueryResultType const ( - QueryResultTypeDocument QueryResultType = "DOCUMENT" - QueryResultTypeQuestion_answer QueryResultType = "QUESTION_ANSWER" - QueryResultTypeAnswer QueryResultType = "ANSWER" + QueryResultTypeDocument QueryResultType = "DOCUMENT" + QueryResultTypeQuestionAnswer QueryResultType = "QUESTION_ANSWER" + QueryResultTypeAnswer QueryResultType = "ANSWER" ) // Values returns all known values for QueryResultType. Note that this can be @@ -370,8 +370,8 @@ type RelevanceType string // Enum values for RelevanceType const ( - RelevanceTypeRelevant RelevanceType = "RELEVANT" - RelevanceTypeNot_relevant RelevanceType = "NOT_RELEVANT" + RelevanceTypeRelevant RelevanceType = "RELEVANT" + RelevanceTypeNotRelevant RelevanceType = "NOT_RELEVANT" ) // Values returns all known values for RelevanceType. Note that this can be @@ -388,8 +388,8 @@ type SalesforceChatterFeedIncludeFilterType string // Enum values for SalesforceChatterFeedIncludeFilterType const ( - SalesforceChatterFeedIncludeFilterTypeActive_user SalesforceChatterFeedIncludeFilterType = "ACTIVE_USER" - SalesforceChatterFeedIncludeFilterTypeStandard_user SalesforceChatterFeedIncludeFilterType = "STANDARD_USER" + SalesforceChatterFeedIncludeFilterTypeActiveUser SalesforceChatterFeedIncludeFilterType = "ACTIVE_USER" + SalesforceChatterFeedIncludeFilterTypeStandardUser SalesforceChatterFeedIncludeFilterType = "STANDARD_USER" ) // Values returns all known values for SalesforceChatterFeedIncludeFilterType. Note @@ -476,10 +476,10 @@ type ScoreConfidence string // Enum values for ScoreConfidence const ( - ScoreConfidenceVery_high ScoreConfidence = "VERY_HIGH" - ScoreConfidenceHigh ScoreConfidence = "HIGH" - ScoreConfidenceMedium ScoreConfidence = "MEDIUM" - ScoreConfidenceLow ScoreConfidence = "LOW" + ScoreConfidenceVeryHigh ScoreConfidence = "VERY_HIGH" + ScoreConfidenceHigh ScoreConfidence = "HIGH" + ScoreConfidenceMedium ScoreConfidence = "MEDIUM" + ScoreConfidenceLow ScoreConfidence = "LOW" ) // Values returns all known values for ScoreConfidence. Note that this can be @@ -516,7 +516,7 @@ type SharePointVersion string // Enum values for SharePointVersion const ( - SharePointVersionSharepoint_online SharePointVersion = "SHAREPOINT_ONLINE" + SharePointVersionSharepointOnline SharePointVersion = "SHAREPOINT_ONLINE" ) // Values returns all known values for SharePointVersion. Note that this can be diff --git a/service/kendra/types/types.go b/service/kendra/types/types.go index 8bf8dbdf873..1ac28c03b0c 100644 --- a/service/kendra/types/types.go +++ b/service/kendra/types/types.go @@ -895,25 +895,25 @@ type SalesforceConfiguration struct { // the key/value pairs required to connect to your Salesforce instance. The secret // must contain a JSON structure with the following keys: // - // * authenticationUrl - // - The OAUTH endpoint that Amazon Kendra connects to get an OAUTH token. + // * authenticationUrl - + // The OAUTH endpoint that Amazon Kendra connects to get an OAUTH token. // - // * + // * // consumerKey - The application public key generated when you created your // Salesforce application. // - // * consumerSecret - The application private key + // * consumerSecret - The application private key // generated when you created your Salesforce application. // - // * password - The + // * password - The // password associated with the user logging in to the Salesforce instance. 
// - // * + // * // securityToken - The token associated with the user account logging in to the // Salesforce instance. // - // * username - The user name of the user logging in to - // the Salesforce instance. + // * username - The user name of the user logging in to the + // Salesforce instance. // // This member is required. SecretArn *string @@ -1255,33 +1255,33 @@ type SharePointConfiguration struct { // the Sortable flag set to true, otherwise Amazon Kendra returns an exception. You // can sort attributes of the following types. // -// * Date value +// * Date value // -// * Long -// value +// * Long value // -// * String value +// * +// String value // // You can't sort attributes of the following type. // -// -// * String list value +// * String list +// value type SortingConfiguration struct { // The name of the document attribute used to sort the response. You can use any // field that has the Sortable flag set to true. You can also sort by any of the // following built-in attributes: // - // * _category + // * _category // - // * _created_at + // * _created_at // - // * + // * // _last_updated_at // - // * _version + // * _version // - // * _view_count + // * _view_count // // This member is required. DocumentAttributeKey *string diff --git a/service/kinesis/api_op_CreateStream.go b/service/kinesis/api_op_CreateStream.go index f4263c2a139..63e38136a40 100644 --- a/service/kinesis/api_op_CreateStream.go +++ b/service/kinesis/api_op_CreateStream.go @@ -29,14 +29,14 @@ import ( // operations only on an ACTIVE stream. You receive a LimitExceededException when // making a CreateStream request when you try to do one of the following: // -// * -// Have more than five streams in the CREATING state at any point in time. +// * Have +// more than five streams in the CREATING state at any point in time. // -// * -// Create more shards than are authorized for your account. +// * Create +// more shards than are authorized for your account. // -// For the default shard -// limit for an AWS account, see Amazon Kinesis Data Streams Limits +// For the default shard limit +// for an AWS account, see Amazon Kinesis Data Streams Limits // (https://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html) // in the Amazon Kinesis Data Streams Developer Guide. To increase this limit, // contact AWS Support diff --git a/service/kinesis/api_op_DisableEnhancedMonitoring.go b/service/kinesis/api_op_DisableEnhancedMonitoring.go index be1ada3c5cc..a42563879a7 100644 --- a/service/kinesis/api_op_DisableEnhancedMonitoring.go +++ b/service/kinesis/api_op_DisableEnhancedMonitoring.go @@ -33,27 +33,27 @@ type DisableEnhancedMonitoringInput struct { // List of shard-level metrics to disable. The following are the valid shard-level // metrics. The value "ALL" disables every metric. 
// - // * IncomingBytes + // * IncomingBytes // - // * + // * // IncomingRecords // - // * OutgoingBytes + // * OutgoingBytes // - // * OutgoingRecords + // * OutgoingRecords // - // * + // * // WriteProvisionedThroughputExceeded // - // * ReadProvisionedThroughputExceeded + // * ReadProvisionedThroughputExceeded // + // * + // IteratorAgeMilliseconds // - // * IteratorAgeMilliseconds + // * ALL // - // * ALL - // - // For more information, see Monitoring the - // Amazon Kinesis Data Streams Service with Amazon CloudWatch + // For more information, see Monitoring the Amazon + // Kinesis Data Streams Service with Amazon CloudWatch // (https://docs.aws.amazon.com/kinesis/latest/dev/monitoring-with-cloudwatch.html) // in the Amazon Kinesis Data Streams Developer Guide. // diff --git a/service/kinesis/api_op_EnableEnhancedMonitoring.go b/service/kinesis/api_op_EnableEnhancedMonitoring.go index 130a682c31d..ab6d0bdfc83 100644 --- a/service/kinesis/api_op_EnableEnhancedMonitoring.go +++ b/service/kinesis/api_op_EnableEnhancedMonitoring.go @@ -33,27 +33,27 @@ type EnableEnhancedMonitoringInput struct { // List of shard-level metrics to enable. The following are the valid shard-level // metrics. The value "ALL" enables every metric. // - // * IncomingBytes + // * IncomingBytes // - // * + // * // IncomingRecords // - // * OutgoingBytes + // * OutgoingBytes // - // * OutgoingRecords + // * OutgoingRecords // - // * + // * // WriteProvisionedThroughputExceeded // - // * ReadProvisionedThroughputExceeded + // * ReadProvisionedThroughputExceeded // + // * + // IteratorAgeMilliseconds // - // * IteratorAgeMilliseconds + // * ALL // - // * ALL - // - // For more information, see Monitoring the - // Amazon Kinesis Data Streams Service with Amazon CloudWatch + // For more information, see Monitoring the Amazon + // Kinesis Data Streams Service with Amazon CloudWatch // (https://docs.aws.amazon.com/kinesis/latest/dev/monitoring-with-cloudwatch.html) // in the Amazon Kinesis Data Streams Developer Guide. // diff --git a/service/kinesis/api_op_GetShardIterator.go b/service/kinesis/api_op_GetShardIterator.go index 506def3dffd..5b22373329a 100644 --- a/service/kinesis/api_op_GetShardIterator.go +++ b/service/kinesis/api_op_GetShardIterator.go @@ -68,24 +68,24 @@ type GetShardIteratorInput struct { // Determines how the shard iterator is used to start reading data records from the // shard. The following are the valid Amazon Kinesis shard iterator types: // - // * + // * // AT_SEQUENCE_NUMBER - Start reading from the position denoted by a specific // sequence number, provided in the value StartingSequenceNumber. // - // * + // * // AFTER_SEQUENCE_NUMBER - Start reading right after the position denoted by a // specific sequence number, provided in the value StartingSequenceNumber. // - // * + // * // AT_TIMESTAMP - Start reading from the position denoted by a specific time stamp, // provided in the value Timestamp. // - // * TRIM_HORIZON - Start reading at the last + // * TRIM_HORIZON - Start reading at the last // untrimmed record in the shard in the system, which is the oldest data record in // the shard. // - // * LATEST - Start reading just after the most recent record in - // the shard, so that you always read the most recent data in the shard. + // * LATEST - Start reading just after the most recent record in the + // shard, so that you always read the most recent data in the shard. // // This member is required. 
ShardIteratorType types.ShardIteratorType diff --git a/service/kinesis/api_op_PutRecord.go b/service/kinesis/api_op_PutRecord.go index e921849e95f..6d9edae3db1 100644 --- a/service/kinesis/api_op_PutRecord.go +++ b/service/kinesis/api_op_PutRecord.go @@ -119,11 +119,11 @@ type PutRecordOutput struct { // The encryption type to use on the record. This parameter can be one of the // following values: // - // * NONE: Do not encrypt the records in the stream. + // * NONE: Do not encrypt the records in the stream. // - // * - // KMS: Use server-side encryption on the records in the stream using a - // customer-managed AWS KMS key. + // * KMS: Use + // server-side encryption on the records in the stream using a customer-managed AWS + // KMS key. EncryptionType types.EncryptionType // Metadata pertaining to the operation's result. diff --git a/service/kinesis/api_op_PutRecords.go b/service/kinesis/api_op_PutRecords.go index 555b2124d52..07811161802 100644 --- a/service/kinesis/api_op_PutRecords.go +++ b/service/kinesis/api_op_PutRecords.go @@ -107,10 +107,10 @@ type PutRecordsOutput struct { // The encryption type used on the records. This parameter can be one of the // following values: // - // * NONE: Do not encrypt the records. + // * NONE: Do not encrypt the records. // - // * KMS: Use - // server-side encryption on the records using a customer-managed AWS KMS key. + // * KMS: Use server-side + // encryption on the records using a customer-managed AWS KMS key. EncryptionType types.EncryptionType // The number of unsuccessfully processed records in a PutRecords request. diff --git a/service/kinesis/api_op_StartStreamEncryption.go b/service/kinesis/api_op_StartStreamEncryption.go index d41fd66d207..4601a0c2f03 100644 --- a/service/kinesis/api_op_StartStreamEncryption.go +++ b/service/kinesis/api_op_StartStreamEncryption.go @@ -53,20 +53,20 @@ type StartStreamEncryptionInput struct { // also use a master key owned by Kinesis Data Streams by specifying the alias // aws/kinesis. // - // * Key ARN example: + // * Key ARN example: // arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 // + // * + // Alias ARN example: arn:aws:kms:us-east-1:123456789012:alias/MyAliasName // - // * Alias ARN example: arn:aws:kms:us-east-1:123456789012:alias/MyAliasName - // - // * + // * // Globally unique key ID example: 12345678-1234-1234-1234-123456789012 // - // * - // Alias name example: alias/MyAliasName + // * Alias + // name example: alias/MyAliasName // - // * Master key owned by Kinesis Data - // Streams: alias/aws/kinesis + // * Master key owned by Kinesis Data Streams: + // alias/aws/kinesis // // This member is required. KeyId *string diff --git a/service/kinesis/api_op_StopStreamEncryption.go b/service/kinesis/api_op_StopStreamEncryption.go index 9a90b369f65..085617bb51f 100644 --- a/service/kinesis/api_op_StopStreamEncryption.go +++ b/service/kinesis/api_op_StopStreamEncryption.go @@ -52,20 +52,20 @@ type StopStreamEncryptionInput struct { // also use a master key owned by Kinesis Data Streams by specifying the alias // aws/kinesis. 
// - // * Key ARN example: + // * Key ARN example: // arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 // + // * + // Alias ARN example: arn:aws:kms:us-east-1:123456789012:alias/MyAliasName // - // * Alias ARN example: arn:aws:kms:us-east-1:123456789012:alias/MyAliasName - // - // * + // * // Globally unique key ID example: 12345678-1234-1234-1234-123456789012 // - // * - // Alias name example: alias/MyAliasName + // * Alias + // name example: alias/MyAliasName // - // * Master key owned by Kinesis Data - // Streams: alias/aws/kinesis + // * Master key owned by Kinesis Data Streams: + // alias/aws/kinesis // // This member is required. KeyId *string diff --git a/service/kinesis/api_op_UpdateShardCount.go b/service/kinesis/api_op_UpdateShardCount.go index 8cb507b2efc..ab8a770e58d 100644 --- a/service/kinesis/api_op_UpdateShardCount.go +++ b/service/kinesis/api_op_UpdateShardCount.go @@ -28,25 +28,24 @@ import ( // operation has the following default limits. By default, you cannot do the // following: // -// * Scale more than ten times per rolling 24-hour period per -// stream +// * Scale more than ten times per rolling 24-hour period per stream // -// * Scale up to more than double your current shard count for a -// stream +// * +// Scale up to more than double your current shard count for a stream // -// * Scale down below half your current shard count for a stream +// * Scale down +// below half your current shard count for a stream // -// * -// Scale up to more than 500 shards in a stream +// * Scale up to more than 500 +// shards in a stream // -// * Scale a stream with more -// than 500 shards down unless the result is less than 500 shards +// * Scale a stream with more than 500 shards down unless the +// result is less than 500 shards // -// * Scale up -// to more than the shard limit for your account +// * Scale up to more than the shard limit for your +// account // -// For the default limits for an AWS -// account, see Streams Limits +// For the default limits for an AWS account, see Streams Limits // (https://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html) // in the Amazon Kinesis Data Streams Developer Guide. To request an increase in // the call rate limit, the shard limit for this API, or your overall shard limit, @@ -82,18 +81,18 @@ type UpdateShardCountInput struct { // The new number of shards. This value has the following default limits. By // default, you cannot do the following: // - // * Set this value to more than double - // your current shard count for a stream. - // - // * Set this value below half your + // * Set this value to more than double your // current shard count for a stream. // - // * Set this value to more than 500 shards - // in a stream (the default limit for shard count per stream is 500 per account per - // region), unless you request a limit increase. + // * Set this value below half your current + // shard count for a stream. + // + // * Set this value to more than 500 shards in a stream + // (the default limit for shard count per stream is 500 per account per region), + // unless you request a limit increase. // - // * Scale a stream with more - // than 500 shards down unless you set this value to less than 500 shards. + // * Scale a stream with more than 500 shards + // down unless you set this value to less than 500 shards. // // This member is required. 
TargetShardCount *int32 diff --git a/service/kinesis/types/enums.go b/service/kinesis/types/enums.go index 2474aec6c8d..8ed6f8a3e75 100644 --- a/service/kinesis/types/enums.go +++ b/service/kinesis/types/enums.go @@ -44,14 +44,14 @@ type MetricsName string // Enum values for MetricsName const ( - MetricsNameIncoming_bytes MetricsName = "IncomingBytes" - MetricsNameIncoming_records MetricsName = "IncomingRecords" - MetricsNameOutgoing_bytes MetricsName = "OutgoingBytes" - MetricsNameOutgoing_records MetricsName = "OutgoingRecords" - MetricsNameWrite_provisioned_throughput_exceeded MetricsName = "WriteProvisionedThroughputExceeded" - MetricsNameRead_provisioned_throughput_exceeded MetricsName = "ReadProvisionedThroughputExceeded" - MetricsNameIterator_age_milliseconds MetricsName = "IteratorAgeMilliseconds" - MetricsNameAll MetricsName = "ALL" + MetricsNameIncomingBytes MetricsName = "IncomingBytes" + MetricsNameIncomingRecords MetricsName = "IncomingRecords" + MetricsNameOutgoingBytes MetricsName = "OutgoingBytes" + MetricsNameOutgoingRecords MetricsName = "OutgoingRecords" + MetricsNameWriteProvisionedThroughputExceeded MetricsName = "WriteProvisionedThroughputExceeded" + MetricsNameReadProvisionedThroughputExceeded MetricsName = "ReadProvisionedThroughputExceeded" + MetricsNameIteratorAgeMilliseconds MetricsName = "IteratorAgeMilliseconds" + MetricsNameAll MetricsName = "ALL" ) // Values returns all known values for MetricsName. Note that this can be expanded @@ -74,7 +74,7 @@ type ScalingType string // Enum values for ScalingType const ( - ScalingTypeUniform_scaling ScalingType = "UNIFORM_SCALING" + ScalingTypeUniformScaling ScalingType = "UNIFORM_SCALING" ) // Values returns all known values for ScalingType. Note that this can be expanded @@ -90,12 +90,12 @@ type ShardFilterType string // Enum values for ShardFilterType const ( - ShardFilterTypeAfter_shard_id ShardFilterType = "AFTER_SHARD_ID" - ShardFilterTypeAt_trim_horizon ShardFilterType = "AT_TRIM_HORIZON" - ShardFilterTypeFrom_trim_horizon ShardFilterType = "FROM_TRIM_HORIZON" - ShardFilterTypeAt_latest ShardFilterType = "AT_LATEST" - ShardFilterTypeAt_timestamp ShardFilterType = "AT_TIMESTAMP" - ShardFilterTypeFrom_timestamp ShardFilterType = "FROM_TIMESTAMP" + ShardFilterTypeAfterShardId ShardFilterType = "AFTER_SHARD_ID" + ShardFilterTypeAtTrimHorizon ShardFilterType = "AT_TRIM_HORIZON" + ShardFilterTypeFromTrimHorizon ShardFilterType = "FROM_TRIM_HORIZON" + ShardFilterTypeAtLatest ShardFilterType = "AT_LATEST" + ShardFilterTypeAtTimestamp ShardFilterType = "AT_TIMESTAMP" + ShardFilterTypeFromTimestamp ShardFilterType = "FROM_TIMESTAMP" ) // Values returns all known values for ShardFilterType. 
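Not part of the patch, just a grounding check on the kinesis MetricsName hunk above: for this enum the wire values were already CamelCase, so after the rename the Go identifiers and the serialized strings line up almost one-for-one. The import path is assumed from service/kinesis/types in the diff.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/kinesis/types"
)

func main() {
	// A shard-level metrics list like the one documented for the enhanced
	// monitoring inputs above; printing each value shows the unchanged wire
	// strings ("IncomingBytes", "OutgoingRecords", "IteratorAgeMilliseconds").
	metrics := []types.MetricsName{
		types.MetricsNameIncomingBytes,
		types.MetricsNameOutgoingRecords,
		types.MetricsNameIteratorAgeMilliseconds,
	}
	for _, m := range metrics {
		fmt.Println(string(m))
	}
}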
Note that this can be @@ -116,11 +116,11 @@ type ShardIteratorType string // Enum values for ShardIteratorType const ( - ShardIteratorTypeAt_sequence_number ShardIteratorType = "AT_SEQUENCE_NUMBER" - ShardIteratorTypeAfter_sequence_number ShardIteratorType = "AFTER_SEQUENCE_NUMBER" - ShardIteratorTypeTrim_horizon ShardIteratorType = "TRIM_HORIZON" - ShardIteratorTypeLatest ShardIteratorType = "LATEST" - ShardIteratorTypeAt_timestamp ShardIteratorType = "AT_TIMESTAMP" + ShardIteratorTypeAtSequenceNumber ShardIteratorType = "AT_SEQUENCE_NUMBER" + ShardIteratorTypeAfterSequenceNumber ShardIteratorType = "AFTER_SEQUENCE_NUMBER" + ShardIteratorTypeTrimHorizon ShardIteratorType = "TRIM_HORIZON" + ShardIteratorTypeLatest ShardIteratorType = "LATEST" + ShardIteratorTypeAtTimestamp ShardIteratorType = "AT_TIMESTAMP" ) // Values returns all known values for ShardIteratorType. Note that this can be diff --git a/service/kinesis/types/types.go b/service/kinesis/types/types.go index d8a480f0432..5d193b39b59 100644 --- a/service/kinesis/types/types.go +++ b/service/kinesis/types/types.go @@ -88,27 +88,27 @@ type EnhancedMetrics struct { // List of shard-level metrics. The following are the valid shard-level metrics. // The value "ALL" enhances every metric. // - // * IncomingBytes + // * IncomingBytes // - // * - // IncomingRecords + // * IncomingRecords // - // * OutgoingBytes + // * + // OutgoingBytes // - // * OutgoingRecords + // * OutgoingRecords // - // * - // WriteProvisionedThroughputExceeded - // - // * ReadProvisionedThroughputExceeded + // * WriteProvisionedThroughputExceeded // + // * + // ReadProvisionedThroughputExceeded // // * IteratorAgeMilliseconds // - // * ALL + // * ALL // - // For more information, see Monitoring the - // Amazon Kinesis Data Streams Service with Amazon CloudWatch + // For more + // information, see Monitoring the Amazon Kinesis Data Streams Service with Amazon + // CloudWatch // (https://docs.aws.amazon.com/kinesis/latest/dev/monitoring-with-cloudwatch.html) // in the Amazon Kinesis Data Streams Developer Guide. ShardLevelMetrics []MetricsName @@ -209,11 +209,11 @@ type Record struct { // The encryption type used on the record. This parameter can be one of the // following values: // - // * NONE: Do not encrypt the records in the stream. + // * NONE: Do not encrypt the records in the stream. // - // * - // KMS: Use server-side encryption on the records in the stream using a - // customer-managed AWS KMS key. + // * KMS: Use + // server-side encryption on the records in the stream using a customer-managed AWS + // KMS key. EncryptionType EncryptionType } @@ -337,20 +337,20 @@ type StreamDescription struct { // The current status of the stream being described. The stream status is one of // the following states: // - // * CREATING - The stream is being created. Kinesis - // Data Streams immediately returns and sets StreamStatus to CREATING. + // * CREATING - The stream is being created. Kinesis Data + // Streams immediately returns and sets StreamStatus to CREATING. // - // * - // DELETING - The stream is being deleted. The specified stream is in the DELETING - // state until Kinesis Data Streams completes the deletion. + // * DELETING - The + // stream is being deleted. The specified stream is in the DELETING state until + // Kinesis Data Streams completes the deletion. // - // * ACTIVE - The - // stream exists and is ready for read and write operations or deletion. You should - // perform read and write operations only on an ACTIVE stream. 
+ // * ACTIVE - The stream exists and + // is ready for read and write operations or deletion. You should perform read and + // write operations only on an ACTIVE stream. // - // * UPDATING - - // Shards in the stream are being merged or split. Read and write operations - // continue to work while the stream is in the UPDATING state. + // * UPDATING - Shards in the stream + // are being merged or split. Read and write operations continue to work while the + // stream is in the UPDATING state. // // This member is required. StreamStatus StreamStatus @@ -358,11 +358,11 @@ type StreamDescription struct { // The server-side encryption type used on the stream. This parameter can be one of // the following values: // - // * NONE: Do not encrypt the records in the stream. - // + // * NONE: Do not encrypt the records in the stream. // - // * KMS: Use server-side encryption on the records in the stream using a - // customer-managed AWS KMS key. + // * KMS: + // Use server-side encryption on the records in the stream using a customer-managed + // AWS KMS key. EncryptionType EncryptionType // The GUID for the customer-managed AWS KMS key to use for encryption. This value @@ -370,21 +370,20 @@ type StreamDescription struct { // a key, or an alias name prefixed by "alias/".You can also use a master key owned // by Kinesis Data Streams by specifying the alias aws/kinesis. // - // * Key ARN - // example: + // * Key ARN example: // arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 // + // * + // Alias ARN example: arn:aws:kms:us-east-1:123456789012:alias/MyAliasName // - // * Alias ARN example: arn:aws:kms:us-east-1:123456789012:alias/MyAliasName - // - // * + // * // Globally unique key ID example: 12345678-1234-1234-1234-123456789012 // - // * - // Alias name example: alias/MyAliasName + // * Alias + // name example: alias/MyAliasName // - // * Master key owned by Kinesis Data - // Streams: alias/aws/kinesis + // * Master key owned by Kinesis Data Streams: + // alias/aws/kinesis KeyId *string } @@ -424,20 +423,20 @@ type StreamDescriptionSummary struct { // The current status of the stream being described. The stream status is one of // the following states: // - // * CREATING - The stream is being created. Kinesis - // Data Streams immediately returns and sets StreamStatus to CREATING. + // * CREATING - The stream is being created. Kinesis Data + // Streams immediately returns and sets StreamStatus to CREATING. // - // * - // DELETING - The stream is being deleted. The specified stream is in the DELETING - // state until Kinesis Data Streams completes the deletion. + // * DELETING - The + // stream is being deleted. The specified stream is in the DELETING state until + // Kinesis Data Streams completes the deletion. // - // * ACTIVE - The - // stream exists and is ready for read and write operations or deletion. You should - // perform read and write operations only on an ACTIVE stream. + // * ACTIVE - The stream exists and + // is ready for read and write operations or deletion. You should perform read and + // write operations only on an ACTIVE stream. // - // * UPDATING - - // Shards in the stream are being merged or split. Read and write operations - // continue to work while the stream is in the UPDATING state. + // * UPDATING - Shards in the stream + // are being merged or split. Read and write operations continue to work while the + // stream is in the UPDATING state. // // This member is required. 
StreamStatus StreamStatus @@ -447,10 +446,9 @@ type StreamDescriptionSummary struct { // The encryption type used. This value is one of the following: // - // * KMS + // * KMS // - // * - // NONE + // * NONE EncryptionType EncryptionType // The GUID for the customer-managed AWS KMS key to use for encryption. This value @@ -458,21 +456,20 @@ type StreamDescriptionSummary struct { // a key, or an alias name prefixed by "alias/".You can also use a master key owned // by Kinesis Data Streams by specifying the alias aws/kinesis. // - // * Key ARN - // example: + // * Key ARN example: // arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 // + // * + // Alias ARN example: arn:aws:kms:us-east-1:123456789012:alias/MyAliasName // - // * Alias ARN example: arn:aws:kms:us-east-1:123456789012:alias/MyAliasName - // - // - // * Globally unique key ID example: 12345678-1234-1234-1234-123456789012 + // * + // Globally unique key ID example: 12345678-1234-1234-1234-123456789012 // - // * - // Alias name example: alias/MyAliasName + // * Alias + // name example: alias/MyAliasName // - // * Master key owned by Kinesis Data - // Streams: alias/aws/kinesis + // * Master key owned by Kinesis Data Streams: + // alias/aws/kinesis KeyId *string } diff --git a/service/kinesisanalytics/types/enums.go b/service/kinesisanalytics/types/enums.go index ebffa49084b..dd15b6726c8 100644 --- a/service/kinesisanalytics/types/enums.go +++ b/service/kinesisanalytics/types/enums.go @@ -32,9 +32,9 @@ type InputStartingPosition string // Enum values for InputStartingPosition const ( - InputStartingPositionNow InputStartingPosition = "NOW" - InputStartingPositionTrim_horizon InputStartingPosition = "TRIM_HORIZON" - InputStartingPositionLast_stopped_point InputStartingPosition = "LAST_STOPPED_POINT" + InputStartingPositionNow InputStartingPosition = "NOW" + InputStartingPositionTrimHorizon InputStartingPosition = "TRIM_HORIZON" + InputStartingPositionLastStoppedPoint InputStartingPosition = "LAST_STOPPED_POINT" ) // Values returns all known values for InputStartingPosition. Note that this can be diff --git a/service/kinesisanalytics/types/types.go b/service/kinesisanalytics/types/types.go index 78ec0d3ab6b..86052ba7ed9 100644 --- a/service/kinesisanalytics/types/types.go +++ b/service/kinesisanalytics/types/types.go @@ -429,17 +429,16 @@ type InputStartingPositionConfiguration struct { // The starting position on the stream. // - // * NOW - Start reading just after the - // most recent record in the stream, start at the request time stamp that the - // customer issued. + // * NOW - Start reading just after the most + // recent record in the stream, start at the request time stamp that the customer + // issued. // - // * TRIM_HORIZON - Start reading at the last untrimmed - // record in the stream, which is the oldest record available in the stream. This - // option is not available for an Amazon Kinesis Firehose delivery stream. + // * TRIM_HORIZON - Start reading at the last untrimmed record in the + // stream, which is the oldest record available in the stream. This option is not + // available for an Amazon Kinesis Firehose delivery stream. // - // * - // LAST_STOPPED_POINT - Resume reading from where the application last stopped - // reading. + // * LAST_STOPPED_POINT + // - Resume reading from where the application last stopped reading. 
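The hunks above only rename the generated enum constants from underscore casing to CamelCase (for example ShardIteratorTypeTrim_horizon becomes ShardIteratorTypeTrimHorizon); the serialized string values such as "TRIM_HORIZON" are untouched. As a rough illustration, not part of this patch, of how caller code would pick up the new names, a sketch using the standard v2 Kinesis client might look like the following; it assumes an aws.Config is already loaded and uses placeholder stream and shard identifiers.

package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kinesis"
	"github.com/aws/aws-sdk-go-v2/service/kinesis/types"
)

// readFromTrimHorizon reads the oldest available records of one shard using the
// renamed ShardIteratorTypeTrimHorizon constant (formerly
// ShardIteratorTypeTrim_horizon); the wire value is still "TRIM_HORIZON".
func readFromTrimHorizon(ctx context.Context, cfg aws.Config) ([]types.Record, error) {
	client := kinesis.NewFromConfig(cfg)

	iter, err := client.GetShardIterator(ctx, &kinesis.GetShardIteratorInput{
		StreamName:        aws.String("example-stream"),       // placeholder
		ShardId:           aws.String("shardId-000000000000"), // placeholder
		ShardIteratorType: types.ShardIteratorTypeTrimHorizon,
	})
	if err != nil {
		return nil, err
	}

	out, err := client.GetRecords(ctx, &kinesis.GetRecordsInput{
		ShardIterator: iter.ShardIterator,
	})
	if err != nil {
		return nil, err
	}
	return out.Records, nil
}

Callers that still reference the old underscore names only need a mechanical rename; the behaviour on the wire is unchanged.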
InputStartingPosition InputStartingPosition } diff --git a/service/kinesisanalyticsv2/api_op_AddApplicationVpcConfiguration.go b/service/kinesisanalyticsv2/api_op_AddApplicationVpcConfiguration.go index a01d8de2790..dc85dabd5c3 100644 --- a/service/kinesisanalyticsv2/api_op_AddApplicationVpcConfiguration.go +++ b/service/kinesisanalyticsv2/api_op_AddApplicationVpcConfiguration.go @@ -15,12 +15,12 @@ import ( // Applications can use VPCs to store and access resources securely. Note the // following about VPC configurations for Kinesis Data Analytics applications: // +// * +// VPC configurations are not supported for SQL applications. // -// * VPC configurations are not supported for SQL applications. -// -// * When a VPC -// is added to a Kinesis Data Analytics application, the application can no longer -// be accessed from the Internet directly. To enable Internet access to the +// * When a VPC is +// added to a Kinesis Data Analytics application, the application can no longer be +// accessed from the Internet directly. To enable Internet access to the // application, add an Internet gateway to your VPC. func (c *Client) AddApplicationVpcConfiguration(ctx context.Context, params *AddApplicationVpcConfigurationInput, optFns ...func(*Options)) (*AddApplicationVpcConfigurationOutput, error) { if params == nil { diff --git a/service/kinesisanalyticsv2/types/enums.go b/service/kinesisanalyticsv2/types/enums.go index 14647a53f2c..e6baa2eec2a 100644 --- a/service/kinesisanalyticsv2/types/enums.go +++ b/service/kinesisanalyticsv2/types/enums.go @@ -6,9 +6,9 @@ type ApplicationRestoreType string // Enum values for ApplicationRestoreType const ( - ApplicationRestoreTypeSkip_restore_from_snapshot ApplicationRestoreType = "SKIP_RESTORE_FROM_SNAPSHOT" - ApplicationRestoreTypeRestore_from_latest_snapshot ApplicationRestoreType = "RESTORE_FROM_LATEST_SNAPSHOT" - ApplicationRestoreTypeRestore_from_custom_snapshot ApplicationRestoreType = "RESTORE_FROM_CUSTOM_SNAPSHOT" + ApplicationRestoreTypeSkipRestoreFromSnapshot ApplicationRestoreType = "SKIP_RESTORE_FROM_SNAPSHOT" + ApplicationRestoreTypeRestoreFromLatestSnapshot ApplicationRestoreType = "RESTORE_FROM_LATEST_SNAPSHOT" + ApplicationRestoreTypeRestoreFromCustomSnapshot ApplicationRestoreType = "RESTORE_FROM_CUSTOM_SNAPSHOT" ) // Values returns all known values for ApplicationRestoreType. Note that this can @@ -26,14 +26,14 @@ type ApplicationStatus string // Enum values for ApplicationStatus const ( - ApplicationStatusDeleting ApplicationStatus = "DELETING" - ApplicationStatusStarting ApplicationStatus = "STARTING" - ApplicationStatusStopping ApplicationStatus = "STOPPING" - ApplicationStatusReady ApplicationStatus = "READY" - ApplicationStatusRunning ApplicationStatus = "RUNNING" - ApplicationStatusUpdating ApplicationStatus = "UPDATING" - ApplicationStatusAutoscaling ApplicationStatus = "AUTOSCALING" - ApplicationStatusForce_stopping ApplicationStatus = "FORCE_STOPPING" + ApplicationStatusDeleting ApplicationStatus = "DELETING" + ApplicationStatusStarting ApplicationStatus = "STARTING" + ApplicationStatusStopping ApplicationStatus = "STOPPING" + ApplicationStatusReady ApplicationStatus = "READY" + ApplicationStatusRunning ApplicationStatus = "RUNNING" + ApplicationStatusUpdating ApplicationStatus = "UPDATING" + ApplicationStatusAutoscaling ApplicationStatus = "AUTOSCALING" + ApplicationStatusForceStopping ApplicationStatus = "FORCE_STOPPING" ) // Values returns all known values for ApplicationStatus. 
Note that this can be @@ -92,9 +92,9 @@ type InputStartingPosition string // Enum values for InputStartingPosition const ( - InputStartingPositionNow InputStartingPosition = "NOW" - InputStartingPositionTrim_horizon InputStartingPosition = "TRIM_HORIZON" - InputStartingPositionLast_stopped_point InputStartingPosition = "LAST_STOPPED_POINT" + InputStartingPositionNow InputStartingPosition = "NOW" + InputStartingPositionTrimHorizon InputStartingPosition = "TRIM_HORIZON" + InputStartingPositionLastStoppedPoint InputStartingPosition = "LAST_STOPPED_POINT" ) // Values returns all known values for InputStartingPosition. Note that this can be @@ -174,9 +174,9 @@ type RuntimeEnvironment string // Enum values for RuntimeEnvironment const ( - RuntimeEnvironmentSql_1_0 RuntimeEnvironment = "SQL-1_0" - RuntimeEnvironmentFlink_1_6 RuntimeEnvironment = "FLINK-1_6" - RuntimeEnvironmentFlink_1_8 RuntimeEnvironment = "FLINK-1_8" + RuntimeEnvironmentSql10 RuntimeEnvironment = "SQL-1_0" + RuntimeEnvironmentFlink16 RuntimeEnvironment = "FLINK-1_6" + RuntimeEnvironmentFlink18 RuntimeEnvironment = "FLINK-1_8" ) // Values returns all known values for RuntimeEnvironment. Note that this can be diff --git a/service/kinesisanalyticsv2/types/types.go b/service/kinesisanalyticsv2/types/types.go index d1738cb4637..c62f010cc31 100644 --- a/service/kinesisanalyticsv2/types/types.go +++ b/service/kinesisanalyticsv2/types/types.go @@ -274,11 +274,11 @@ type CheckpointConfiguration struct { // following values, even if they are set to other values using APIs or application // code: // - // * CheckpointingEnabled: true + // * CheckpointingEnabled: true // - // * CheckpointInterval: 60000 + // * CheckpointInterval: 60000 // - // * + // * // MinPauseBetweenCheckpoints: 5000 // // This member is required. @@ -330,12 +330,12 @@ type CheckpointConfigurationDescription struct { // use the following values, even if they are set to other values using APIs or // application code: // - // * CheckpointingEnabled: true + // * CheckpointingEnabled: true // - // * CheckpointInterval: - // 60000 + // * CheckpointInterval: 60000 // - // * MinPauseBetweenCheckpoints: 5000 + // * + // MinPauseBetweenCheckpoints: 5000 ConfigurationType ConfigurationType // Describes the minimum time in milliseconds after a checkpoint operation @@ -369,12 +369,12 @@ type CheckpointConfigurationUpdate struct { // application will use the following values, even if they are set to other values // using APIs or application code: // - // * CheckpointingEnabled: true + // * CheckpointingEnabled: true // - // * + // * // CheckpointInterval: 60000 // - // * MinPauseBetweenCheckpoints: 5000 + // * MinPauseBetweenCheckpoints: 5000 ConfigurationTypeUpdate ConfigurationType // Describes updates to the minimum time in milliseconds after a checkpoint @@ -819,16 +819,16 @@ type InputStartingPositionConfiguration struct { // The starting position on the stream. // - // * NOW - Start reading just after the - // most recent record in the stream, and start at the request timestamp that the + // * NOW - Start reading just after the most + // recent record in the stream, and start at the request timestamp that the // customer issued. // - // * TRIM_HORIZON - Start reading at the last untrimmed - // record in the stream, which is the oldest record available in the stream. This - // option is not available for an Amazon Kinesis Data Firehose delivery stream. 
- // + // * TRIM_HORIZON - Start reading at the last untrimmed record in + // the stream, which is the oldest record available in the stream. This option is + // not available for an Amazon Kinesis Data Firehose delivery stream. // - // * LAST_STOPPED_POINT - Resume reading from where the application last stopped + // * + // LAST_STOPPED_POINT - Resume reading from where the application last stopped // reading. InputStartingPosition InputStartingPosition } diff --git a/service/kinesisvideo/api_op_UpdateDataRetention.go b/service/kinesisvideo/api_op_UpdateDataRetention.go index c58477dda85..e0627f6d329 100644 --- a/service/kinesisvideo/api_op_UpdateDataRetention.go +++ b/service/kinesisvideo/api_op_UpdateDataRetention.go @@ -19,15 +19,15 @@ import ( // the KinesisVideo:UpdateDataRetention action. Changing the data retention period // affects the data in the stream as follows: // -// * If the data retention period -// is increased, existing data is retained for the new retention period. For -// example, if the data retention period is increased from one hour to seven hours, -// all existing data is retained for seven hours. +// * If the data retention period is +// increased, existing data is retained for the new retention period. For example, +// if the data retention period is increased from one hour to seven hours, all +// existing data is retained for seven hours. // -// * If the data retention -// period is decreased, existing data is retained for the new retention period. For -// example, if the data retention period is decreased from seven hours to one hour, -// all existing data is retained for one hour, and any data older than one hour is +// * If the data retention period is +// decreased, existing data is retained for the new retention period. For example, +// if the data retention period is decreased from seven hours to one hour, all +// existing data is retained for one hour, and any data older than one hour is // deleted immediately. func (c *Client) UpdateDataRetention(ctx context.Context, params *UpdateDataRetentionInput, optFns ...func(*Options)) (*UpdateDataRetentionOutput, error) { if params == nil { diff --git a/service/kinesisvideo/types/enums.go b/service/kinesisvideo/types/enums.go index e590685841c..c640cfbb2ae 100644 --- a/service/kinesisvideo/types/enums.go +++ b/service/kinesisvideo/types/enums.go @@ -6,13 +6,13 @@ type APIName string // Enum values for APIName const ( - APINamePut_media APIName = "PUT_MEDIA" - APINameGet_media APIName = "GET_MEDIA" - APINameList_fragments APIName = "LIST_FRAGMENTS" - APINameGet_media_for_fragment_list APIName = "GET_MEDIA_FOR_FRAGMENT_LIST" - APINameGet_hls_streaming_session_url APIName = "GET_HLS_STREAMING_SESSION_URL" - APINameGet_dash_streaming_session_url APIName = "GET_DASH_STREAMING_SESSION_URL" - APINameGet_clip APIName = "GET_CLIP" + APINamePutMedia APIName = "PUT_MEDIA" + APINameGetMedia APIName = "GET_MEDIA" + APINameListFragments APIName = "LIST_FRAGMENTS" + APINameGetMediaForFragmentList APIName = "GET_MEDIA_FOR_FRAGMENT_LIST" + APINameGetHlsStreamingSessionUrl APIName = "GET_HLS_STREAMING_SESSION_URL" + APINameGetDashStreamingSessionUrl APIName = "GET_DASH_STREAMING_SESSION_URL" + APINameGetClip APIName = "GET_CLIP" ) // Values returns all known values for APIName. 
Note that this can be expanded in @@ -70,7 +70,7 @@ type ChannelType string // Enum values for ChannelType const ( - ChannelTypeSingle_master ChannelType = "SINGLE_MASTER" + ChannelTypeSingleMaster ChannelType = "SINGLE_MASTER" ) // Values returns all known values for ChannelType. Note that this can be expanded @@ -86,7 +86,7 @@ type ComparisonOperator string // Enum values for ComparisonOperator const ( - ComparisonOperatorBegins_with ComparisonOperator = "BEGINS_WITH" + ComparisonOperatorBeginsWith ComparisonOperator = "BEGINS_WITH" ) // Values returns all known values for ComparisonOperator. Note that this can be @@ -124,8 +124,8 @@ type UpdateDataRetentionOperation string // Enum values for UpdateDataRetentionOperation const ( - UpdateDataRetentionOperationIncrease_data_retention UpdateDataRetentionOperation = "INCREASE_DATA_RETENTION" - UpdateDataRetentionOperationDecrease_data_retention UpdateDataRetentionOperation = "DECREASE_DATA_RETENTION" + UpdateDataRetentionOperationIncreaseDataRetention UpdateDataRetentionOperation = "INCREASE_DATA_RETENTION" + UpdateDataRetentionOperationDecreaseDataRetention UpdateDataRetentionOperation = "DECREASE_DATA_RETENTION" ) // Values returns all known values for UpdateDataRetentionOperation. Note that this diff --git a/service/kinesisvideoarchivedmedia/api_op_GetClip.go b/service/kinesisvideoarchivedmedia/api_op_GetClip.go index adea647ea57..e54bfe80ff0 100644 --- a/service/kinesisvideoarchivedmedia/api_op_GetClip.go +++ b/service/kinesisvideoarchivedmedia/api_op_GetClip.go @@ -20,25 +20,25 @@ import ( // specifying GET_CLIP for the APIName parameter. An Amazon Kinesis video stream // has the following requirements for providing data through MP4: // -// * The media -// must contain h.264 or h.265 encoded video and, optionally, AAC or G.711 encoded +// * The media must +// contain h.264 or h.265 encoded video and, optionally, AAC or G.711 encoded // audio. Specifically, the codec ID of track 1 should be V_MPEG/ISO/AVC (for // h.264) or V_MPEGH/ISO/HEVC (for H.265). Optionally, the codec ID of track 2 // should be A_AAC (for AAC) or A_MS/ACM (for G.711). // -// * Data retention must be +// * Data retention must be // greater than 0. // -// * The video track of each fragment must contain codec -// private data in the Advanced Video Coding (AVC) for H.264 format and HEVC for -// H.265 format. For more information, see MPEG-4 specification ISO/IEC 14496-15 +// * The video track of each fragment must contain codec private +// data in the Advanced Video Coding (AVC) for H.264 format and HEVC for H.265 +// format. For more information, see MPEG-4 specification ISO/IEC 14496-15 // (https://www.iso.org/standard/55980.html). For information about adapting stream // data to a given format, see NAL Adaptation Flags // (http://docs.aws.amazon.com/kinesisvideostreams/latest/dg/producer-reference-nal.html). // -// -// * The audio track (if present) of each fragment must contain codec private data -// in the AAC format (AAC specification ISO/IEC 13818-7 +// * +// The audio track (if present) of each fragment must contain codec private data in +// the AAC format (AAC specification ISO/IEC 13818-7 // (https://www.iso.org/standard/43345.html)) or the MS Wave format // (http://www-mmsp.ece.mcgill.ca/Documents/AudioFormats/WAVE/WAVE.html). 
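Before any of the archived-media calls documented here (GetClip and the streaming-session URL operations) can be made, the stream's data endpoint has to be fetched, which is where the renamed APIName constants from the enums hunk above are used. A hedged sketch of that first step, assuming an already-loaded aws.Config and a placeholder stream name:

package main

import (
	"context"
	"errors"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kinesisvideo"
	"github.com/aws/aws-sdk-go-v2/service/kinesisvideo/types"
)

// clipEndpoint looks up the endpoint that GetClip requests must be sent to,
// using APINameGetClip (formerly APINameGet_clip).
func clipEndpoint(ctx context.Context, cfg aws.Config) (string, error) {
	kv := kinesisvideo.NewFromConfig(cfg)
	out, err := kv.GetDataEndpoint(ctx, &kinesisvideo.GetDataEndpointInput{
		APIName:    types.APINameGetClip,
		StreamName: aws.String("example-stream"), // placeholder
	})
	if err != nil {
		return "", err
	}
	if out.DataEndpoint == nil {
		return "", errors.New("no data endpoint returned")
	}
	// The kinesisvideoarchivedmedia client is then configured against this
	// endpoint before calling GetClip, as the documentation above requires.
	return *out.DataEndpoint, nil
}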
// diff --git a/service/kinesisvideoarchivedmedia/api_op_GetDASHStreamingSessionURL.go b/service/kinesisvideoarchivedmedia/api_op_GetDASHStreamingSessionURL.go index bbf548c960e..763d8a846f6 100644 --- a/service/kinesisvideoarchivedmedia/api_op_GetDASHStreamingSessionURL.go +++ b/service/kinesisvideoarchivedmedia/api_op_GetDASHStreamingSessionURL.go @@ -18,40 +18,40 @@ import ( // An Amazon Kinesis video stream has the following requirements for providing data // through MPEG-DASH: // -// * The media must contain h.264 or h.265 encoded video -// and, optionally, AAC or G.711 encoded audio. Specifically, the codec ID of track -// 1 should be V_MPEG/ISO/AVC (for h.264) or V_MPEGH/ISO/HEVC (for H.265). +// * The media must contain h.264 or h.265 encoded video and, +// optionally, AAC or G.711 encoded audio. Specifically, the codec ID of track 1 +// should be V_MPEG/ISO/AVC (for h.264) or V_MPEGH/ISO/HEVC (for H.265). // Optionally, the codec ID of track 2 should be A_AAC (for AAC) or A_MS/ACM (for // G.711). // -// * Data retention must be greater than 0. +// * Data retention must be greater than 0. // -// * The video track of -// each fragment must contain codec private data in the Advanced Video Coding (AVC) -// for H.264 format and HEVC for H.265 format. For more information, see MPEG-4 +// * The video track of each +// fragment must contain codec private data in the Advanced Video Coding (AVC) for +// H.264 format and HEVC for H.265 format. For more information, see MPEG-4 // specification ISO/IEC 14496-15 (https://www.iso.org/standard/55980.html). For // information about adapting stream data to a given format, see NAL Adaptation // Flags // (http://docs.aws.amazon.com/kinesisvideostreams/latest/dg/producer-reference-nal.html). // -// -// * The audio track (if present) of each fragment must contain codec private data -// in the AAC format (AAC specification ISO/IEC 13818-7 +// * +// The audio track (if present) of each fragment must contain codec private data in +// the AAC format (AAC specification ISO/IEC 13818-7 // (https://www.iso.org/standard/43345.html)) or the MS Wave format // (http://www-mmsp.ece.mcgill.ca/Documents/AudioFormats/WAVE/WAVE.html). // // The // following procedure shows how to use MPEG-DASH with Kinesis Video Streams: // -// -// * Get an endpoint using GetDataEndpoint +// * +// Get an endpoint using GetDataEndpoint // (http://docs.aws.amazon.com/kinesisvideostreams/latest/dg/API_GetDataEndpoint.html), // specifying GET_DASH_STREAMING_SESSION_URL for the APIName parameter. // -// * -// Retrieve the MPEG-DASH URL using GetDASHStreamingSessionURL. Kinesis Video -// Streams creates an MPEG-DASH streaming session to be used for accessing content -// in a stream using the MPEG-DASH protocol. GetDASHStreamingSessionURL returns an +// * Retrieve +// the MPEG-DASH URL using GetDASHStreamingSessionURL. Kinesis Video Streams +// creates an MPEG-DASH streaming session to be used for accessing content in a +// stream using the MPEG-DASH protocol. GetDASHStreamingSessionURL returns an // authenticated URL (that includes an encrypted session token) for the session's // MPEG-DASH manifest (the root resource needed for streaming with MPEG-DASH). // Don't share or store this token where an unauthorized entity could access it. @@ -61,53 +61,53 @@ import ( // time range, and format. No other media data (such as frames outside the // requested window or alternate bitrates) is made available. 
// -// * Provide the -// URL (containing the encrypted session token) for the MPEG-DASH manifest to a -// media player that supports the MPEG-DASH protocol. Kinesis Video Streams makes -// the initialization fragment and media fragments available through the manifest -// URL. The initialization fragment contains the codec private data for the stream, -// and other data needed to set up the video or audio decoder and renderer. The -// media fragments contain encoded video frames or encoded audio samples. +// * Provide the URL +// (containing the encrypted session token) for the MPEG-DASH manifest to a media +// player that supports the MPEG-DASH protocol. Kinesis Video Streams makes the +// initialization fragment and media fragments available through the manifest URL. +// The initialization fragment contains the codec private data for the stream, and +// other data needed to set up the video or audio decoder and renderer. The media +// fragments contain encoded video frames or encoded audio samples. // -// * -// The media player receives the authenticated URL and requests stream metadata and -// media data normally. When the media player requests data, it calls the following +// * The media +// player receives the authenticated URL and requests stream metadata and media +// data normally. When the media player requests data, it calls the following // actions: // -// * GetDASHManifest: Retrieves an MPEG DASH manifest, which -// contains the metadata for the media that you want to playback. +// * GetDASHManifest: Retrieves an MPEG DASH manifest, which contains the +// metadata for the media that you want to playback. // -// * -// GetMP4InitFragment: Retrieves the MP4 initialization fragment. The media player -// typically loads the initialization fragment before loading any media fragments. -// This fragment contains the "fytp" and "moov" MP4 atoms, and the child atoms that -// are needed to initialize the media player decoder. The initialization fragment -// does not correspond to a fragment in a Kinesis video stream. It contains only -// the codec private data for the stream and respective track, which the media -// player needs to decode the media frames. +// * GetMP4InitFragment: +// Retrieves the MP4 initialization fragment. The media player typically loads the +// initialization fragment before loading any media fragments. This fragment +// contains the "fytp" and "moov" MP4 atoms, and the child atoms that are needed to +// initialize the media player decoder. The initialization fragment does not +// correspond to a fragment in a Kinesis video stream. It contains only the codec +// private data for the stream and respective track, which the media player needs +// to decode the media frames. // -// * GetMP4MediaFragment: -// Retrieves MP4 media fragments. These fragments contain the "moof" and "mdat" MP4 -// atoms and their child atoms, containing the encoded fragment's media frames and -// their timestamps. After the first media fragment is made available in a -// streaming session, any fragments that don't contain the same codec private data -// cause an error to be returned when those different media fragments are loaded. -// Therefore, the codec private data should not change between fragments in a -// session. This also means that the session fails if the fragments in a stream -// change from having only video to having both audio and video. Data retrieved -// with this action is billable. See Pricing +// * GetMP4MediaFragment: Retrieves MP4 media +// fragments. 
These fragments contain the "moof" and "mdat" MP4 atoms and their +// child atoms, containing the encoded fragment's media frames and their +// timestamps. After the first media fragment is made available in a streaming +// session, any fragments that don't contain the same codec private data cause an +// error to be returned when those different media fragments are loaded. Therefore, +// the codec private data should not change between fragments in a session. This +// also means that the session fails if the fragments in a stream change from +// having only video to having both audio and video. Data retrieved with this +// action is billable. See Pricing // (https://aws.amazon.com/kinesis/video-streams/pricing/) for details. // // The // following restrictions apply to MPEG-DASH sessions: // -// * A streaming session -// URL should not be shared between players. The service might throttle a session -// if multiple media players are sharing it. For connection limits, see Kinesis -// Video Streams Limits +// * A streaming session URL +// should not be shared between players. The service might throttle a session if +// multiple media players are sharing it. For connection limits, see Kinesis Video +// Streams Limits // (http://docs.aws.amazon.com/kinesisvideostreams/latest/dg/limits.html). // -// * A +// * A // Kinesis video stream can have a maximum of ten active MPEG-DASH streaming // sessions. If a new session is created when the maximum number of sessions is // already active, the oldest (earliest created) session is closed. The number of @@ -131,19 +131,19 @@ import ( // Video Streams archived media API, in addition to the HTTP status code and the // response body, it includes the following pieces of information: // -// * +// * // x-amz-ErrorType HTTP header – contains a more specific error type in addition to // what the HTTP status code provides. // -// * x-amz-RequestId HTTP header – if you -// want to report an issue to AWS, the support team can better diagnose the problem -// if given the Request Id. +// * x-amz-RequestId HTTP header – if you want +// to report an issue to AWS, the support team can better diagnose the problem if +// given the Request Id. // -// Both the HTTP status code and the ErrorType header can -// be utilized to make programmatic decisions about whether errors are retry-able -// and under what conditions, as well as provide information on what actions the -// client programmer might need to take in order to successfully try again. For -// more information, see the Errors section at the bottom of this topic, as well as +// Both the HTTP status code and the ErrorType header can be +// utilized to make programmatic decisions about whether errors are retry-able and +// under what conditions, as well as provide information on what actions the client +// programmer might need to take in order to successfully try again. For more +// information, see the Errors section at the bottom of this topic, as well as // Common Errors // (https://docs.aws.amazon.com/kinesisvideostreams/latest/dg/CommonErrors.html). func (c *Client) GetDASHStreamingSessionURL(ctx context.Context, params *GetDASHStreamingSessionURLInput, optFns ...func(*Options)) (*GetDASHStreamingSessionURLOutput, error) { @@ -219,7 +219,7 @@ type GetDASHStreamingSessionURLInput struct { // Whether to retrieve live, live replay, or archived, on-demand data. 
Features of // the three types of sessions include the following: // - // * LIVE : For sessions of + // * LIVE : For sessions of // this type, the MPEG-DASH manifest is continually updated with the latest // fragments as they become available. We recommend that the media player retrieve // a new manifest on a one-second interval. When this type of session is played in @@ -233,7 +233,7 @@ type GetDASHStreamingSessionURLInput struct { // missing fragment becomes available after a subsequent fragment is added to the // manifest, the older fragment is not added, and the gap is not filled. // - // * + // * // LIVE_REPLAY : For sessions of this type, the MPEG-DASH manifest is updated // similarly to how it is updated for LIVE mode except that it starts by including // fragments from a given start time. Instead of fragments being added as they are @@ -245,9 +245,9 @@ type GetDASHStreamingSessionURLInput struct { // creation. This mode is also useful to stream previously archived media without // being limited by the 1,000 fragment limit in the ON_DEMAND mode. // - // * - // ON_DEMAND : For sessions of this type, the MPEG-DASH manifest contains all the - // fragments for the session, up to the number that is specified in + // * ON_DEMAND : + // For sessions of this type, the MPEG-DASH manifest contains all the fragments for + // the session, up to the number that is specified in // MaxMediaPlaylistFragmentResults. The manifest must be retrieved only once for // each session. When this type of session is played in a media player, the user // interface typically displays a scrubber control for choosing the position in the diff --git a/service/kinesisvideoarchivedmedia/api_op_GetHLSStreamingSessionURL.go b/service/kinesisvideoarchivedmedia/api_op_GetHLSStreamingSessionURL.go index dc449dda766..2169f6661d0 100644 --- a/service/kinesisvideoarchivedmedia/api_op_GetHLSStreamingSessionURL.go +++ b/service/kinesisvideoarchivedmedia/api_op_GetHLSStreamingSessionURL.go @@ -18,24 +18,24 @@ import ( // Amazon Kinesis video stream has the following requirements for providing data // through HLS: // -// * The media must contain h.264 or h.265 encoded video and, +// * The media must contain h.264 or h.265 encoded video and, // optionally, AAC encoded audio. Specifically, the codec ID of track 1 should be // V_MPEG/ISO/AVC (for h.264) or V_MPEG/ISO/HEVC (for h.265). Optionally, the codec // ID of track 2 should be A_AAC. // -// * Data retention must be greater than 0. +// * Data retention must be greater than 0. // -// -// * The video track of each fragment must contain codec private data in the -// Advanced Video Coding (AVC) for H.264 format or HEVC for H.265 format (MPEG-4 +// * The +// video track of each fragment must contain codec private data in the Advanced +// Video Coding (AVC) for H.264 format or HEVC for H.265 format (MPEG-4 // specification ISO/IEC 14496-15 (https://www.iso.org/standard/55980.html)). For // information about adapting stream data to a given format, see NAL Adaptation // Flags // (http://docs.aws.amazon.com/kinesisvideostreams/latest/dg/producer-reference-nal.html). // -// -// * The audio track (if present) of each fragment must contain codec private data -// in the AAC format (AAC specification ISO/IEC 13818-7 +// * +// The audio track (if present) of each fragment must contain codec private data in +// the AAC format (AAC specification ISO/IEC 13818-7 // (https://www.iso.org/standard/43345.html)). 
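The GetHLSStreamingSessionURL procedure described next amounts to two SDK calls before the URL is handed to a media player: GetDataEndpoint and then GetHLSStreamingSessionURL itself. A sketch of the second call, not part of this patch, assuming the client configuration already targets the GET_HLS_STREAMING_SESSION_URL data endpoint and using a placeholder stream name (the renamed playback-mode constants such as HLSPlaybackModeLiveReplay and HLSPlaybackModeOnDemand appear further down in the enums diff):

package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kinesisvideoarchivedmedia"
	"github.com/aws/aws-sdk-go-v2/service/kinesisvideoarchivedmedia/types"
)

// liveHLSURL requests an HLS master-playlist URL for live playback. cfg is
// assumed to target the endpoint returned by GetDataEndpoint for
// GET_HLS_STREAMING_SESSION_URL.
func liveHLSURL(ctx context.Context, cfg aws.Config) (*string, error) {
	am := kinesisvideoarchivedmedia.NewFromConfig(cfg)
	out, err := am.GetHLSStreamingSessionURL(ctx, &kinesisvideoarchivedmedia.GetHLSStreamingSessionURLInput{
		StreamName:   aws.String("example-stream"), // placeholder
		PlaybackMode: types.HLSPlaybackModeLive,    // LIVE_REPLAY and ON_DEMAND map to the renamed constants below
	})
	if err != nil {
		return nil, err
	}
	return out.HLSStreamingSessionURL, nil
}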
// // Kinesis Video Streams HLS sessions @@ -45,75 +45,74 @@ import ( // specification (https://tools.ietf.org/html/draft-pantos-http-live-streaming-23). // The following procedure shows how to use HLS with Kinesis Video Streams: // -// * -// Get an endpoint using GetDataEndpoint +// * Get +// an endpoint using GetDataEndpoint // (http://docs.aws.amazon.com/kinesisvideostreams/latest/dg/API_GetDataEndpoint.html), // specifying GET_HLS_STREAMING_SESSION_URL for the APIName parameter. // -// * -// Retrieve the HLS URL using GetHLSStreamingSessionURL. Kinesis Video Streams -// creates an HLS streaming session to be used for accessing content in a stream -// using the HLS protocol. GetHLSStreamingSessionURL returns an authenticated URL -// (that includes an encrypted session token) for the session's HLS master playlist -// (the root resource needed for streaming with HLS). Don't share or store this -// token where an unauthorized entity could access it. The token provides access to -// the content of the stream. Safeguard the token with the same measures that you -// would use with your AWS credentials. The media that is made available through -// the playlist consists only of the requested stream, time range, and format. No -// other media data (such as frames outside the requested window or alternate -// bitrates) is made available. -// -// * Provide the URL (containing the encrypted -// session token) for the HLS master playlist to a media player that supports the -// HLS protocol. Kinesis Video Streams makes the HLS media playlist, initialization -// fragment, and media fragments available through the master playlist URL. The -// initialization fragment contains the codec private data for the stream, and -// other data needed to set up the video or audio decoder and renderer. The media -// fragments contain H.264-encoded video frames or AAC-encoded audio samples. +// * Retrieve +// the HLS URL using GetHLSStreamingSessionURL. Kinesis Video Streams creates an +// HLS streaming session to be used for accessing content in a stream using the HLS +// protocol. GetHLSStreamingSessionURL returns an authenticated URL (that includes +// an encrypted session token) for the session's HLS master playlist (the root +// resource needed for streaming with HLS). Don't share or store this token where +// an unauthorized entity could access it. The token provides access to the content +// of the stream. Safeguard the token with the same measures that you would use +// with your AWS credentials. The media that is made available through the playlist +// consists only of the requested stream, time range, and format. No other media +// data (such as frames outside the requested window or alternate bitrates) is made +// available. // +// * Provide the URL (containing the encrypted session token) for the +// HLS master playlist to a media player that supports the HLS protocol. Kinesis +// Video Streams makes the HLS media playlist, initialization fragment, and media +// fragments available through the master playlist URL. The initialization fragment +// contains the codec private data for the stream, and other data needed to set up +// the video or audio decoder and renderer. The media fragments contain +// H.264-encoded video frames or AAC-encoded audio samples. // -// * The media player receives the authenticated URL and requests stream metadata -// and media data normally. 
When the media player requests data, it calls the -// following actions: +// * The media player +// receives the authenticated URL and requests stream metadata and media data +// normally. When the media player requests data, it calls the following +// actions: // -// * GetHLSMasterPlaylist: Retrieves an HLS master -// playlist, which contains a URL for the GetHLSMediaPlaylist action for each -// track, and additional metadata for the media player, including estimated bitrate -// and resolution. +// * GetHLSMasterPlaylist: Retrieves an HLS master playlist, which +// contains a URL for the GetHLSMediaPlaylist action for each track, and additional +// metadata for the media player, including estimated bitrate and resolution. // -// * GetHLSMediaPlaylist: Retrieves an HLS media playlist, -// which contains a URL to access the MP4 initialization fragment with the -// GetMP4InitFragment action, and URLs to access the MP4 media fragments with the -// GetMP4MediaFragment actions. The HLS media playlist also contains metadata about -// the stream that the player needs to play it, such as whether the PlaybackMode is -// LIVE or ON_DEMAND. The HLS media playlist is typically static for sessions with -// a PlaybackType of ON_DEMAND. The HLS media playlist is continually updated with -// new fragments for sessions with a PlaybackType of LIVE. There is a distinct HLS -// media playlist for the video track and the audio track (if applicable) that -// contains MP4 media URLs for the specific track. +// * +// GetHLSMediaPlaylist: Retrieves an HLS media playlist, which contains a URL to +// access the MP4 initialization fragment with the GetMP4InitFragment action, and +// URLs to access the MP4 media fragments with the GetMP4MediaFragment actions. The +// HLS media playlist also contains metadata about the stream that the player needs +// to play it, such as whether the PlaybackMode is LIVE or ON_DEMAND. The HLS media +// playlist is typically static for sessions with a PlaybackType of ON_DEMAND. The +// HLS media playlist is continually updated with new fragments for sessions with a +// PlaybackType of LIVE. There is a distinct HLS media playlist for the video track +// and the audio track (if applicable) that contains MP4 media URLs for the +// specific track. // -// * GetMP4InitFragment: -// Retrieves the MP4 initialization fragment. The media player typically loads the -// initialization fragment before loading any media fragments. This fragment -// contains the "fytp" and "moov" MP4 atoms, and the child atoms that are needed to -// initialize the media player decoder. The initialization fragment does not -// correspond to a fragment in a Kinesis video stream. It contains only the codec -// private data for the stream and respective track, which the media player needs -// to decode the media frames. +// * GetMP4InitFragment: Retrieves the MP4 initialization +// fragment. The media player typically loads the initialization fragment before +// loading any media fragments. This fragment contains the "fytp" and "moov" MP4 +// atoms, and the child atoms that are needed to initialize the media player +// decoder. The initialization fragment does not correspond to a fragment in a +// Kinesis video stream. It contains only the codec private data for the stream and +// respective track, which the media player needs to decode the media frames. // -// * GetMP4MediaFragment: Retrieves MP4 media -// fragments. 
These fragments contain the "moof" and "mdat" MP4 atoms and their -// child atoms, containing the encoded fragment's media frames and their -// timestamps. After the first media fragment is made available in a streaming -// session, any fragments that don't contain the same codec private data cause an -// error to be returned when those different media fragments are loaded. Therefore, -// the codec private data should not change between fragments in a session. This -// also means that the session fails if the fragments in a stream change from -// having only video to having both audio and video. Data retrieved with this -// action is billable. See Pricing +// * +// GetMP4MediaFragment: Retrieves MP4 media fragments. These fragments contain the +// "moof" and "mdat" MP4 atoms and their child atoms, containing the encoded +// fragment's media frames and their timestamps. After the first media fragment is +// made available in a streaming session, any fragments that don't contain the same +// codec private data cause an error to be returned when those different media +// fragments are loaded. Therefore, the codec private data should not change +// between fragments in a session. This also means that the session fails if the +// fragments in a stream change from having only video to having both audio and +// video. Data retrieved with this action is billable. See Pricing // (https://aws.amazon.com/kinesis/video-streams/pricing/) for details. // -// * +// * // GetTSFragment: Retrieves MPEG TS fragments containing both initialization and // media data for all tracks in the stream. If the ContainerFormat is MPEG_TS, this // API is used instead of GetMP4InitFragment and GetMP4MediaFragment to retrieve @@ -124,12 +123,12 @@ import ( // The following // restrictions apply to HLS sessions: // -// * A streaming session URL should not be +// * A streaming session URL should not be // shared between players. The service might throttle a session if multiple media // players are sharing it. For connection limits, see Kinesis Video Streams Limits // (http://docs.aws.amazon.com/kinesisvideostreams/latest/dg/limits.html). // -// * A +// * A // Kinesis video stream can have a maximum of ten active HLS streaming sessions. If // a new session is created when the maximum number of sessions is already active, // the oldest (earliest created) session is closed. The number of active GetMedia @@ -152,19 +151,19 @@ import ( // Video Streams archived media API, in addition to the HTTP status code and the // response body, it includes the following pieces of information: // -// * +// * // x-amz-ErrorType HTTP header – contains a more specific error type in addition to // what the HTTP status code provides. // -// * x-amz-RequestId HTTP header – if you -// want to report an issue to AWS, the support team can better diagnose the problem -// if given the Request Id. +// * x-amz-RequestId HTTP header – if you want +// to report an issue to AWS, the support team can better diagnose the problem if +// given the Request Id. // -// Both the HTTP status code and the ErrorType header can -// be utilized to make programmatic decisions about whether errors are retry-able -// and under what conditions, as well as provide information on what actions the -// client programmer might need to take in order to successfully try again. 
For -// more information, see the Errors section at the bottom of this topic, as well as +// Both the HTTP status code and the ErrorType header can be +// utilized to make programmatic decisions about whether errors are retry-able and +// under what conditions, as well as provide information on what actions the client +// programmer might need to take in order to successfully try again. For more +// information, see the Errors section at the bottom of this topic, as well as // Common Errors // (https://docs.aws.amazon.com/kinesisvideostreams/latest/dg/CommonErrors.html). func (c *Client) GetHLSStreamingSessionURL(ctx context.Context, params *GetHLSStreamingSessionURLInput, optFns ...func(*Options)) (*GetHLSStreamingSessionURLOutput, error) { @@ -206,24 +205,23 @@ type GetHLSStreamingSessionURLInput struct { // fragment being played immediately after the previous fragment. The following // modes are supported: // - // * ALWAYS: a discontinuity marker is placed between - // every fragment in the HLS media playlist. It is recommended to use a value of - // ALWAYS if the fragment timestamps are not accurate. + // * ALWAYS: a discontinuity marker is placed between every + // fragment in the HLS media playlist. It is recommended to use a value of ALWAYS + // if the fragment timestamps are not accurate. // - // * NEVER: no - // discontinuity markers are placed anywhere. It is recommended to use a value of - // NEVER to ensure the media player timeline most accurately maps to the producer - // timestamps. + // * NEVER: no discontinuity markers + // are placed anywhere. It is recommended to use a value of NEVER to ensure the + // media player timeline most accurately maps to the producer timestamps. // - // * ON_DISCONTIUNITY: a discontinuity marker is placed between - // fragments that have a gap or overlap of more than 50 milliseconds. For most - // playback scenarios, it is recommended to use a value of ON_DISCONTINUITY so that - // the media player timeline is only reset when there is a significant issue with - // the media timeline (e.g. a missing fragment). + // * + // ON_DISCONTIUNITY: a discontinuity marker is placed between fragments that have a + // gap or overlap of more than 50 milliseconds. For most playback scenarios, it is + // recommended to use a value of ON_DISCONTINUITY so that the media player timeline + // is only reset when there is a significant issue with the media timeline (e.g. a + // missing fragment). // - // The default is ALWAYS when - // HLSFragmentSelector is set to SERVER_TIMESTAMP, and NEVER when it is set to - // PRODUCER_TIMESTAMP. + // The default is ALWAYS when HLSFragmentSelector is set to + // SERVER_TIMESTAMP, and NEVER when it is set to PRODUCER_TIMESTAMP. DiscontinuityMode types.HLSDiscontinuityMode // Specifies when the fragment start timestamps should be included in the HLS media @@ -271,7 +269,7 @@ type GetHLSStreamingSessionURLInput struct { // Whether to retrieve live, live replay, or archived, on-demand data. Features of // the three types of sessions include the following: // - // * LIVE : For sessions of + // * LIVE : For sessions of // this type, the HLS media playlist is continually updated with the latest // fragments as they become available. We recommend that the media player retrieve // a new playlist on a one-second interval. 
When this type of session is played in @@ -285,7 +283,7 @@ type GetHLSStreamingSessionURLInput struct { // becomes available after a subsequent fragment is added to the playlist, the // older fragment is not added, and the gap is not filled. // - // * LIVE_REPLAY : For + // * LIVE_REPLAY : For // sessions of this type, the HLS media playlist is updated similarly to how it is // updated for LIVE mode except that it starts by including fragments from a given // start time. Instead of fragments being added as they are ingested, fragments are @@ -297,9 +295,9 @@ type GetHLSStreamingSessionURLInput struct { // also useful to stream previously archived media without being limited by the // 1,000 fragment limit in the ON_DEMAND mode. // - // * ON_DEMAND : For sessions of - // this type, the HLS media playlist contains all the fragments for the session, up - // to the number that is specified in MaxMediaPlaylistFragmentResults. The playlist + // * ON_DEMAND : For sessions of this + // type, the HLS media playlist contains all the fragments for the session, up to + // the number that is specified in MaxMediaPlaylistFragmentResults. The playlist // must be retrieved only once for each session. When this type of session is // played in a media player, the user interface typically displays a scrubber // control for choosing the position in the playback window to display. diff --git a/service/kinesisvideoarchivedmedia/api_op_GetMediaForFragmentList.go b/service/kinesisvideoarchivedmedia/api_op_GetMediaForFragmentList.go index 4e38679f30e..fa9aa9e6dcc 100644 --- a/service/kinesisvideoarchivedmedia/api_op_GetMediaForFragmentList.go +++ b/service/kinesisvideoarchivedmedia/api_op_GetMediaForFragmentList.go @@ -18,31 +18,31 @@ import ( // (https://docs.aws.amazon.com/cli/latest/reference/). The following limits apply // when using the GetMediaForFragmentList API: // -// * A client can call +// * A client can call // GetMediaForFragmentList up to five times per second per stream. // -// * Kinesis -// Video Streams sends media data at a rate of up to 25 megabytes per second (or -// 200 megabits per second) during a GetMediaForFragmentList session. +// * Kinesis Video +// Streams sends media data at a rate of up to 25 megabytes per second (or 200 +// megabits per second) during a GetMediaForFragmentList session. // -// If an error -// is thrown after invoking a Kinesis Video Streams archived media API, in addition -// to the HTTP status code and the response body, it includes the following pieces -// of information: +// If an error is +// thrown after invoking a Kinesis Video Streams archived media API, in addition to +// the HTTP status code and the response body, it includes the following pieces of +// information: // -// * x-amz-ErrorType HTTP header – contains a more specific -// error type in addition to what the HTTP status code provides. +// * x-amz-ErrorType HTTP header – contains a more specific error +// type in addition to what the HTTP status code provides. // -// * -// x-amz-RequestId HTTP header – if you want to report an issue to AWS, the support -// team can better diagnose the problem if given the Request Id. +// * x-amz-RequestId HTTP +// header – if you want to report an issue to AWS, the support team can better +// diagnose the problem if given the Request Id. 
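Keeping within the limits listed above (five GetMediaForFragmentList calls per second per stream, up to 25 MB/s of media), a minimal call might look like the following sketch; the fragment numbers are assumed to come from a prior ListFragments call and the stream name is a placeholder.

package main

import (
	"context"
	"io"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kinesisvideoarchivedmedia"
)

// mediaForFragments downloads the media for a list of fragment numbers. cfg is
// assumed to target the GET_MEDIA_FOR_FRAGMENT_LIST data endpoint.
func mediaForFragments(ctx context.Context, cfg aws.Config, fragments []string) (io.ReadCloser, error) {
	am := kinesisvideoarchivedmedia.NewFromConfig(cfg)
	out, err := am.GetMediaForFragmentList(ctx, &kinesisvideoarchivedmedia.GetMediaForFragmentListInput{
		StreamName: aws.String("example-stream"), // placeholder
		Fragments:  fragments,
	})
	if err != nil {
		return nil, err
	}
	// The payload is an MKV stream carrying the AWS_KINESISVIDEO_* tags
	// described in the output documentation below.
	return out.Payload, nil
}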
// -// Both the HTTP -// status code and the ErrorType header can be utilized to make programmatic -// decisions about whether errors are retry-able and under what conditions, as well -// as provide information on what actions the client programmer might need to take -// in order to successfully try again. For more information, see the Errors section -// at the bottom of this topic, as well as Common Errors +// Both the HTTP status code and the +// ErrorType header can be utilized to make programmatic decisions about whether +// errors are retry-able and under what conditions, as well as provide information +// on what actions the client programmer might need to take in order to +// successfully try again. For more information, see the Errors section at the +// bottom of this topic, as well as Common Errors // (https://docs.aws.amazon.com/kinesisvideostreams/latest/dg/CommonErrors.html). func (c *Client) GetMediaForFragmentList(ctx context.Context, params *GetMediaForFragmentListInput, optFns ...func(*Options)) (*GetMediaForFragmentListOutput, error) { if params == nil { @@ -84,27 +84,27 @@ type GetMediaForFragmentListOutput struct { // The chunks that Kinesis Video Streams returns in the GetMediaForFragmentList // call also include the following additional Matroska (MKV) tags: // - // * + // * // AWS_KINESISVIDEO_FRAGMENT_NUMBER - Fragment number returned in the chunk. // - // * + // * // AWS_KINESISVIDEO_SERVER_SIDE_TIMESTAMP - Server-side timestamp of the // fragment. // - // * AWS_KINESISVIDEO_PRODUCER_SIDE_TIMESTAMP - Producer-side - // timestamp of the fragment. + // * AWS_KINESISVIDEO_PRODUCER_SIDE_TIMESTAMP - Producer-side timestamp + // of the fragment. // - // The following tags will be included if an exception - // occurs: + // The following tags will be included if an exception occurs: // - // * AWS_KINESISVIDEO_FRAGMENT_NUMBER - The number of the fragment - // that threw the exception + // * + // AWS_KINESISVIDEO_FRAGMENT_NUMBER - The number of the fragment that threw the + // exception // - // * AWS_KINESISVIDEO_EXCEPTION_ERROR_CODE - The - // integer code of the exception + // * AWS_KINESISVIDEO_EXCEPTION_ERROR_CODE - The integer code of the + // exception // - // * AWS_KINESISVIDEO_EXCEPTION_MESSAGE - A text - // description of the exception + // * AWS_KINESISVIDEO_EXCEPTION_MESSAGE - A text description of the + // exception Payload io.ReadCloser // Metadata pertaining to the operation's result. diff --git a/service/kinesisvideoarchivedmedia/api_op_ListFragments.go b/service/kinesisvideoarchivedmedia/api_op_ListFragments.go index 499b2f36c51..a917a484980 100644 --- a/service/kinesisvideoarchivedmedia/api_op_ListFragments.go +++ b/service/kinesisvideoarchivedmedia/api_op_ListFragments.go @@ -23,11 +23,11 @@ import ( // status code and the response body, it includes the following pieces of // information: // -// * x-amz-ErrorType HTTP header – contains a more specific error +// * x-amz-ErrorType HTTP header – contains a more specific error // type in addition to what the HTTP status code provides. // -// * x-amz-RequestId -// HTTP header – if you want to report an issue to AWS, the support team can better +// * x-amz-RequestId HTTP +// header – if you want to report an issue to AWS, the support team can better // diagnose the problem if given the Request Id. 
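ListFragments, documented in the hunk above, is the usual source of the fragment numbers passed to GetMediaForFragmentList. A sketch using the renamed FragmentSelectorTypeProducerTimestamp constant from the enums diff below, with a placeholder stream name and a ten-minute window:

package main

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kinesisvideoarchivedmedia"
	"github.com/aws/aws-sdk-go-v2/service/kinesisvideoarchivedmedia/types"
)

// recentFragments lists fragments produced in the last ten minutes, selecting
// on producer timestamps via FragmentSelectorTypeProducerTimestamp (formerly
// FragmentSelectorTypeProducer_timestamp).
func recentFragments(ctx context.Context, cfg aws.Config) ([]types.Fragment, error) {
	am := kinesisvideoarchivedmedia.NewFromConfig(cfg)
	now := time.Now()
	out, err := am.ListFragments(ctx, &kinesisvideoarchivedmedia.ListFragmentsInput{
		StreamName: aws.String("example-stream"), // placeholder
		FragmentSelector: &types.FragmentSelector{
			FragmentSelectorType: types.FragmentSelectorTypeProducerTimestamp,
			TimestampRange: &types.TimestampRange{
				StartTimestamp: aws.Time(now.Add(-10 * time.Minute)),
				EndTimestamp:   aws.Time(now),
			},
		},
	})
	if err != nil {
		return nil, err
	}
	return out.Fragments, nil
}

The fragment numbers on the returned Fragment values can then be fed to GetMediaForFragmentList as sketched earlier.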
// // Both the HTTP status code and the diff --git a/service/kinesisvideoarchivedmedia/types/enums.go b/service/kinesisvideoarchivedmedia/types/enums.go index edac5f6ed79..476abd89d5c 100644 --- a/service/kinesisvideoarchivedmedia/types/enums.go +++ b/service/kinesisvideoarchivedmedia/types/enums.go @@ -6,8 +6,8 @@ type ClipFragmentSelectorType string // Enum values for ClipFragmentSelectorType const ( - ClipFragmentSelectorTypeProducer_timestamp ClipFragmentSelectorType = "PRODUCER_TIMESTAMP" - ClipFragmentSelectorTypeServer_timestamp ClipFragmentSelectorType = "SERVER_TIMESTAMP" + ClipFragmentSelectorTypeProducerTimestamp ClipFragmentSelectorType = "PRODUCER_TIMESTAMP" + ClipFragmentSelectorTypeServerTimestamp ClipFragmentSelectorType = "SERVER_TIMESTAMP" ) // Values returns all known values for ClipFragmentSelectorType. Note that this can @@ -24,8 +24,8 @@ type ContainerFormat string // Enum values for ContainerFormat const ( - ContainerFormatFragmented_mp4 ContainerFormat = "FRAGMENTED_MP4" - ContainerFormatMpeg_ts ContainerFormat = "MPEG_TS" + ContainerFormatFragmentedMp4 ContainerFormat = "FRAGMENTED_MP4" + ContainerFormatMpegTs ContainerFormat = "MPEG_TS" ) // Values returns all known values for ContainerFormat. Note that this can be @@ -78,8 +78,8 @@ type DASHFragmentSelectorType string // Enum values for DASHFragmentSelectorType const ( - DASHFragmentSelectorTypeProducer_timestamp DASHFragmentSelectorType = "PRODUCER_TIMESTAMP" - DASHFragmentSelectorTypeServer_timestamp DASHFragmentSelectorType = "SERVER_TIMESTAMP" + DASHFragmentSelectorTypeProducerTimestamp DASHFragmentSelectorType = "PRODUCER_TIMESTAMP" + DASHFragmentSelectorTypeServerTimestamp DASHFragmentSelectorType = "SERVER_TIMESTAMP" ) // Values returns all known values for DASHFragmentSelectorType. Note that this can @@ -96,9 +96,9 @@ type DASHPlaybackMode string // Enum values for DASHPlaybackMode const ( - DASHPlaybackModeLive DASHPlaybackMode = "LIVE" - DASHPlaybackModeLive_replay DASHPlaybackMode = "LIVE_REPLAY" - DASHPlaybackModeOn_demand DASHPlaybackMode = "ON_DEMAND" + DASHPlaybackModeLive DASHPlaybackMode = "LIVE" + DASHPlaybackModeLiveReplay DASHPlaybackMode = "LIVE_REPLAY" + DASHPlaybackModeOnDemand DASHPlaybackMode = "ON_DEMAND" ) // Values returns all known values for DASHPlaybackMode. Note that this can be @@ -116,8 +116,8 @@ type FragmentSelectorType string // Enum values for FragmentSelectorType const ( - FragmentSelectorTypeProducer_timestamp FragmentSelectorType = "PRODUCER_TIMESTAMP" - FragmentSelectorTypeServer_timestamp FragmentSelectorType = "SERVER_TIMESTAMP" + FragmentSelectorTypeProducerTimestamp FragmentSelectorType = "PRODUCER_TIMESTAMP" + FragmentSelectorTypeServerTimestamp FragmentSelectorType = "SERVER_TIMESTAMP" ) // Values returns all known values for FragmentSelectorType. Note that this can be @@ -134,9 +134,9 @@ type HLSDiscontinuityMode string // Enum values for HLSDiscontinuityMode const ( - HLSDiscontinuityModeAlways HLSDiscontinuityMode = "ALWAYS" - HLSDiscontinuityModeNever HLSDiscontinuityMode = "NEVER" - HLSDiscontinuityModeOn_discontinuity HLSDiscontinuityMode = "ON_DISCONTINUITY" + HLSDiscontinuityModeAlways HLSDiscontinuityMode = "ALWAYS" + HLSDiscontinuityModeNever HLSDiscontinuityMode = "NEVER" + HLSDiscontinuityModeOnDiscontinuity HLSDiscontinuityMode = "ON_DISCONTINUITY" ) // Values returns all known values for HLSDiscontinuityMode. 
Note that this can be @@ -172,8 +172,8 @@ type HLSFragmentSelectorType string // Enum values for HLSFragmentSelectorType const ( - HLSFragmentSelectorTypeProducer_timestamp HLSFragmentSelectorType = "PRODUCER_TIMESTAMP" - HLSFragmentSelectorTypeServer_timestamp HLSFragmentSelectorType = "SERVER_TIMESTAMP" + HLSFragmentSelectorTypeProducerTimestamp HLSFragmentSelectorType = "PRODUCER_TIMESTAMP" + HLSFragmentSelectorTypeServerTimestamp HLSFragmentSelectorType = "SERVER_TIMESTAMP" ) // Values returns all known values for HLSFragmentSelectorType. Note that this can @@ -190,9 +190,9 @@ type HLSPlaybackMode string // Enum values for HLSPlaybackMode const ( - HLSPlaybackModeLive HLSPlaybackMode = "LIVE" - HLSPlaybackModeLive_replay HLSPlaybackMode = "LIVE_REPLAY" - HLSPlaybackModeOn_demand HLSPlaybackMode = "ON_DEMAND" + HLSPlaybackModeLive HLSPlaybackMode = "LIVE" + HLSPlaybackModeLiveReplay HLSPlaybackMode = "LIVE_REPLAY" + HLSPlaybackModeOnDemand HLSPlaybackMode = "ON_DEMAND" ) // Values returns all known values for HLSPlaybackMode. Note that this can be diff --git a/service/kinesisvideoarchivedmedia/types/types.go b/service/kinesisvideoarchivedmedia/types/types.go index b490a92a987..c487d3889c8 100644 --- a/service/kinesisvideoarchivedmedia/types/types.go +++ b/service/kinesisvideoarchivedmedia/types/types.go @@ -136,18 +136,18 @@ type Fragment struct { // and less than or equal to the end time are returned. For example, if a stream // contains fragments with the following start timestamps: // -// * 00:00:00 +// * 00:00:00 // -// * +// * // 00:00:02 // -// * 00:00:04 +// * 00:00:04 // -// * 00:00:06 +// * 00:00:06 // -// A fragment selector range with a start -// time of 00:00:01 and end time of 00:00:04 would return the fragments with start -// times of 00:00:02 and 00:00:04. +// A fragment selector range with a start time of +// 00:00:01 and end time of 00:00:04 would return the fragments with start times of +// 00:00:02 and 00:00:04. type FragmentSelector struct { // The origin of the timestamps to use (Server or Producer). diff --git a/service/kinesisvideomedia/api_op_GetMedia.go b/service/kinesisvideomedia/api_op_GetMedia.go index cf032bacfe2..b20e310bd1d 100644 --- a/service/kinesisvideomedia/api_op_GetMedia.go +++ b/service/kinesisvideomedia/api_op_GetMedia.go @@ -26,10 +26,10 @@ import ( // you specify in the request. The following limits apply when using the GetMedia // API: // -// * A client can call GetMedia up to five times per second per stream. +// * A client can call GetMedia up to five times per second per stream. // -// -// * Kinesis Video Streams sends media data at a rate of up to 25 megabytes per +// * +// Kinesis Video Streams sends media data at a rate of up to 25 megabytes per // second (or 200 megabits per second) during a GetMedia session. // // If an error is @@ -37,11 +37,11 @@ import ( // status code and the response body, it includes the following pieces of // information: // -// * x-amz-ErrorType HTTP header – contains a more specific error +// * x-amz-ErrorType HTTP header – contains a more specific error // type in addition to what the HTTP status code provides. // -// * x-amz-RequestId -// HTTP header – if you want to report an issue to AWS, the support team can better +// * x-amz-RequestId HTTP +// header – if you want to report an issue to AWS, the support team can better // diagnose the problem if given the Request Id. 
// // Both the HTTP status code and the @@ -92,62 +92,61 @@ type GetMediaOutput struct { // Kinesis Video Streams returns in the GetMedia call also include the following // additional Matroska (MKV) tags: // - // * AWS_KINESISVIDEO_CONTINUATION_TOKEN - // (UTF-8 string) - In the event your GetMedia call terminates, you can use this + // * AWS_KINESISVIDEO_CONTINUATION_TOKEN (UTF-8 + // string) - In the event your GetMedia call terminates, you can use this // continuation token in your next request to get the next chunk where the last // request terminated. // - // * AWS_KINESISVIDEO_MILLIS_BEHIND_NOW (UTF-8 string) - + // * AWS_KINESISVIDEO_MILLIS_BEHIND_NOW (UTF-8 string) - // Client applications can use this tag value to determine how far behind the chunk // returned in the response is from the latest chunk on the stream. // - // * + // * // AWS_KINESISVIDEO_FRAGMENT_NUMBER - Fragment number returned in the chunk. // - // * + // * // AWS_KINESISVIDEO_SERVER_TIMESTAMP - Server timestamp of the fragment. // - // * + // * // AWS_KINESISVIDEO_PRODUCER_TIMESTAMP - Producer timestamp of the fragment. // // The // following tags will be present if an error occurs: // - // * + // * // AWS_KINESISVIDEO_ERROR_CODE - String description of an error that caused // GetMedia to stop. // - // * AWS_KINESISVIDEO_ERROR_ID: Integer code of the - // error. + // * AWS_KINESISVIDEO_ERROR_ID: Integer code of the error. // - // The error codes are as follows: + // The + // error codes are as follows: // - // * 3002 - Error writing to the - // stream + // * 3002 - Error writing to the stream // - // * 4000 - Requested fragment is not found + // * 4000 - + // Requested fragment is not found // - // * 4500 - Access denied - // for the stream's KMS key + // * 4500 - Access denied for the stream's KMS + // key // - // * 4501 - Stream's KMS key is disabled + // * 4501 - Stream's KMS key is disabled // - // * 4502 - // - Validation error on the stream's KMS key + // * 4502 - Validation error on the + // stream's KMS key // - // * 4503 - KMS key specified in - // the stream is unavailable + // * 4503 - KMS key specified in the stream is unavailable // - // * 4504 - Invalid usage of the KMS key specified - // in the stream + // * + // 4504 - Invalid usage of the KMS key specified in the stream // - // * 4505 - Invalid state of the KMS key specified in the - // stream + // * 4505 - Invalid + // state of the KMS key specified in the stream // - // * 4506 - Unable to find the KMS key specified in the stream + // * 4506 - Unable to find the KMS + // key specified in the stream // - // * - // 5000 - Internal error + // * 5000 - Internal error Payload io.ReadCloser // Metadata pertaining to the operation's result. 
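A minimal sketch of the GetMedia call documented above, separate from the patch itself. It assumes an aws-sdk-go-v2 kinesisvideomedia client that has already been pointed at the data endpoint returned by GetDataEndpoint (not shown); the stream name is a placeholder. The payload is a stream of MKV chunks carrying the AWS_KINESISVIDEO_* tags listed above, including the continuation token for resuming a terminated session.

package kvexamples

import (
	"context"
	"io"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kinesisvideomedia"
	"github.com/aws/aws-sdk-go-v2/service/kinesisvideomedia/types"
)

// readLiveMedia starts reading at the latest chunk (StartSelectorTypeNow) and
// copies the MKV payload to w. The client must target the data endpoint
// returned by GetDataEndpoint; "example-stream" is a placeholder.
func readLiveMedia(ctx context.Context, client *kinesisvideomedia.Client, w io.Writer) error {
	out, err := client.GetMedia(ctx, &kinesisvideomedia.GetMediaInput{
		StreamName: aws.String("example-stream"),
		StartSelector: &types.StartSelector{
			StartSelectorType: types.StartSelectorTypeNow,
		},
	})
	if err != nil {
		return err
	}
	defer out.Payload.Close()
	// Remember the documented limits: five GetMedia calls per second per
	// stream, and up to 25 MB/s per session.
	_, err = io.Copy(w, out.Payload)
	return err
}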
diff --git a/service/kinesisvideomedia/types/enums.go b/service/kinesisvideomedia/types/enums.go index 0c14a97688f..d2335d2c31c 100644 --- a/service/kinesisvideomedia/types/enums.go +++ b/service/kinesisvideomedia/types/enums.go @@ -6,12 +6,12 @@ type StartSelectorType string // Enum values for StartSelectorType const ( - StartSelectorTypeFragment_number StartSelectorType = "FRAGMENT_NUMBER" - StartSelectorTypeServer_timestamp StartSelectorType = "SERVER_TIMESTAMP" - StartSelectorTypeProducer_timestamp StartSelectorType = "PRODUCER_TIMESTAMP" - StartSelectorTypeNow StartSelectorType = "NOW" - StartSelectorTypeEarliest StartSelectorType = "EARLIEST" - StartSelectorTypeContinuation_token StartSelectorType = "CONTINUATION_TOKEN" + StartSelectorTypeFragmentNumber StartSelectorType = "FRAGMENT_NUMBER" + StartSelectorTypeServerTimestamp StartSelectorType = "SERVER_TIMESTAMP" + StartSelectorTypeProducerTimestamp StartSelectorType = "PRODUCER_TIMESTAMP" + StartSelectorTypeNow StartSelectorType = "NOW" + StartSelectorTypeEarliest StartSelectorType = "EARLIEST" + StartSelectorTypeContinuationToken StartSelectorType = "CONTINUATION_TOKEN" ) // Values returns all known values for StartSelectorType. Note that this can be diff --git a/service/kinesisvideomedia/types/types.go b/service/kinesisvideomedia/types/types.go index f57ad446716..00cd18b2be3 100644 --- a/service/kinesisvideomedia/types/types.go +++ b/service/kinesisvideomedia/types/types.go @@ -10,14 +10,14 @@ import ( // to start returning media data. You have the following options to identify the // starting chunk: // -// * Choose the latest (or oldest) chunk. +// * Choose the latest (or oldest) chunk. // -// * Identify a -// specific chunk. You can identify a specific chunk either by providing a fragment -// number or timestamp (server or producer). +// * Identify a specific +// chunk. You can identify a specific chunk either by providing a fragment number +// or timestamp (server or producer). // -// * Each chunk's metadata includes -// a continuation token as a Matroska (MKV) tag +// * Each chunk's metadata includes a +// continuation token as a Matroska (MKV) tag // (AWS_KINESISVIDEO_CONTINUATION_TOKEN). If your previous GetMedia request // terminated, you can use this tag value in your next GetMedia request. The API // then starts returning chunks starting where the last API ended. @@ -26,26 +26,25 @@ type StartSelector struct { // Identifies the fragment on the Kinesis video stream where you want to start // getting the data from. // - // * NOW - Start with the latest chunk on the stream. + // * NOW - Start with the latest chunk on the stream. // + // * + // EARLIEST - Start with earliest available chunk on the stream. // - // * EARLIEST - Start with earliest available chunk on the stream. + // * FRAGMENT_NUMBER + // - Start with the chunk after a specific fragment. You must also specify the + // AfterFragmentNumber parameter. // - // * - // FRAGMENT_NUMBER - Start with the chunk after a specific fragment. You must also - // specify the AfterFragmentNumber parameter. + // * PRODUCER_TIMESTAMP or SERVER_TIMESTAMP - Start + // with the chunk containing a fragment with the specified producer or server + // timestamp. You specify the timestamp by adding StartTimestamp. // - // * PRODUCER_TIMESTAMP or - // SERVER_TIMESTAMP - Start with the chunk containing a fragment with the specified - // producer or server timestamp. You specify the timestamp by adding - // StartTimestamp. 
+ // * + // CONTINUATION_TOKEN - Read using the specified continuation token. // - // * CONTINUATION_TOKEN - Read using the specified - // continuation token. - // - // If you choose the NOW, EARLIEST, or CONTINUATION_TOKEN as - // the startSelectorType, you don't provide any additional information in the - // startSelector. + // If you choose + // the NOW, EARLIEST, or CONTINUATION_TOKEN as the startSelectorType, you don't + // provide any additional information in the startSelector. // // This member is required. StartSelectorType StartSelectorType diff --git a/service/kms/api_op_CancelKeyDeletion.go b/service/kms/api_op_CancelKeyDeletion.go index 3c37bde0e7d..8717e70d220 100644 --- a/service/kms/api_op_CancelKeyDeletion.go +++ b/service/kms/api_op_CancelKeyDeletion.go @@ -42,9 +42,9 @@ type CancelKeyDeletionInput struct { // deletion. Specify the key ID or the Amazon Resource Name (ARN) of the CMK. For // example: // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab // - // * Key ARN: + // * Key ARN: // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab // // To diff --git a/service/kms/api_op_CreateAlias.go b/service/kms/api_op_CreateAlias.go index 1cf22c3f660..efa82c07187 100644 --- a/service/kms/api_op_CreateAlias.go +++ b/service/kms/api_op_CreateAlias.go @@ -25,43 +25,43 @@ import ( // get the alias that you created, use the ListAliases operation. To use aliases // successfully, be aware of the following information. // -// * Each alias points to +// * Each alias points to // only one CMK at a time, although a single CMK can have multiple aliases. The // alias and its associated CMK must be in the same AWS account and Region. // -// * -// You can associate an alias with any customer managed CMK in the same AWS account -// and Region. However, you do not have permission to associate an alias with an -// AWS managed CMK +// * You +// can associate an alias with any customer managed CMK in the same AWS account and +// Region. However, you do not have permission to associate an alias with an AWS +// managed CMK // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk) // or an AWS owned CMK // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-owned-cmk). // -// -// * To change the CMK associated with an alias, use the UpdateAlias operation. The +// * +// To change the CMK associated with an alias, use the UpdateAlias operation. The // current CMK and the new CMK must be the same type (both symmetric or both // asymmetric) and they must have the same key usage (ENCRYPT_DECRYPT or // SIGN_VERIFY). This restriction prevents cryptographic errors in code that uses // aliases. // -// * The alias name must begin with alias/ followed by a name, such -// as alias/ExampleAlias. It can contain only alphanumeric characters, forward -// slashes (/), underscores (_), and dashes (-). The alias name cannot begin with +// * The alias name must begin with alias/ followed by a name, such as +// alias/ExampleAlias. It can contain only alphanumeric characters, forward slashes +// (/), underscores (_), and dashes (-). The alias name cannot begin with // alias/aws/. The alias/aws/ prefix is reserved for AWS managed CMKs // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk). // -// -// * The alias name must be unique within an AWS Region. However, you can use the +// * +// The alias name must be unique within an AWS Region. 
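To complement the NOW-based example earlier, a small sketch of the other selector shape described above: starting from a producer timestamp. Only field and constant names shown in this hunk are used; the helper name is illustrative.

package kvexamples

import (
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kinesisvideomedia/types"
)

// startAtProducerTime builds a StartSelector that begins with the chunk whose
// fragment carries the given producer timestamp. As the comment above notes,
// NOW, EARLIEST, and CONTINUATION_TOKEN need no extra fields.
func startAtProducerTime(t time.Time) *types.StartSelector {
	return &types.StartSelector{
		StartSelectorType: types.StartSelectorTypeProducerTimestamp,
		StartTimestamp:    aws.Time(t),
	}
}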
However, you can use the // same alias name in multiple Regions of the same AWS account. Each instance of // the alias is associated with a CMK in its Region. // -// * After you create an -// alias, you cannot change its alias name. However, you can use the DeleteAlias -// operation to delete the alias and then create a new alias with the desired -// name. +// * After you create an alias, +// you cannot change its alias name. However, you can use the DeleteAlias operation +// to delete the alias and then create a new alias with the desired name. // -// * You can use an alias name or alias ARN to identify a CMK in AWS KMS -// cryptographic operations +// * You +// can use an alias name or alias ARN to identify a CMK in AWS KMS cryptographic +// operations // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) // and in the DescribeKey operation. However, you cannot use alias names or alias // ARNs in API operations that manage CMKs, such as DisableKey or GetKeyPolicy. For diff --git a/service/kms/api_op_CreateGrant.go b/service/kms/api_op_CreateGrant.go index 6dc2222b6d7..0d844329358 100644 --- a/service/kms/api_op_CreateGrant.go +++ b/service/kms/api_op_CreateGrant.go @@ -23,14 +23,14 @@ import ( // operation that the CMK does not support, CreateGrant fails with a // ValidationException. // -// * Grants for symmetric CMKs cannot allow operations -// that are not supported for symmetric CMKs, including Sign, Verify, and -// GetPublicKey. (There are limited exceptions to this rule for legacy operations, -// but you should not create a grant for an operation that AWS KMS does not -// support.) +// * Grants for symmetric CMKs cannot allow operations that +// are not supported for symmetric CMKs, including Sign, Verify, and GetPublicKey. +// (There are limited exceptions to this rule for legacy operations, but you should +// not create a grant for an operation that AWS KMS does not support.) // -// * Grants for asymmetric CMKs cannot allow operations that are not -// supported for asymmetric CMKs, including operations that generate data keys +// * Grants +// for asymmetric CMKs cannot allow operations that are not supported for +// asymmetric CMKs, including operations that generate data keys // (https://docs.aws.amazon.com/kms/latest/APIReference/API_GenerateDataKey) or // data key pairs // (https://docs.aws.amazon.com/kms/latest/APIReference/API_GenerateDataKeyPair), @@ -41,12 +41,12 @@ import ( // CMKs in custom key stores // (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html). // -// -// * Grants for asymmetric CMKs with a KeyUsage of ENCRYPT_DECRYPT cannot allow the +// * +// Grants for asymmetric CMKs with a KeyUsage of ENCRYPT_DECRYPT cannot allow the // Sign or Verify operations. Grants for asymmetric CMKs with a KeyUsage of // SIGN_VERIFY cannot allow the Encrypt or Decrypt operations. // -// * Grants for +// * Grants for // asymmetric CMKs cannot include an encryption context grant constraint. An // encryption context is not supported on asymmetric CMKs. // @@ -96,10 +96,10 @@ type CreateGrantInput struct { // to. Specify the key ID or the Amazon Resource Name (ARN) of the CMK. To specify // a CMK in a different AWS account, you must use the key ARN. 
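A short sketch of CreateAlias under the rules listed above, assuming an aws-sdk-go-v2 kms.Client built with kms.NewFromConfig; the key identifier is a placeholder and may be a key ID or key ARN, but not another alias.

package kmsexamples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

// createAlias attaches alias/ExampleAlias to a customer managed CMK. The name
// must start with "alias/" and must not use the reserved "alias/aws/" prefix,
// per the rules above. keyID is a placeholder key ID or key ARN.
func createAlias(ctx context.Context, client *kms.Client, keyID string) error {
	_, err := client.CreateAlias(ctx, &kms.CreateAliasInput{
		AliasName:   aws.String("alias/ExampleAlias"),
		TargetKeyId: aws.String(keyID),
	})
	return err
}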
For example: // - // * - // Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // * Key + // ID: 1234abcd-12ab-34cd-56ef-1234567890ab // - // * Key ARN: + // * Key ARN: // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab // // To diff --git a/service/kms/api_op_CreateKey.go b/service/kms/api_op_CreateKey.go index 1c79ad0099e..dc17da1928b 100644 --- a/service/kms/api_op_CreateKey.go +++ b/service/kms/api_op_CreateKey.go @@ -17,18 +17,18 @@ import ( // CMK in a different AWS account. You can use the CreateKey operation to create // symmetric or asymmetric CMKs. // -// * Symmetric CMKs contain a 256-bit symmetric -// key that never leaves AWS KMS unencrypted. To use the CMK, you must call AWS -// KMS. You can use a symmetric CMK to encrypt and decrypt small amounts of data, -// but they are typically used to generate data keys +// * Symmetric CMKs contain a 256-bit symmetric key +// that never leaves AWS KMS unencrypted. To use the CMK, you must call AWS KMS. +// You can use a symmetric CMK to encrypt and decrypt small amounts of data, but +// they are typically used to generate data keys // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#data-keys) // and data keys pairs // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#data-key-pairs). // For details, see GenerateDataKey and GenerateDataKeyPair. // -// * Asymmetric CMKs -// can contain an RSA key pair or an Elliptic Curve (ECC) key pair. The private key -// in an asymmetric CMK never leaves AWS KMS unencrypted. However, you can use the +// * Asymmetric CMKs can +// contain an RSA key pair or an Elliptic Curve (ECC) key pair. The private key in +// an asymmetric CMK never leaves AWS KMS unencrypted. However, you can use the // GetPublicKey operation to download the public key so it can be used outside of // AWS KMS. CMKs with RSA key pairs can be used to encrypt or decrypt data or sign // and verify messages (but not both). CMKs with ECC key pairs can be used only to @@ -133,35 +133,34 @@ type CreateKeyInput struct { // the AWS Key Management Service Developer Guide. AWS KMS supports the following // key specs for CMKs: // - // * Symmetric key (default) + // * Symmetric key (default) // - // * SYMMETRIC_DEFAULT + // * SYMMETRIC_DEFAULT // (AES-256-GCM) // - // * Asymmetric RSA key pairs + // * Asymmetric RSA key pairs // - // * RSA_2048 + // * RSA_2048 // - // * - // RSA_3072 + // * RSA_3072 // - // * RSA_4096 + // * RSA_4096 // - // * Asymmetric NIST-recommended elliptic curve - // key pairs + // * + // Asymmetric NIST-recommended elliptic curve key pairs // - // * ECC_NIST_P256 (secp256r1) + // * ECC_NIST_P256 + // (secp256r1) // - // * ECC_NIST_P384 - // (secp384r1) + // * ECC_NIST_P384 (secp384r1) // - // * ECC_NIST_P521 (secp521r1) + // * ECC_NIST_P521 (secp521r1) // - // * Other asymmetric - // elliptic curve key pairs + // * Other + // asymmetric elliptic curve key pairs // - // * ECC_SECG_P256K1 (secp256k1), commonly used - // for cryptocurrencies. + // * ECC_SECG_P256K1 (secp256k1), commonly + // used for cryptocurrencies. CustomerMasterKeySpec types.CustomerMasterKeySpec // A description of the CMK. Use a description that helps you decide whether the @@ -174,14 +173,14 @@ type CreateKeyInput struct { // parameter is required only for asymmetric CMKs. You can't change the KeyUsage // value after the CMK is created. Select only one valid value. // - // * For - // symmetric CMKs, omit the parameter or specify ENCRYPT_DECRYPT. 
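A hedged sketch of CreateGrant for a symmetric CMK, staying within the constraints above (no Sign, Verify, or GetPublicKey operations). The grantee ARN and helper name are illustrative.

package kmsexamples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

// grantEncryptDecrypt creates a grant that lets granteeARN (for example an IAM
// role ARN) call Encrypt and Decrypt on a symmetric CMK. It returns the grant
// ID and grant token that CreateGrant reports.
func grantEncryptDecrypt(ctx context.Context, client *kms.Client, keyID, granteeARN string) (grantID, grantToken string, err error) {
	out, err := client.CreateGrant(ctx, &kms.CreateGrantInput{
		KeyId:            aws.String(keyID),
		GranteePrincipal: aws.String(granteeARN),
		Operations: []types.GrantOperation{
			types.GrantOperationEncrypt,
			types.GrantOperationDecrypt,
		},
	})
	if err != nil {
		return "", "", err
	}
	return aws.ToString(out.GrantId), aws.ToString(out.GrantToken), nil
}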
+ // * For symmetric + // CMKs, omit the parameter or specify ENCRYPT_DECRYPT. // - // * For - // asymmetric CMKs with RSA key material, specify ENCRYPT_DECRYPT or SIGN_VERIFY. + // * For asymmetric CMKs with + // RSA key material, specify ENCRYPT_DECRYPT or SIGN_VERIFY. // - // - // * For asymmetric CMKs with ECC key material, specify SIGN_VERIFY. + // * For asymmetric CMKs + // with ECC key material, specify SIGN_VERIFY. KeyUsage types.KeyUsageType // The source of the key material for the CMK. You cannot change the origin after @@ -203,21 +202,21 @@ type CreateKeyInput struct { // The key policy to attach to the CMK. If you provide a key policy, it must meet // the following criteria: // - // * If you don't set BypassPolicyLockoutSafetyCheck - // to true, the key policy must allow the principal that is making the CreateKey + // * If you don't set BypassPolicyLockoutSafetyCheck to + // true, the key policy must allow the principal that is making the CreateKey // request to make a subsequent PutKeyPolicy request on the CMK. This reduces the // risk that the CMK becomes unmanageable. For more information, refer to the // scenario in the Default Key Policy // (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) // section of the AWS Key Management Service Developer Guide . // - // * Each - // statement in the key policy must contain one or more principals. The principals - // in the key policy must exist and be visible to AWS KMS. When you create a new - // AWS principal (for example, an IAM user or role), you might need to enforce a - // delay before including the new principal in a key policy because the new - // principal might not be immediately visible to AWS KMS. For more information, see - // Changes that I make are not always immediately visible + // * Each statement in + // the key policy must contain one or more principals. The principals in the key + // policy must exist and be visible to AWS KMS. When you create a new AWS principal + // (for example, an IAM user or role), you might need to enforce a delay before + // including the new principal in a key policy because the new principal might not + // be immediately visible to AWS KMS. For more information, see Changes that I make + // are not always immediately visible // (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency) // in the AWS Identity and Access Management User Guide. // diff --git a/service/kms/api_op_Decrypt.go b/service/kms/api_op_Decrypt.go index 8518422d575..4c4ab62aff0 100644 --- a/service/kms/api_op_Decrypt.go +++ b/service/kms/api_op_Decrypt.go @@ -14,16 +14,16 @@ import ( // Decrypts ciphertext that was encrypted by a AWS KMS customer master key (CMK) // using any of the following operations: // -// * Encrypt +// * Encrypt // -// * GenerateDataKey +// * GenerateDataKey // +// * +// GenerateDataKeyPair // -// * GenerateDataKeyPair +// * GenerateDataKeyWithoutPlaintext // -// * GenerateDataKeyWithoutPlaintext -// -// * +// * // GenerateDataKeyPairWithoutPlaintext // // You can use this operation to decrypt @@ -116,16 +116,16 @@ type DecryptInput struct { // specify a CMK, use its key ID, Amazon Resource Name (ARN), alias name, or alias // ARN. When using an alias name, prefix it with "alias/". 
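A sketch of creating an asymmetric signing CMK with the CustomerMasterKeySpec and KeyUsage values discussed above. The enum constant names assume the renaming convention this patch applies across the SDK.

package kmsexamples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

// createSigningKey creates an asymmetric CMK with an ECC_NIST_P256 key pair for
// signing. Omitting both the key spec and key usage would instead create the
// default symmetric ENCRYPT_DECRYPT CMK described above.
func createSigningKey(ctx context.Context, client *kms.Client) (string, error) {
	out, err := client.CreateKey(ctx, &kms.CreateKeyInput{
		Description:           aws.String("Example ECC signing key"),
		CustomerMasterKeySpec: types.CustomerMasterKeySpecEccNistP256,
		KeyUsage:              types.KeyUsageTypeSignVerify,
	})
	if err != nil {
		return "", err
	}
	return aws.ToString(out.KeyMetadata.KeyId), nil
}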
For example: // - // * Key - // ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // * Key ID: + // 1234abcd-12ab-34cd-56ef-1234567890ab // - // * Key ARN: + // * Key ARN: // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab // + // * + // Alias name: alias/ExampleAlias // - // * Alias name: alias/ExampleAlias - // - // * Alias ARN: + // * Alias ARN: // arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias // // To get the key ID and key diff --git a/service/kms/api_op_DeleteImportedKeyMaterial.go b/service/kms/api_op_DeleteImportedKeyMaterial.go index d7d93b15963..ea98f0dd063 100644 --- a/service/kms/api_op_DeleteImportedKeyMaterial.go +++ b/service/kms/api_op_DeleteImportedKeyMaterial.go @@ -44,10 +44,10 @@ type DeleteImportedKeyMaterialInput struct { // of the CMK must be EXTERNAL. Specify the key ID or the Amazon Resource Name // (ARN) of the CMK. For example: // - // * Key ID: + // * Key ID: // 1234abcd-12ab-34cd-56ef-1234567890ab // - // * Key ARN: + // * Key ARN: // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab // // To diff --git a/service/kms/api_op_DescribeKey.go b/service/kms/api_op_DescribeKey.go index b22792cc4e8..ae0187a931c 100644 --- a/service/kms/api_op_DescribeKey.go +++ b/service/kms/api_op_DescribeKey.go @@ -26,25 +26,25 @@ import ( // signing) and the encryption algorithms or signing algorithms that the CMK // supports. DescribeKey does not return the following information: // -// * Aliases +// * Aliases // associated with the CMK. To get this information, use ListAliases. // -// * -// Whether automatic key rotation is enabled on the CMK. To get this information, -// use GetKeyRotationStatus. Also, some key states prevent a CMK from being +// * Whether +// automatic key rotation is enabled on the CMK. To get this information, use +// GetKeyRotationStatus. Also, some key states prevent a CMK from being // automatically rotated. For details, see How Automatic Key Rotation Works // (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html#rotate-keys-how-it-works) // in AWS Key Management Service Developer Guide. // -// * Tags on the CMK. To get -// this information, use ListResourceTags. +// * Tags on the CMK. To get this +// information, use ListResourceTags. // -// * Key policies and grants on the -// CMK. To get this information, use GetKeyPolicy and ListGrants. +// * Key policies and grants on the CMK. To get +// this information, use GetKeyPolicy and ListGrants. // -// If you call the -// DescribeKey operation on a predefined AWS alias, that is, an AWS alias with no -// key ID, AWS KMS creates an AWS managed CMK +// If you call the DescribeKey +// operation on a predefined AWS alias, that is, an AWS alias with no key ID, AWS +// KMS creates an AWS managed CMK // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#master_keys). // Then, it associates the alias with the new CMK, and returns the KeyId and Arn of // the new CMK in the response. To perform this operation on a CMK in a different @@ -76,16 +76,16 @@ type DescribeKeyInput struct { // prefix it with "alias/". To specify a CMK in a different AWS account, you must // use the key ARN or alias ARN. 
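A round-trip sketch tying Encrypt to the Decrypt operation documented above. It relies on the documented behavior that, for symmetric CMKs, the ciphertext blob carries enough metadata for Decrypt to find the right CMK; the key identifier is a placeholder.

package kmsexamples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

// roundTrip encrypts a small plaintext under a symmetric CMK and decrypts it
// again. No KeyId is passed to Decrypt because symmetric ciphertext blobs
// already identify the CMK, as the comments above explain.
func roundTrip(ctx context.Context, client *kms.Client, keyID string, plaintext []byte) ([]byte, error) {
	enc, err := client.Encrypt(ctx, &kms.EncryptInput{
		KeyId:     aws.String(keyID),
		Plaintext: plaintext,
	})
	if err != nil {
		return nil, err
	}
	dec, err := client.Decrypt(ctx, &kms.DecryptInput{
		CiphertextBlob: enc.CiphertextBlob,
	})
	if err != nil {
		return nil, err
	}
	return dec.Plaintext, nil
}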
For example: // - // * Key ID: + // * Key ID: // 1234abcd-12ab-34cd-56ef-1234567890ab // - // * Key ARN: + // * Key ARN: // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab // + // * + // Alias name: alias/ExampleAlias // - // * Alias name: alias/ExampleAlias - // - // * Alias ARN: + // * Alias ARN: // arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias // // To get the key ID and key diff --git a/service/kms/api_op_DisableKey.go b/service/kms/api_op_DisableKey.go index 9431b917983..183923f37d0 100644 --- a/service/kms/api_op_DisableKey.go +++ b/service/kms/api_op_DisableKey.go @@ -42,10 +42,10 @@ type DisableKeyInput struct { // A unique identifier for the customer master key (CMK). Specify the key ID or the // Amazon Resource Name (ARN) of the CMK. For example: // - // * Key ID: + // * Key ID: // 1234abcd-12ab-34cd-56ef-1234567890ab // - // * Key ARN: + // * Key ARN: // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab // // To diff --git a/service/kms/api_op_DisableKeyRotation.go b/service/kms/api_op_DisableKeyRotation.go index c23555b9da9..266f5cbc418 100644 --- a/service/kms/api_op_DisableKeyRotation.go +++ b/service/kms/api_op_DisableKeyRotation.go @@ -47,10 +47,10 @@ type DisableKeyRotationInput struct { // (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html). // Specify the key ID or the Amazon Resource Name (ARN) of the CMK. For example: // + // * + // Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: + // * Key ARN: // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab // // To diff --git a/service/kms/api_op_EnableKey.go b/service/kms/api_op_EnableKey.go index bf246d0dbc3..039fc18727d 100644 --- a/service/kms/api_op_EnableKey.go +++ b/service/kms/api_op_EnableKey.go @@ -38,10 +38,10 @@ type EnableKeyInput struct { // A unique identifier for the customer master key (CMK). Specify the key ID or the // Amazon Resource Name (ARN) of the CMK. For example: // - // * Key ID: + // * Key ID: // 1234abcd-12ab-34cd-56ef-1234567890ab // - // * Key ARN: + // * Key ARN: // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab // // To diff --git a/service/kms/api_op_EnableKeyRotation.go b/service/kms/api_op_EnableKeyRotation.go index 4482e20d2f4..6a6369005f9 100644 --- a/service/kms/api_op_EnableKeyRotation.go +++ b/service/kms/api_op_EnableKeyRotation.go @@ -43,10 +43,10 @@ type EnableKeyRotationInput struct { // (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html). // Specify the key ID or the Amazon Resource Name (ARN) of the CMK. For example: // + // * + // Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: + // * Key ARN: // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab // // To diff --git a/service/kms/api_op_Encrypt.go b/service/kms/api_op_Encrypt.go index b7f0bd4c0a8..f0e772fe35f 100644 --- a/service/kms/api_op_Encrypt.go +++ b/service/kms/api_op_Encrypt.go @@ -14,24 +14,24 @@ import ( // Encrypts plaintext into ciphertext by using a customer master key (CMK). The // Encrypt operation has two primary use cases: // -// * You can encrypt small -// amounts of arbitrary data, such as a personal identifier or database password, -// or other sensitive information. 
+// * You can encrypt small amounts of +// arbitrary data, such as a personal identifier or database password, or other +// sensitive information. // -// * You can use the Encrypt operation to move -// encrypted data from one AWS Region to another. For example, in Region A, -// generate a data key and use the plaintext key to encrypt your data. Then, in -// Region A, use the Encrypt operation to encrypt the plaintext data key under a -// CMK in Region B. Now, you can move the encrypted data and the encrypted data key -// to Region B. When necessary, you can decrypt the encrypted data key and the -// encrypted data entirely within in Region B. +// * You can use the Encrypt operation to move encrypted +// data from one AWS Region to another. For example, in Region A, generate a data +// key and use the plaintext key to encrypt your data. Then, in Region A, use the +// Encrypt operation to encrypt the plaintext data key under a CMK in Region B. +// Now, you can move the encrypted data and the encrypted data key to Region B. +// When necessary, you can decrypt the encrypted data key and the encrypted data +// entirely within in Region B. // -// You don't need to use the Encrypt -// operation to encrypt a data key. The GenerateDataKey and GenerateDataKeyPair -// operations return a plaintext data key and an encrypted copy of that data key. -// When you encrypt data, you must specify a symmetric or asymmetric CMK to use in -// the encryption operation. The CMK must have a KeyUsage value of ENCRYPT_DECRYPT. -// To find the KeyUsage of a CMK, use the DescribeKey operation. If you use a +// You don't need to use the Encrypt operation to +// encrypt a data key. The GenerateDataKey and GenerateDataKeyPair operations +// return a plaintext data key and an encrypted copy of that data key. When you +// encrypt data, you must specify a symmetric or asymmetric CMK to use in the +// encryption operation. The CMK must have a KeyUsage value of ENCRYPT_DECRYPT. To +// find the KeyUsage of a CMK, use the DescribeKey operation. If you use a // symmetric CMK, you can use an encryption context to add additional security to // your encryption operation. If you specify an EncryptionContext when encrypting // data, you must specify the same encryption context (a case-sensitive exact @@ -52,35 +52,34 @@ import ( // of the data that you can encrypt varies with the type of CMK and the encryption // algorithm that you choose. // -// * Symmetric CMKs +// * Symmetric CMKs // -// * SYMMETRIC_DEFAULT: -// 4096 bytes +// * SYMMETRIC_DEFAULT: 4096 bytes // -// * RSA_2048 +// * +// RSA_2048 // -// * RSAES_OAEP_SHA_1: 214 bytes +// * RSAES_OAEP_SHA_1: 214 bytes // -// * -// RSAES_OAEP_SHA_256: 190 bytes +// * RSAES_OAEP_SHA_256: 190 bytes // -// * RSA_3072 +// * +// RSA_3072 // -// * RSAES_OAEP_SHA_1: 342 -// bytes +// * RSAES_OAEP_SHA_1: 342 bytes // -// * RSAES_OAEP_SHA_256: 318 bytes +// * RSAES_OAEP_SHA_256: 318 bytes // -// * RSA_4096 +// * +// RSA_4096 // -// * -// RSAES_OAEP_SHA_1: 470 bytes +// * RSAES_OAEP_SHA_1: 470 bytes // -// * RSAES_OAEP_SHA_256: 446 bytes +// * RSAES_OAEP_SHA_256: 446 bytes // -// The CMK -// that you use for this operation must be in a compatible key state. For details, -// see How Key State Affects Use of a Customer Master Key +// The +// CMK that you use for this operation must be in a compatible key state. 
For +// details, see How Key State Affects Use of a Customer Master Key // (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the // AWS Key Management Service Developer Guide. To perform this operation on a CMK // in a different AWS account, specify the key ARN or alias ARN in the value of the @@ -107,16 +106,16 @@ type EncryptInput struct { // alias name, prefix it with "alias/". To specify a CMK in a different AWS // account, you must use the key ARN or alias ARN. For example: // - // * Key ID: + // * Key ID: // 1234abcd-12ab-34cd-56ef-1234567890ab // - // * Key ARN: + // * Key ARN: // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab // + // * + // Alias name: alias/ExampleAlias // - // * Alias name: alias/ExampleAlias - // - // * Alias ARN: + // * Alias ARN: // arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias // // To get the key ID and key diff --git a/service/kms/api_op_GenerateDataKey.go b/service/kms/api_op_GenerateDataKey.go index 42de18d77ad..6abfdeee1cb 100644 --- a/service/kms/api_op_GenerateDataKey.go +++ b/service/kms/api_op_GenerateDataKey.go @@ -47,25 +47,25 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingClientSideEncryption.html) // to do these tasks for you. To encrypt data outside of AWS KMS: // -// * Use the +// * Use the // GenerateDataKey operation to get a data key. // -// * Use the plaintext data key -// (in the Plaintext field of the response) to encrypt your data outside of AWS -// KMS. Then erase the plaintext data key from memory. +// * Use the plaintext data key (in +// the Plaintext field of the response) to encrypt your data outside of AWS KMS. +// Then erase the plaintext data key from memory. // -// * Store the encrypted -// data key (in the CiphertextBlob field of the response) with the encrypted -// data. +// * Store the encrypted data key +// (in the CiphertextBlob field of the response) with the encrypted data. // -// To decrypt data outside of AWS KMS: +// To +// decrypt data outside of AWS KMS: // -// * Use the Decrypt operation to -// decrypt the encrypted data key. The operation returns a plaintext copy of the -// data key. +// * Use the Decrypt operation to decrypt the +// encrypted data key. The operation returns a plaintext copy of the data key. // -// * Use the plaintext data key to decrypt data outside of AWS KMS, -// then erase the plaintext data key from memory. +// * +// Use the plaintext data key to decrypt data outside of AWS KMS, then erase the +// plaintext data key from memory. func (c *Client) GenerateDataKey(ctx context.Context, params *GenerateDataKeyInput, optFns ...func(*Options)) (*GenerateDataKeyOutput, error) { if params == nil { params = &GenerateDataKeyInput{} @@ -88,16 +88,16 @@ type GenerateDataKeyInput struct { // alias name, prefix it with "alias/". To specify a CMK in a different AWS // account, you must use the key ARN or alias ARN. 
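A sketch of the envelope-encryption steps listed above for GenerateDataKey. It returns both halves of the data key and leaves the local encryption (and erasing of the plaintext key) to the caller; decrypting the stored CiphertextBlob later goes through the Decrypt operation shown earlier.

package kmsexamples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

// newDataKey asks KMS for a 256-bit data key under the given CMK. Use the
// plaintext key to encrypt data outside of AWS KMS and erase it afterwards;
// store the encrypted copy next to the encrypted data.
func newDataKey(ctx context.Context, client *kms.Client, keyID string) (plaintextKey, encryptedKey []byte, err error) {
	out, err := client.GenerateDataKey(ctx, &kms.GenerateDataKeyInput{
		KeyId:   aws.String(keyID),
		KeySpec: types.DataKeySpecAes256,
	})
	if err != nil {
		return nil, nil, err
	}
	return out.Plaintext, out.CiphertextBlob, nil
}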
For example: // - // * Key ID: + // * Key ID: // 1234abcd-12ab-34cd-56ef-1234567890ab // - // * Key ARN: + // * Key ARN: // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab // + // * + // Alias name: alias/ExampleAlias // - // * Alias name: alias/ExampleAlias - // - // * Alias ARN: + // * Alias ARN: // arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias // // To get the key ID and key diff --git a/service/kms/api_op_GenerateDataKeyPair.go b/service/kms/api_op_GenerateDataKeyPair.go index ee388891dc0..d3d783863e5 100644 --- a/service/kms/api_op_GenerateDataKeyPair.go +++ b/service/kms/api_op_GenerateDataKeyPair.go @@ -67,16 +67,16 @@ type GenerateDataKeyPairInput struct { // an alias name, prefix it with "alias/". To specify a CMK in a different AWS // account, you must use the key ARN or alias ARN. For example: // - // * Key ID: + // * Key ID: // 1234abcd-12ab-34cd-56ef-1234567890ab // - // * Key ARN: + // * Key ARN: // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab // + // * + // Alias name: alias/ExampleAlias // - // * Alias name: alias/ExampleAlias - // - // * Alias ARN: + // * Alias ARN: // arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias // // To get the key ID and key diff --git a/service/kms/api_op_GenerateDataKeyPairWithoutPlaintext.go b/service/kms/api_op_GenerateDataKeyPairWithoutPlaintext.go index 28b6ce15085..d269a866c8e 100644 --- a/service/kms/api_op_GenerateDataKeyPairWithoutPlaintext.go +++ b/service/kms/api_op_GenerateDataKeyPairWithoutPlaintext.go @@ -61,15 +61,15 @@ type GenerateDataKeyPairWithoutPlaintextInput struct { // name, or alias ARN. When using an alias name, prefix it with "alias/". For // example: // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab // - // * Key ARN: + // * Key ARN: // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab // + // * + // Alias name: alias/ExampleAlias // - // * Alias name: alias/ExampleAlias - // - // * Alias ARN: + // * Alias ARN: // arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias // // To get the key ID and key diff --git a/service/kms/api_op_GenerateDataKeyWithoutPlaintext.go b/service/kms/api_op_GenerateDataKeyWithoutPlaintext.go index e6a1e6e201a..5c6e0d8b854 100644 --- a/service/kms/api_op_GenerateDataKeyWithoutPlaintext.go +++ b/service/kms/api_op_GenerateDataKeyWithoutPlaintext.go @@ -67,15 +67,15 @@ type GenerateDataKeyWithoutPlaintextInput struct { // CMK in a different AWS account, you must use the key ARN or alias ARN. For // example: // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab // - // * Key ARN: + // * Key ARN: // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab // + // * + // Alias name: alias/ExampleAlias // - // * Alias name: alias/ExampleAlias - // - // * Alias ARN: + // * Alias ARN: // arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias // // To get the key ID and key diff --git a/service/kms/api_op_GetKeyPolicy.go b/service/kms/api_op_GetKeyPolicy.go index 8f21db3d5e9..40ee733d876 100644 --- a/service/kms/api_op_GetKeyPolicy.go +++ b/service/kms/api_op_GetKeyPolicy.go @@ -32,10 +32,10 @@ type GetKeyPolicyInput struct { // A unique identifier for the customer master key (CMK). Specify the key ID or the // Amazon Resource Name (ARN) of the CMK. 
For example: // - // * Key ID: + // * Key ID: // 1234abcd-12ab-34cd-56ef-1234567890ab // - // * Key ARN: + // * Key ARN: // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab // // To diff --git a/service/kms/api_op_GetKeyRotationStatus.go b/service/kms/api_op_GetKeyRotationStatus.go index 58fbb920f2d..fc05b8e1f49 100644 --- a/service/kms/api_op_GetKeyRotationStatus.go +++ b/service/kms/api_op_GetKeyRotationStatus.go @@ -23,17 +23,17 @@ import ( // (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the // AWS Key Management Service Developer Guide. // -// * Disabled: The key rotation -// status does not change when you disable a CMK. However, while the CMK is -// disabled, AWS KMS does not rotate the backing key. +// * Disabled: The key rotation status +// does not change when you disable a CMK. However, while the CMK is disabled, AWS +// KMS does not rotate the backing key. // -// * Pending deletion: -// While a CMK is pending deletion, its key rotation status is false and AWS KMS -// does not rotate the backing key. If you cancel the deletion, the original key -// rotation status is restored. +// * Pending deletion: While a CMK is pending +// deletion, its key rotation status is false and AWS KMS does not rotate the +// backing key. If you cancel the deletion, the original key rotation status is +// restored. // -// To perform this operation on a CMK in a different -// AWS account, specify the key ARN in the value of the KeyId parameter. +// To perform this operation on a CMK in a different AWS account, +// specify the key ARN in the value of the KeyId parameter. func (c *Client) GetKeyRotationStatus(ctx context.Context, params *GetKeyRotationStatusInput, optFns ...func(*Options)) (*GetKeyRotationStatusOutput, error) { if params == nil { params = &GetKeyRotationStatusInput{} @@ -55,10 +55,10 @@ type GetKeyRotationStatusInput struct { // Amazon Resource Name (ARN) of the CMK. To specify a CMK in a different AWS // account, you must use the key ARN. For example: // - // * Key ID: + // * Key ID: // 1234abcd-12ab-34cd-56ef-1234567890ab // - // * Key ARN: + // * Key ARN: // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab // // To diff --git a/service/kms/api_op_GetParametersForImport.go b/service/kms/api_op_GetParametersForImport.go index e494b4d6981..623f4ba1563 100644 --- a/service/kms/api_op_GetParametersForImport.go +++ b/service/kms/api_op_GetParametersForImport.go @@ -54,10 +54,10 @@ type GetParametersForImportInput struct { // Origin of the CMK must be EXTERNAL. Specify the key ID or the Amazon Resource // Name (ARN) of the CMK. For example: // - // * Key ID: + // * Key ID: // 1234abcd-12ab-34cd-56ef-1234567890ab // - // * Key ARN: + // * Key ARN: // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab // // To diff --git a/service/kms/api_op_GetPublicKey.go b/service/kms/api_op_GetPublicKey.go index a9318bce91e..7cc5b09fcc3 100644 --- a/service/kms/api_op_GetPublicKey.go +++ b/service/kms/api_op_GetPublicKey.go @@ -30,17 +30,17 @@ import ( // To help you use the public key safely outside of AWS KMS, GetPublicKey returns // important information about the public key in the response, including: // -// * +// * // CustomerMasterKeySpec // (https://docs.aws.amazon.com/kms/latest/APIReference/API_GetPublicKey.html#KMS-GetPublicKey-response-CustomerMasterKeySpec): -// The type of key material in the public key, such as RSA_4096 or ECC_NIST_P521. 
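A small sketch combining EnableKeyRotation with the GetKeyRotationStatus behavior described above; it applies only to customer managed, symmetric CMKs with KMS-generated key material.

package kmsexamples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

// ensureRotation turns on annual rotation for a CMK and reads the status back.
// As noted above, the status stays false while a CMK is pending deletion, and
// the backing key is not rotated while the CMK is disabled.
func ensureRotation(ctx context.Context, client *kms.Client, keyID string) (bool, error) {
	if _, err := client.EnableKeyRotation(ctx, &kms.EnableKeyRotationInput{KeyId: aws.String(keyID)}); err != nil {
		return false, err
	}
	status, err := client.GetKeyRotationStatus(ctx, &kms.GetKeyRotationStatusInput{KeyId: aws.String(keyID)})
	if err != nil {
		return false, err
	}
	return status.KeyRotationEnabled, nil
}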
-// +// The type of key material in the public key, such as RSA_4096 or +// ECC_NIST_P521. // // * KeyUsage // (https://docs.aws.amazon.com/kms/latest/APIReference/API_GetPublicKey.html#KMS-GetPublicKey-response-KeyUsage): // Whether the key is used for encryption or signing. // -// * EncryptionAlgorithms +// * EncryptionAlgorithms // (https://docs.aws.amazon.com/kms/latest/APIReference/API_GetPublicKey.html#KMS-GetPublicKey-response-EncryptionAlgorithms) // or SigningAlgorithms // (https://docs.aws.amazon.com/kms/latest/APIReference/API_GetPublicKey.html#KMS-GetPublicKey-response-SigningAlgorithms): @@ -79,16 +79,16 @@ type GetPublicKeyInput struct { // an alias name, prefix it with "alias/". To specify a CMK in a different AWS // account, you must use the key ARN or alias ARN. For example: // - // * Key ID: + // * Key ID: // 1234abcd-12ab-34cd-56ef-1234567890ab // - // * Key ARN: + // * Key ARN: // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab // + // * + // Alias name: alias/ExampleAlias // - // * Alias name: alias/ExampleAlias - // - // * Alias ARN: + // * Alias ARN: // arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias // // To get the key ID and key diff --git a/service/kms/api_op_ImportKeyMaterial.go b/service/kms/api_op_ImportKeyMaterial.go index f7209ea2e17..3680eacf0f5 100644 --- a/service/kms/api_op_ImportKeyMaterial.go +++ b/service/kms/api_op_ImportKeyMaterial.go @@ -27,32 +27,32 @@ import ( // token from the same GetParametersForImport response. When calling this // operation, you must specify the following values: // -// * The key ID or key ARN -// of a CMK with no key material. Its Origin must be EXTERNAL. To create a CMK with -// no key material, call CreateKey and set the value of its Origin parameter to +// * The key ID or key ARN of a +// CMK with no key material. Its Origin must be EXTERNAL. To create a CMK with no +// key material, call CreateKey and set the value of its Origin parameter to // EXTERNAL. To get the Origin of a CMK, call DescribeKey.) // -// * The encrypted -// key material. To get the public key to encrypt the key material, call +// * The encrypted key +// material. To get the public key to encrypt the key material, call // GetParametersForImport. // -// * The import token that GetParametersForImport +// * The import token that GetParametersForImport // returned. You must use a public key and token from the same // GetParametersForImport response. // -// * Whether the key material expires and if -// so, when. If you set an expiration date, AWS KMS deletes the key material from -// the CMK on the specified date, and the CMK becomes unusable. To use the CMK -// again, you must reimport the same key material. The only way to change an -// expiration date is by reimporting the same key material and specifying a new -// expiration date. +// * Whether the key material expires and if so, +// when. If you set an expiration date, AWS KMS deletes the key material from the +// CMK on the specified date, and the CMK becomes unusable. To use the CMK again, +// you must reimport the same key material. The only way to change an expiration +// date is by reimporting the same key material and specifying a new expiration +// date. // -// When this operation is successful, the key state of the CMK -// changes from PendingImport to Enabled, and you can use the CMK. If this -// operation fails, use the exception to help determine the problem. 
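A sketch of GetPublicKey that parses the returned public key for use outside AWS KMS, per the response fields described above. Parsing with crypto/x509 assumes the documented DER-encoded SubjectPublicKeyInfo format; the helper name is illustrative.

package kmsexamples

import (
	"context"
	"crypto/x509"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

// downloadPublicKey fetches the public half of an asymmetric CMK and parses the
// DER-encoded SubjectPublicKeyInfo so it can be used for offline verification
// or encryption. The response also reports the key spec, key usage, and
// supported algorithms described above.
func downloadPublicKey(ctx context.Context, client *kms.Client, keyID string) (interface{}, error) {
	out, err := client.GetPublicKey(ctx, &kms.GetPublicKeyInput{KeyId: aws.String(keyID)})
	if err != nil {
		return nil, err
	}
	return x509.ParsePKIXPublicKey(out.PublicKey)
}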
If the error -// is related to the key material, the import token, or wrapping key, use -// GetParametersForImport to get a new public key and import token for the CMK and -// repeat the import procedure. For help, see How To Import Key Material +// When this operation is successful, the key state of the CMK changes from +// PendingImport to Enabled, and you can use the CMK. If this operation fails, use +// the exception to help determine the problem. If the error is related to the key +// material, the import token, or wrapping key, use GetParametersForImport to get a +// new public key and import token for the CMK and repeat the import procedure. For +// help, see How To Import Key Material // (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html#importing-keys-overview) // in the AWS Key Management Service Developer Guide. The CMK that you use for this // operation must be in a compatible key state. For details, see How Key State @@ -95,10 +95,10 @@ type ImportKeyMaterialInput struct { // parameter of the corresponding GetParametersForImport request. Specify the key // ID or the Amazon Resource Name (ARN) of the CMK. For example: // - // * Key ID: + // * Key ID: // 1234abcd-12ab-34cd-56ef-1234567890ab // - // * Key ARN: + // * Key ARN: // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab // // To diff --git a/service/kms/api_op_ListGrants.go b/service/kms/api_op_ListGrants.go index 9581de37d2d..e009ab3f261 100644 --- a/service/kms/api_op_ListGrants.go +++ b/service/kms/api_op_ListGrants.go @@ -40,10 +40,10 @@ type ListGrantsInput struct { // Amazon Resource Name (ARN) of the CMK. To specify a CMK in a different AWS // account, you must use the key ARN. For example: // - // * Key ID: + // * Key ID: // 1234abcd-12ab-34cd-56ef-1234567890ab // - // * Key ARN: + // * Key ARN: // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab // // To diff --git a/service/kms/api_op_ListKeyPolicies.go b/service/kms/api_op_ListKeyPolicies.go index 1bab223cc53..7a2abfbe148 100644 --- a/service/kms/api_op_ListKeyPolicies.go +++ b/service/kms/api_op_ListKeyPolicies.go @@ -34,10 +34,10 @@ type ListKeyPoliciesInput struct { // A unique identifier for the customer master key (CMK). Specify the key ID or the // Amazon Resource Name (ARN) of the CMK. For example: // - // * Key ID: + // * Key ID: // 1234abcd-12ab-34cd-56ef-1234567890ab // - // * Key ARN: + // * Key ARN: // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab // // To diff --git a/service/kms/api_op_ListResourceTags.go b/service/kms/api_op_ListResourceTags.go index 5934455516e..d9616e67afa 100644 --- a/service/kms/api_op_ListResourceTags.go +++ b/service/kms/api_op_ListResourceTags.go @@ -33,10 +33,10 @@ type ListResourceTagsInput struct { // A unique identifier for the customer master key (CMK). Specify the key ID or the // Amazon Resource Name (ARN) of the CMK. For example: // - // * Key ID: + // * Key ID: // 1234abcd-12ab-34cd-56ef-1234567890ab // - // * Key ARN: + // * Key ARN: // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab // // To diff --git a/service/kms/api_op_PutKeyPolicy.go b/service/kms/api_op_PutKeyPolicy.go index c63527e68e7..6fd8ad60f46 100644 --- a/service/kms/api_op_PutKeyPolicy.go +++ b/service/kms/api_op_PutKeyPolicy.go @@ -35,10 +35,10 @@ type PutKeyPolicyInput struct { // A unique identifier for the customer master key (CMK). Specify the key ID or the // Amazon Resource Name (ARN) of the CMK. 
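A hedged sketch of the import flow above. Wrapping the key material under the downloaded public key happens outside AWS KMS, so it is represented by a caller-supplied wrap function; the AlgorithmSpec, WrappingKeySpec, and ExpirationModelType constant names are assumed to follow the renaming convention this patch applies.

package kmsexamples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

// importKeyMaterial fetches a wrapping public key and import token for an
// EXTERNAL-origin CMK, lets the caller wrap the key material with it (an
// out-of-band step represented by wrap), and imports the result. The token and
// wrapping key must come from the same GetParametersForImport response.
func importKeyMaterial(ctx context.Context, client *kms.Client, keyID string, wrap func(publicKey []byte) ([]byte, error)) error {
	params, err := client.GetParametersForImport(ctx, &kms.GetParametersForImportInput{
		KeyId:             aws.String(keyID),
		WrappingAlgorithm: types.AlgorithmSpecRsaesOaepSha256, // assumed post-rename constant name
		WrappingKeySpec:   types.WrappingKeySpecRsa2048,       // assumed post-rename constant name
	})
	if err != nil {
		return err
	}
	encryptedKeyMaterial, err := wrap(params.PublicKey)
	if err != nil {
		return err
	}
	_, err = client.ImportKeyMaterial(ctx, &kms.ImportKeyMaterialInput{
		KeyId:                aws.String(keyID),
		ImportToken:          params.ImportToken,
		EncryptedKeyMaterial: encryptedKeyMaterial,
		ExpirationModel:      types.ExpirationModelTypeKeyMaterialDoesNotExpire,
	})
	return err
}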
For example: // - // * Key ID: + // * Key ID: // 1234abcd-12ab-34cd-56ef-1234567890ab // - // * Key ARN: + // * Key ARN: // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab // // To @@ -50,16 +50,16 @@ type PutKeyPolicyInput struct { // The key policy to attach to the CMK. The key policy must meet the following // criteria: // - // * If you don't set BypassPolicyLockoutSafetyCheck to true, the - // key policy must allow the principal that is making the PutKeyPolicy request to - // make a subsequent PutKeyPolicy request on the CMK. This reduces the risk that - // the CMK becomes unmanageable. For more information, refer to the scenario in the - // Default Key Policy + // * If you don't set BypassPolicyLockoutSafetyCheck to true, the key + // policy must allow the principal that is making the PutKeyPolicy request to make + // a subsequent PutKeyPolicy request on the CMK. This reduces the risk that the CMK + // becomes unmanageable. For more information, refer to the scenario in the Default + // Key Policy // (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) // section of the AWS Key Management Service Developer Guide. // - // * Each statement - // in the key policy must contain one or more principals. The principals in the key + // * Each statement in + // the key policy must contain one or more principals. The principals in the key // policy must exist and be visible to AWS KMS. When you create a new AWS principal // (for example, an IAM user or role), you might need to enforce a delay before // including the new principal in a key policy because the new principal might not diff --git a/service/kms/api_op_ReEncrypt.go b/service/kms/api_op_ReEncrypt.go index f134aee19ec..8e69449bc20 100644 --- a/service/kms/api_op_ReEncrypt.go +++ b/service/kms/api_op_ReEncrypt.go @@ -33,43 +33,42 @@ import ( // When you use the ReEncrypt operation, you need to provide information for the // decrypt operation and the subsequent encrypt operation. // -// * If your -// ciphertext was encrypted under an asymmetric CMK, you must identify the source -// CMK, that is, the CMK that encrypted the ciphertext. You must also supply the -// encryption algorithm that was used. This information is required to decrypt the -// data. +// * If your ciphertext +// was encrypted under an asymmetric CMK, you must identify the source CMK, that +// is, the CMK that encrypted the ciphertext. You must also supply the encryption +// algorithm that was used. This information is required to decrypt the data. // -// * It is optional, but you can specify a source CMK even when the -// ciphertext was encrypted under a symmetric CMK. This ensures that the ciphertext -// is decrypted only by using a particular CMK. If the CMK that you specify cannot -// decrypt the ciphertext, the ReEncrypt operation fails. +// * It +// is optional, but you can specify a source CMK even when the ciphertext was +// encrypted under a symmetric CMK. This ensures that the ciphertext is decrypted +// only by using a particular CMK. If the CMK that you specify cannot decrypt the +// ciphertext, the ReEncrypt operation fails. // -// * To reencrypt the -// data, you must specify the destination CMK, that is, the CMK that re-encrypts -// the data after it is decrypted. You can select a symmetric or asymmetric CMK. If -// the destination CMK is an asymmetric CMK, you must also provide the encryption -// algorithm. The algorithm that you choose must be compatible with the CMK. 
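A sketch of PutKeyPolicy that keeps the account root principal as a key administrator, which is one way to satisfy the lockout criteria above without setting BypassPolicyLockoutSafetyCheck. The policy document is a simplified example, not the full default key policy.

package kmsexamples

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

// putDefaultKeyPolicy replaces the CMK's only key policy (named "default") with
// a policy that allows the account root principal full access, so the CMK
// cannot become unmanageable.
func putDefaultKeyPolicy(ctx context.Context, client *kms.Client, keyID, accountID string) error {
	policy := fmt.Sprintf(`{
  "Version": "2012-10-17",
  "Statement": [{
    "Sid": "EnableRootAccess",
    "Effect": "Allow",
    "Principal": {"AWS": "arn:aws:iam::%s:root"},
    "Action": "kms:*",
    "Resource": "*"
  }]
}`, accountID)
	_, err := client.PutKeyPolicy(ctx, &kms.PutKeyPolicyInput{
		KeyId:      aws.String(keyID),
		PolicyName: aws.String("default"),
		Policy:     aws.String(policy),
	})
	return err
}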
When -// you use an asymmetric CMK to encrypt or reencrypt data, be sure to record the -// CMK and encryption algorithm that you choose. You will be required to provide -// the same CMK and encryption algorithm when you decrypt the data. If the CMK and -// algorithm do not match the values used to encrypt the data, the decrypt -// operation fails. You are not required to supply the CMK ID and encryption -// algorithm when you decrypt with symmetric CMKs because AWS KMS stores this -// information in the ciphertext blob. AWS KMS cannot store metadata in ciphertext -// generated with asymmetric keys. The standard format for asymmetric key -// ciphertext does not include configurable fields. +// * To reencrypt the data, you must +// specify the destination CMK, that is, the CMK that re-encrypts the data after it +// is decrypted. You can select a symmetric or asymmetric CMK. If the destination +// CMK is an asymmetric CMK, you must also provide the encryption algorithm. The +// algorithm that you choose must be compatible with the CMK. When you use an +// asymmetric CMK to encrypt or reencrypt data, be sure to record the CMK and +// encryption algorithm that you choose. You will be required to provide the same +// CMK and encryption algorithm when you decrypt the data. If the CMK and algorithm +// do not match the values used to encrypt the data, the decrypt operation fails. +// You are not required to supply the CMK ID and encryption algorithm when you +// decrypt with symmetric CMKs because AWS KMS stores this information in the +// ciphertext blob. AWS KMS cannot store metadata in ciphertext generated with +// asymmetric keys. The standard format for asymmetric key ciphertext does not +// include configurable fields. // -// Unlike other AWS KMS API -// operations, ReEncrypt callers must have two permissions: +// Unlike other AWS KMS API operations, ReEncrypt +// callers must have two permissions: // -// * -// kms:ReEncryptFrom permission on the source CMK +// * kms:ReEncryptFrom permission on the source +// CMK // -// * kms:ReEncryptTo permission -// on the destination CMK +// * kms:ReEncryptTo permission on the destination CMK // -// To permit reencryption from or to a CMK, include the -// "kms:ReEncrypt*" permission in your key policy +// To permit reencryption +// from or to a CMK, include the "kms:ReEncrypt*" permission in your key policy // (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html). This // permission is automatically included in the key policy when you use the console // to create a CMK. But you must include it manually when you create a CMK @@ -107,16 +106,16 @@ type ReEncryptInput struct { // an alias name, prefix it with "alias/". To specify a CMK in a different AWS // account, you must use the key ARN or alias ARN. For example: // - // * Key ID: + // * Key ID: // 1234abcd-12ab-34cd-56ef-1234567890ab // - // * Key ARN: + // * Key ARN: // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab // + // * + // Alias name: alias/ExampleAlias // - // * Alias name: alias/ExampleAlias - // - // * Alias ARN: + // * Alias ARN: // arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias // // To get the key ID and key @@ -180,16 +179,16 @@ type ReEncryptInput struct { // To specify a CMK, use its key ID, Amazon Resource Name (ARN), alias name, or // alias ARN. When using an alias name, prefix it with "alias/". 
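A sketch of ReEncrypt moving ciphertext between two symmetric CMKs server-side, as described above. SourceKeyId and the encryption-algorithm fields are omitted because they are only required when an asymmetric CMK is involved.

package kmsexamples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

// reencryptUnderNewKey re-encrypts ciphertext under a different CMK without
// exposing the plaintext on the client. The caller needs kms:ReEncryptFrom on
// the source CMK and kms:ReEncryptTo on the destination CMK, as noted above.
func reencryptUnderNewKey(ctx context.Context, client *kms.Client, ciphertext []byte, destinationKeyID string) ([]byte, error) {
	out, err := client.ReEncrypt(ctx, &kms.ReEncryptInput{
		CiphertextBlob:   ciphertext,
		DestinationKeyId: aws.String(destinationKeyID),
	})
	if err != nil {
		return nil, err
	}
	return out.CiphertextBlob, nil
}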
For example: // + // * + // Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: + // * Key ARN: // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab // + // * + // Alias name: alias/ExampleAlias // - // * Alias name: alias/ExampleAlias - // - // * Alias ARN: + // * Alias ARN: // arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias // // To get the key ID and key diff --git a/service/kms/api_op_RetireGrant.go b/service/kms/api_op_RetireGrant.go index 11424e46c2f..3d0fdd049aa 100644 --- a/service/kms/api_op_RetireGrant.go +++ b/service/kms/api_op_RetireGrant.go @@ -14,20 +14,20 @@ import ( // You should revoke a grant when you intend to actively deny operations that // depend on it. The following are permitted to call this API: // -// * The AWS -// account (root user) under which the grant was created +// * The AWS account +// (root user) under which the grant was created // -// * The -// RetiringPrincipal, if present in the grant +// * The RetiringPrincipal, if +// present in the grant // -// * The GranteePrincipal, if -// RetireGrant is an operation specified in the grant +// * The GranteePrincipal, if RetireGrant is an operation +// specified in the grant // -// You must identify the grant -// to retire by its grant token or by a combination of the grant ID and the Amazon -// Resource Name (ARN) of the customer master key (CMK). A grant token is a unique -// variable-length base64-encoded string. A grant ID is a 64 character unique -// identifier of a grant. The CreateGrant operation returns both. +// You must identify the grant to retire by its grant token +// or by a combination of the grant ID and the Amazon Resource Name (ARN) of the +// customer master key (CMK). A grant token is a unique variable-length +// base64-encoded string. A grant ID is a 64 character unique identifier of a +// grant. The CreateGrant operation returns both. func (c *Client) RetireGrant(ctx context.Context, params *RetireGrantInput, optFns ...func(*Options)) (*RetireGrantOutput, error) { if params == nil { params = &RetireGrantInput{} @@ -48,7 +48,7 @@ type RetireGrantInput struct { // Unique identifier of the grant to retire. The grant ID is returned in the // response to a CreateGrant operation. // - // * Grant ID Example - + // * Grant ID Example - // 0123456789012345678901234567890123456789012345678901234567890123 GrantId *string diff --git a/service/kms/api_op_RevokeGrant.go b/service/kms/api_op_RevokeGrant.go index e65d394a930..a2d05619e00 100644 --- a/service/kms/api_op_RevokeGrant.go +++ b/service/kms/api_op_RevokeGrant.go @@ -40,10 +40,10 @@ type RevokeGrantInput struct { // Specify the key ID or the Amazon Resource Name (ARN) of the CMK. To specify a // CMK in a different AWS account, you must use the key ARN. For example: // - // * - // Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // * Key + // ID: 1234abcd-12ab-34cd-56ef-1234567890ab // - // * Key ARN: + // * Key ARN: // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab // // To diff --git a/service/kms/api_op_ScheduleKeyDeletion.go b/service/kms/api_op_ScheduleKeyDeletion.go index 517d60dbfa1..a8a3f92c959 100644 --- a/service/kms/api_op_ScheduleKeyDeletion.go +++ b/service/kms/api_op_ScheduleKeyDeletion.go @@ -57,10 +57,10 @@ type ScheduleKeyDeletionInput struct { // The unique identifier of the customer master key (CMK) to delete. Specify the // key ID or the Amazon Resource Name (ARN) of the CMK. 
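A small sketch of RetireGrant using the grant ID plus key ARN combination described above; passing only the grant token returned by CreateGrant is the alternative form.

package kmsexamples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

// retireGrant retires a grant identified by the CMK's key ARN and the 64
// character grant ID that CreateGrant returned.
func retireGrant(ctx context.Context, client *kms.Client, keyARN, grantID string) error {
	_, err := client.RetireGrant(ctx, &kms.RetireGrantInput{
		KeyId:   aws.String(keyARN),
		GrantId: aws.String(grantID),
	})
	return err
}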
For example: // - // * Key ID: + // * Key ID: // 1234abcd-12ab-34cd-56ef-1234567890ab // - // * Key ARN: + // * Key ARN: // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab // // To diff --git a/service/kms/api_op_Sign.go b/service/kms/api_op_Sign.go index 2676c77d9b8..fa8b96000b3 100644 --- a/service/kms/api_op_Sign.go +++ b/service/kms/api_op_Sign.go @@ -25,28 +25,27 @@ import ( // private key and that the message hasn't changed since it was signed. To use the // Sign operation, provide the following information: // -// * Use the KeyId -// parameter to identify an asymmetric CMK with a KeyUsage value of SIGN_VERIFY. To -// get the KeyUsage value of a CMK, use the DescribeKey operation. The caller must -// have kms:Sign permission on the CMK. +// * Use the KeyId parameter to +// identify an asymmetric CMK with a KeyUsage value of SIGN_VERIFY. To get the +// KeyUsage value of a CMK, use the DescribeKey operation. The caller must have +// kms:Sign permission on the CMK. // -// * Use the Message parameter to specify -// the message or message digest to sign. You can submit messages of up to 4096 -// bytes. To sign a larger message, generate a hash digest of the message, and then +// * Use the Message parameter to specify the +// message or message digest to sign. You can submit messages of up to 4096 bytes. +// To sign a larger message, generate a hash digest of the message, and then // provide the hash digest in the Message parameter. To indicate whether the // message is a full message or a digest, use the MessageType parameter. // -// * -// Choose a signing algorithm that is compatible with the CMK. +// * Choose +// a signing algorithm that is compatible with the CMK. // -// When signing a -// message, be sure to record the CMK and the signing algorithm. This information -// is required to verify the signature. To verify the signature that this operation -// generates, use the Verify operation. Or use the GetPublicKey operation to -// download the public key and then use the public key to verify the signature -// outside of AWS KMS. The CMK that you use for this operation must be in a -// compatible key state. For details, see How Key State Affects Use of a Customer -// Master Key +// When signing a message, be +// sure to record the CMK and the signing algorithm. This information is required +// to verify the signature. To verify the signature that this operation generates, +// use the Verify operation. Or use the GetPublicKey operation to download the +// public key and then use the public key to verify the signature outside of AWS +// KMS. The CMK that you use for this operation must be in a compatible key state. +// For details, see How Key State Affects Use of a Customer Master Key // (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the // AWS Key Management Service Developer Guide. func (c *Client) Sign(ctx context.Context, params *SignInput, optFns ...func(*Options)) (*SignOutput, error) { @@ -73,16 +72,16 @@ type SignInput struct { // alias name, prefix it with "alias/". To specify a CMK in a different AWS // account, you must use the key ARN or alias ARN. 
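The Sign walkthrough above (KeyId, Message, optional digest, signing algorithm) corresponds to a call like the following sketch. The key alias and algorithm choice are placeholders, and MessageType is omitted so the raw-message default applies.

package examples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

// signMessage signs a message of up to 4096 bytes with an asymmetric CMK whose
// KeyUsage is SIGN_VERIFY. Larger payloads should be hashed first and the digest
// passed instead, as described in the documentation above.
func signMessage(ctx context.Context, client *kms.Client, message []byte) ([]byte, error) {
	out, err := client.Sign(ctx, &kms.SignInput{
		KeyId:            aws.String("alias/ExampleSigningKey"),     // placeholder CMK identifier
		Message:          message,                                   // raw message (MessageType defaults to RAW)
		SigningAlgorithm: types.SigningAlgorithmSpecRsassaPssSha256, // must be compatible with the CMK's key spec
	})
	if err != nil {
		return nil, err
	}
	// Record out.KeyId and the signing algorithm; both are needed again at Verify time.
	return out.Signature, nil
}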
For example: // - // * Key ID: + // * Key ID: // 1234abcd-12ab-34cd-56ef-1234567890ab // - // * Key ARN: + // * Key ARN: // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab // + // * + // Alias name: alias/ExampleAlias // - // * Alias name: alias/ExampleAlias - // - // * Alias ARN: + // * Alias ARN: // arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias // // To get the key ID and key @@ -126,12 +125,12 @@ type SignOutput struct { // The cryptographic signature that was generated for the message. // - // * When used + // * When used // with the supported RSA signing algorithms, the encoding of this value is defined // by PKCS #1 in RFC 8017 (https://tools.ietf.org/html/rfc8017). // - // * When used - // with the ECDSA_SHA_256, ECDSA_SHA_384, or ECDSA_SHA_512 signing algorithms, this + // * When used with + // the ECDSA_SHA_256, ECDSA_SHA_384, or ECDSA_SHA_512 signing algorithms, this // value is a DER-encoded object as defined by ANS X9.62–2005 and RFC 3279 Section // 2.2.3 (https://tools.ietf.org/html/rfc3279#section-2.2.3). This is the most // commonly used signature format and is appropriate for most uses. diff --git a/service/kms/api_op_TagResource.go b/service/kms/api_op_TagResource.go index 958ed2cf3e5..9b497240744 100644 --- a/service/kms/api_op_TagResource.go +++ b/service/kms/api_op_TagResource.go @@ -44,10 +44,10 @@ type TagResourceInput struct { // A unique identifier for the CMK you are tagging. Specify the key ID or the // Amazon Resource Name (ARN) of the CMK. For example: // - // * Key ID: + // * Key ID: // 1234abcd-12ab-34cd-56ef-1234567890ab // - // * Key ARN: + // * Key ARN: // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab // // To diff --git a/service/kms/api_op_UntagResource.go b/service/kms/api_op_UntagResource.go index a63c440caca..c730668d0cd 100644 --- a/service/kms/api_op_UntagResource.go +++ b/service/kms/api_op_UntagResource.go @@ -37,10 +37,10 @@ type UntagResourceInput struct { // A unique identifier for the CMK from which you are removing tags. Specify the // key ID or the Amazon Resource Name (ARN) of the CMK. For example: // - // * Key ID: + // * Key ID: // 1234abcd-12ab-34cd-56ef-1234567890ab // - // * Key ARN: + // * Key ARN: // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab // // To diff --git a/service/kms/api_op_UpdateAlias.go b/service/kms/api_op_UpdateAlias.go index 05207ee01ca..636f9867b00 100644 --- a/service/kms/api_op_UpdateAlias.go +++ b/service/kms/api_op_UpdateAlias.go @@ -60,10 +60,9 @@ type UpdateAliasInput struct { // the same key usage. Specify the key ID or the Amazon Resource Name (ARN) of the // CMK. For example: // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab // - // * Key - // ARN: + // * Key ARN: // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab // // To diff --git a/service/kms/api_op_UpdateCustomKeyStore.go b/service/kms/api_op_UpdateCustomKeyStore.go index 444a42df0d6..0bceebec2c4 100644 --- a/service/kms/api_op_UpdateCustomKeyStore.go +++ b/service/kms/api_op_UpdateCustomKeyStore.go @@ -19,12 +19,12 @@ import ( // key store, use the DescribeCustomKeyStores operation. Use the parameters of // UpdateCustomKeyStore to edit your keystore settings. // -// * Use the +// * Use the // NewCustomKeyStoreName parameter to change the friendly name of the custom key // store to the value that you specify. 
// -// * Use the KeyStorePassword parameter -// tell AWS KMS the current password of the kmsuser crypto user (CU) +// * Use the KeyStorePassword parameter tell +// AWS KMS the current password of the kmsuser crypto user (CU) // (https://docs.aws.amazon.com/kms/latest/developerguide/key-store-concepts.html#concept-kmsuser) // in the associated AWS CloudHSM cluster. You can use this parameter to fix // connection failures @@ -33,14 +33,14 @@ import ( // kmsuser password has changed. This value does not change the password in the AWS // CloudHSM cluster. // -// * Use the CloudHsmClusterId parameter to associate the -// custom key store with a different, but related, AWS CloudHSM cluster. You can -// use this parameter to repair a custom key store if its AWS CloudHSM cluster -// becomes corrupted or is deleted, or when you need to create or restore a cluster -// from a backup. +// * Use the CloudHsmClusterId parameter to associate the custom +// key store with a different, but related, AWS CloudHSM cluster. You can use this +// parameter to repair a custom key store if its AWS CloudHSM cluster becomes +// corrupted or is deleted, or when you need to create or restore a cluster from a +// backup. // -// If the operation succeeds, it returns a JSON object with no -// properties. This operation is part of the Custom Key Store feature +// If the operation succeeds, it returns a JSON object with no properties. +// This operation is part of the Custom Key Store feature // (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) // feature in AWS KMS, which combines the convenience and extensive integration of // AWS KMS with the isolation and control of a single-tenant key store. diff --git a/service/kms/api_op_UpdateKeyDescription.go b/service/kms/api_op_UpdateKeyDescription.go index f6add5543bb..8a3b65579a8 100644 --- a/service/kms/api_op_UpdateKeyDescription.go +++ b/service/kms/api_op_UpdateKeyDescription.go @@ -42,10 +42,10 @@ type UpdateKeyDescriptionInput struct { // A unique identifier for the customer master key (CMK). Specify the key ID or the // Amazon Resource Name (ARN) of the CMK. For example: // - // * Key ID: + // * Key ID: // 1234abcd-12ab-34cd-56ef-1234567890ab // - // * Key ARN: + // * Key ARN: // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab // // To diff --git a/service/kms/api_op_Verify.go b/service/kms/api_op_Verify.go index ccda7e81512..4b636a766f8 100644 --- a/service/kms/api_op_Verify.go +++ b/service/kms/api_op_Verify.go @@ -59,16 +59,16 @@ type VerifyInput struct { // name, prefix it with "alias/". To specify a CMK in a different AWS account, you // must use the key ARN or alias ARN. 
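The UpdateCustomKeyStore documentation above names three editable settings: NewCustomKeyStoreName, KeyStorePassword, and CloudHsmClusterId. A sketch of the password-sync case only, with a placeholder key store ID and the assumption that CustomKeyStoreId identifies the store to edit:

package examples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

// syncKeyStorePassword tells AWS KMS the current kmsuser password for a custom key
// store, one way to fix connection failures after the password changed in the
// CloudHSM cluster. Only the settings you want to change are supplied.
func syncKeyStorePassword(ctx context.Context, client *kms.Client, keyStoreID, kmsuserPassword string) error {
	_, err := client.UpdateCustomKeyStore(ctx, &kms.UpdateCustomKeyStoreInput{
		CustomKeyStoreId: aws.String(keyStoreID),      // the custom key store to edit
		KeyStorePassword: aws.String(kmsuserPassword), // current password of the kmsuser crypto user
	})
	return err
}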
For example: // - // * Key ID: + // * Key ID: // 1234abcd-12ab-34cd-56ef-1234567890ab // - // * Key ARN: + // * Key ARN: // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab // + // * + // Alias name: alias/ExampleAlias // - // * Alias name: alias/ExampleAlias - // - // * Alias ARN: + // * Alias ARN: // arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias // // To get the key ID and key diff --git a/service/kms/doc.go b/service/kms/doc.go index f6068ef937d..464a63b0e75 100644 --- a/service/kms/doc.go +++ b/service/kms/doc.go @@ -37,17 +37,17 @@ // Resources For more information about credentials and request signing, see the // following: // -// * AWS Security Credentials +// * AWS Security Credentials // (https://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html) - // This topic provides general information about the types of credentials used for // accessing AWS. // -// * Temporary Security Credentials +// * Temporary Security Credentials // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html) - // This section of the IAM User Guide describes how to create and use temporary // security credentials. // -// * Signature Version 4 Signing Process +// * Signature Version 4 Signing Process // (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html) - This // set of topics walks you through the process of signing a request using an access // key ID and a secret access key. @@ -57,12 +57,12 @@ // most applications. You will likely perform operations other than these, such as // creating keys and assigning policies, by using the console. // -// * Encrypt +// * Encrypt // +// * +// Decrypt // -// * Decrypt +// * GenerateDataKey // -// * GenerateDataKey -// -// * GenerateDataKeyWithoutPlaintext +// * GenerateDataKeyWithoutPlaintext package kms diff --git a/service/kms/types/enums.go b/service/kms/types/enums.go index 7792500843c..dc38e8a2428 100644 --- a/service/kms/types/enums.go +++ b/service/kms/types/enums.go @@ -6,9 +6,9 @@ type AlgorithmSpec string // Enum values for AlgorithmSpec const ( - AlgorithmSpecRsaes_pkcs1_v1_5 AlgorithmSpec = "RSAES_PKCS1_V1_5" - AlgorithmSpecRsaes_oaep_sha_1 AlgorithmSpec = "RSAES_OAEP_SHA_1" - AlgorithmSpecRsaes_oaep_sha_256 AlgorithmSpec = "RSAES_OAEP_SHA_256" + AlgorithmSpecRsaesPkcs1V15 AlgorithmSpec = "RSAES_PKCS1_V1_5" + AlgorithmSpecRsaesOaepSha1 AlgorithmSpec = "RSAES_OAEP_SHA_1" + AlgorithmSpecRsaesOaepSha256 AlgorithmSpec = "RSAES_OAEP_SHA_256" ) // Values returns all known values for AlgorithmSpec. 
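The package documentation above lists Encrypt, Decrypt, GenerateDataKey, and GenerateDataKeyWithoutPlaintext as the most commonly used operations. A sketch of the usual envelope-encryption pattern with GenerateDataKey, using the renamed DataKeySpec constant from this patch; the CMK alias and the KeySpec field usage are illustrative assumptions.

package examples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

// newDataKey returns a plaintext AES-256 data key for local encryption plus the
// encrypted copy to store alongside the data. Decrypt the stored copy later with
// the Decrypt operation; the plaintext key should never be persisted.
func newDataKey(ctx context.Context, client *kms.Client) (plaintext, encrypted []byte, err error) {
	out, err := client.GenerateDataKey(ctx, &kms.GenerateDataKeyInput{
		KeyId:   aws.String("alias/ExampleAlias"), // placeholder symmetric CMK
		KeySpec: types.DataKeySpecAes256,          // renamed in this patch from DataKeySpecAes_256
	})
	if err != nil {
		return nil, nil, err
	}
	return out.Plaintext, out.CiphertextBlob, nil
}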
Note that this can be @@ -26,15 +26,15 @@ type ConnectionErrorCodeType string // Enum values for ConnectionErrorCodeType const ( - ConnectionErrorCodeTypeInvalid_credentials ConnectionErrorCodeType = "INVALID_CREDENTIALS" - ConnectionErrorCodeTypeCluster_not_found ConnectionErrorCodeType = "CLUSTER_NOT_FOUND" - ConnectionErrorCodeTypeNetwork_errors ConnectionErrorCodeType = "NETWORK_ERRORS" - ConnectionErrorCodeTypeInternal_error ConnectionErrorCodeType = "INTERNAL_ERROR" - ConnectionErrorCodeTypeInsufficient_cloudhsm_hsms ConnectionErrorCodeType = "INSUFFICIENT_CLOUDHSM_HSMS" - ConnectionErrorCodeTypeUser_locked_out ConnectionErrorCodeType = "USER_LOCKED_OUT" - ConnectionErrorCodeTypeUser_not_found ConnectionErrorCodeType = "USER_NOT_FOUND" - ConnectionErrorCodeTypeUser_logged_in ConnectionErrorCodeType = "USER_LOGGED_IN" - ConnectionErrorCodeTypeSubnet_not_found ConnectionErrorCodeType = "SUBNET_NOT_FOUND" + ConnectionErrorCodeTypeInvalidCredentials ConnectionErrorCodeType = "INVALID_CREDENTIALS" + ConnectionErrorCodeTypeClusterNotFound ConnectionErrorCodeType = "CLUSTER_NOT_FOUND" + ConnectionErrorCodeTypeNetworkErrors ConnectionErrorCodeType = "NETWORK_ERRORS" + ConnectionErrorCodeTypeInternalError ConnectionErrorCodeType = "INTERNAL_ERROR" + ConnectionErrorCodeTypeInsufficientCloudhsmHsms ConnectionErrorCodeType = "INSUFFICIENT_CLOUDHSM_HSMS" + ConnectionErrorCodeTypeUserLockedOut ConnectionErrorCodeType = "USER_LOCKED_OUT" + ConnectionErrorCodeTypeUserNotFound ConnectionErrorCodeType = "USER_NOT_FOUND" + ConnectionErrorCodeTypeUserLoggedIn ConnectionErrorCodeType = "USER_LOGGED_IN" + ConnectionErrorCodeTypeSubnetNotFound ConnectionErrorCodeType = "SUBNET_NOT_FOUND" ) // Values returns all known values for ConnectionErrorCodeType. Note that this can @@ -82,14 +82,14 @@ type CustomerMasterKeySpec string // Enum values for CustomerMasterKeySpec const ( - CustomerMasterKeySpecRsa_2048 CustomerMasterKeySpec = "RSA_2048" - CustomerMasterKeySpecRsa_3072 CustomerMasterKeySpec = "RSA_3072" - CustomerMasterKeySpecRsa_4096 CustomerMasterKeySpec = "RSA_4096" - CustomerMasterKeySpecEcc_nist_p256 CustomerMasterKeySpec = "ECC_NIST_P256" - CustomerMasterKeySpecEcc_nist_p384 CustomerMasterKeySpec = "ECC_NIST_P384" - CustomerMasterKeySpecEcc_nist_p521 CustomerMasterKeySpec = "ECC_NIST_P521" - CustomerMasterKeySpecEcc_secg_p256k1 CustomerMasterKeySpec = "ECC_SECG_P256K1" - CustomerMasterKeySpecSymmetric_default CustomerMasterKeySpec = "SYMMETRIC_DEFAULT" + CustomerMasterKeySpecRsa2048 CustomerMasterKeySpec = "RSA_2048" + CustomerMasterKeySpecRsa3072 CustomerMasterKeySpec = "RSA_3072" + CustomerMasterKeySpecRsa4096 CustomerMasterKeySpec = "RSA_4096" + CustomerMasterKeySpecEccNistP256 CustomerMasterKeySpec = "ECC_NIST_P256" + CustomerMasterKeySpecEccNistP384 CustomerMasterKeySpec = "ECC_NIST_P384" + CustomerMasterKeySpecEccNistP521 CustomerMasterKeySpec = "ECC_NIST_P521" + CustomerMasterKeySpecEccSecgP256k1 CustomerMasterKeySpec = "ECC_SECG_P256K1" + CustomerMasterKeySpecSymmetricDefault CustomerMasterKeySpec = "SYMMETRIC_DEFAULT" ) // Values returns all known values for CustomerMasterKeySpec. 
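The CustomerMasterKeySpec constants above now use CamelCase names. A sketch of creating an asymmetric signing CMK with the renamed constants; the description is a placeholder and the CreateKeyInput field names are assumptions based on the KMS CreateKey API rather than this hunk.

package examples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

// createSigningCMK creates an asymmetric ECC CMK for signing and verification.
// Both enum constants use the CamelCase names introduced by this change
// (previously CustomerMasterKeySpecEcc_nist_p256 and KeyUsageTypeSign_verify).
func createSigningCMK(ctx context.Context, client *kms.Client) (string, error) {
	out, err := client.CreateKey(ctx, &kms.CreateKeyInput{
		CustomerMasterKeySpec: types.CustomerMasterKeySpecEccNistP256,
		KeyUsage:              types.KeyUsageTypeSignVerify,
		Description:           aws.String("example ECDSA P-256 signing key"), // placeholder
	})
	if err != nil {
		return "", err
	}
	keyID := ""
	if out.KeyMetadata != nil && out.KeyMetadata.KeyId != nil {
		keyID = *out.KeyMetadata.KeyId
	}
	return keyID, nil
}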
Note that this can be @@ -112,13 +112,13 @@ type DataKeyPairSpec string // Enum values for DataKeyPairSpec const ( - DataKeyPairSpecRsa_2048 DataKeyPairSpec = "RSA_2048" - DataKeyPairSpecRsa_3072 DataKeyPairSpec = "RSA_3072" - DataKeyPairSpecRsa_4096 DataKeyPairSpec = "RSA_4096" - DataKeyPairSpecEcc_nist_p256 DataKeyPairSpec = "ECC_NIST_P256" - DataKeyPairSpecEcc_nist_p384 DataKeyPairSpec = "ECC_NIST_P384" - DataKeyPairSpecEcc_nist_p521 DataKeyPairSpec = "ECC_NIST_P521" - DataKeyPairSpecEcc_secg_p256k1 DataKeyPairSpec = "ECC_SECG_P256K1" + DataKeyPairSpecRsa2048 DataKeyPairSpec = "RSA_2048" + DataKeyPairSpecRsa3072 DataKeyPairSpec = "RSA_3072" + DataKeyPairSpecRsa4096 DataKeyPairSpec = "RSA_4096" + DataKeyPairSpecEccNistP256 DataKeyPairSpec = "ECC_NIST_P256" + DataKeyPairSpecEccNistP384 DataKeyPairSpec = "ECC_NIST_P384" + DataKeyPairSpecEccNistP521 DataKeyPairSpec = "ECC_NIST_P521" + DataKeyPairSpecEccSecgP256k1 DataKeyPairSpec = "ECC_SECG_P256K1" ) // Values returns all known values for DataKeyPairSpec. Note that this can be @@ -140,8 +140,8 @@ type DataKeySpec string // Enum values for DataKeySpec const ( - DataKeySpecAes_256 DataKeySpec = "AES_256" - DataKeySpecAes_128 DataKeySpec = "AES_128" + DataKeySpecAes256 DataKeySpec = "AES_256" + DataKeySpecAes128 DataKeySpec = "AES_128" ) // Values returns all known values for DataKeySpec. Note that this can be expanded @@ -158,9 +158,9 @@ type EncryptionAlgorithmSpec string // Enum values for EncryptionAlgorithmSpec const ( - EncryptionAlgorithmSpecSymmetric_default EncryptionAlgorithmSpec = "SYMMETRIC_DEFAULT" - EncryptionAlgorithmSpecRsaes_oaep_sha_1 EncryptionAlgorithmSpec = "RSAES_OAEP_SHA_1" - EncryptionAlgorithmSpecRsaes_oaep_sha_256 EncryptionAlgorithmSpec = "RSAES_OAEP_SHA_256" + EncryptionAlgorithmSpecSymmetricDefault EncryptionAlgorithmSpec = "SYMMETRIC_DEFAULT" + EncryptionAlgorithmSpecRsaesOaepSha1 EncryptionAlgorithmSpec = "RSAES_OAEP_SHA_1" + EncryptionAlgorithmSpecRsaesOaepSha256 EncryptionAlgorithmSpec = "RSAES_OAEP_SHA_256" ) // Values returns all known values for EncryptionAlgorithmSpec. Note that this can @@ -178,8 +178,8 @@ type ExpirationModelType string // Enum values for ExpirationModelType const ( - ExpirationModelTypeKey_material_expires ExpirationModelType = "KEY_MATERIAL_EXPIRES" - ExpirationModelTypeKey_material_does_not_expire ExpirationModelType = "KEY_MATERIAL_DOES_NOT_EXPIRE" + ExpirationModelTypeKeyMaterialExpires ExpirationModelType = "KEY_MATERIAL_EXPIRES" + ExpirationModelTypeKeyMaterialDoesNotExpire ExpirationModelType = "KEY_MATERIAL_DOES_NOT_EXPIRE" ) // Values returns all known values for ExpirationModelType. Note that this can be @@ -280,8 +280,8 @@ type KeyUsageType string // Enum values for KeyUsageType const ( - KeyUsageTypeSign_verify KeyUsageType = "SIGN_VERIFY" - KeyUsageTypeEncrypt_decrypt KeyUsageType = "ENCRYPT_DECRYPT" + KeyUsageTypeSignVerify KeyUsageType = "SIGN_VERIFY" + KeyUsageTypeEncryptDecrypt KeyUsageType = "ENCRYPT_DECRYPT" ) // Values returns all known values for KeyUsageType. Note that this can be expanded @@ -316,9 +316,9 @@ type OriginType string // Enum values for OriginType const ( - OriginTypeAws_kms OriginType = "AWS_KMS" - OriginTypeExternal OriginType = "EXTERNAL" - OriginTypeAws_cloudhsm OriginType = "AWS_CLOUDHSM" + OriginTypeAwsKms OriginType = "AWS_KMS" + OriginTypeExternal OriginType = "EXTERNAL" + OriginTypeAwsCloudhsm OriginType = "AWS_CLOUDHSM" ) // Values returns all known values for OriginType. 
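The same rename pattern applies to DataKeyPairSpec: code that previously referenced DataKeyPairSpecEcc_nist_p256 now uses the constant shown below. A minimal sketch with a placeholder CMK alias; the GenerateDataKeyPairInput field names are assumptions from the KMS API, not from this hunk.

package examples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

// newEccDataKeyPair asks KMS for an ECC P-256 data key pair. The private key comes
// back both in plaintext and encrypted under the named symmetric CMK.
func newEccDataKeyPair(ctx context.Context, client *kms.Client) (*kms.GenerateDataKeyPairOutput, error) {
	return client.GenerateDataKeyPair(ctx, &kms.GenerateDataKeyPairInput{
		KeyId:       aws.String("alias/ExampleAlias"), // placeholder symmetric CMK that wraps the private key
		KeyPairSpec: types.DataKeyPairSpecEccNistP256, // was DataKeyPairSpecEcc_nist_p256 before this patch
	})
}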
Note that this can be expanded @@ -336,15 +336,15 @@ type SigningAlgorithmSpec string // Enum values for SigningAlgorithmSpec const ( - SigningAlgorithmSpecRsassa_pss_sha_256 SigningAlgorithmSpec = "RSASSA_PSS_SHA_256" - SigningAlgorithmSpecRsassa_pss_sha_384 SigningAlgorithmSpec = "RSASSA_PSS_SHA_384" - SigningAlgorithmSpecRsassa_pss_sha_512 SigningAlgorithmSpec = "RSASSA_PSS_SHA_512" - SigningAlgorithmSpecRsassa_pkcs1_v1_5_sha_256 SigningAlgorithmSpec = "RSASSA_PKCS1_V1_5_SHA_256" - SigningAlgorithmSpecRsassa_pkcs1_v1_5_sha_384 SigningAlgorithmSpec = "RSASSA_PKCS1_V1_5_SHA_384" - SigningAlgorithmSpecRsassa_pkcs1_v1_5_sha_512 SigningAlgorithmSpec = "RSASSA_PKCS1_V1_5_SHA_512" - SigningAlgorithmSpecEcdsa_sha_256 SigningAlgorithmSpec = "ECDSA_SHA_256" - SigningAlgorithmSpecEcdsa_sha_384 SigningAlgorithmSpec = "ECDSA_SHA_384" - SigningAlgorithmSpecEcdsa_sha_512 SigningAlgorithmSpec = "ECDSA_SHA_512" + SigningAlgorithmSpecRsassaPssSha256 SigningAlgorithmSpec = "RSASSA_PSS_SHA_256" + SigningAlgorithmSpecRsassaPssSha384 SigningAlgorithmSpec = "RSASSA_PSS_SHA_384" + SigningAlgorithmSpecRsassaPssSha512 SigningAlgorithmSpec = "RSASSA_PSS_SHA_512" + SigningAlgorithmSpecRsassaPkcs1V15Sha256 SigningAlgorithmSpec = "RSASSA_PKCS1_V1_5_SHA_256" + SigningAlgorithmSpecRsassaPkcs1V15Sha384 SigningAlgorithmSpec = "RSASSA_PKCS1_V1_5_SHA_384" + SigningAlgorithmSpecRsassaPkcs1V15Sha512 SigningAlgorithmSpec = "RSASSA_PKCS1_V1_5_SHA_512" + SigningAlgorithmSpecEcdsaSha256 SigningAlgorithmSpec = "ECDSA_SHA_256" + SigningAlgorithmSpecEcdsaSha384 SigningAlgorithmSpec = "ECDSA_SHA_384" + SigningAlgorithmSpecEcdsaSha512 SigningAlgorithmSpec = "ECDSA_SHA_512" ) // Values returns all known values for SigningAlgorithmSpec. Note that this can be @@ -368,7 +368,7 @@ type WrappingKeySpec string // Enum values for WrappingKeySpec const ( - WrappingKeySpecRsa_2048 WrappingKeySpec = "RSA_2048" + WrappingKeySpecRsa2048 WrappingKeySpec = "RSA_2048" ) // Values returns all known values for WrappingKeySpec. Note that this can be diff --git a/service/kms/types/errors.go b/service/kms/types/errors.go index c73b8410325..e887372b098 100644 --- a/service/kms/types/errors.go +++ b/service/kms/types/errors.go @@ -52,11 +52,11 @@ func (e *CloudHsmClusterInUseException) ErrorFault() smithy.ErrorFault { return // The request was rejected because the associated AWS CloudHSM cluster did not // meet the configuration requirements for a custom key store. // -// * The cluster -// must be configured with private subnets in at least two different Availability -// Zones in the Region. +// * The cluster must +// be configured with private subnets in at least two different Availability Zones +// in the Region. // -// * The security group for the cluster +// * The security group for the cluster // (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html) // (cloudhsm-cluster--sg) must include inbound rules and outbound rules that allow // TCP traffic on ports 2223-2225. The Source in the inbound rules and the @@ -67,8 +67,8 @@ func (e *CloudHsmClusterInUseException) ErrorFault() smithy.ErrorFault { return // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html) // operation. // -// * The cluster must contain at least as many HSMs as the -// operation requires. To add HSMs, use the AWS CloudHSM CreateHsm +// * The cluster must contain at least as many HSMs as the operation +// requires. 
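To check a signature produced by Sign, the Verify operation takes the same CMK, message, and algorithm, here with one of the renamed SigningAlgorithmSpec constants. A sketch with a placeholder key identifier:

package examples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

// verifySignature asks KMS to verify a signature inside the service, so the public
// key never has to leave AWS KMS. The caller inspects SignatureValid on the result.
func verifySignature(ctx context.Context, client *kms.Client, message, signature []byte) (*kms.VerifyOutput, error) {
	return client.Verify(ctx, &kms.VerifyInput{
		KeyId:            aws.String("alias/ExampleSigningKey"), // placeholder; must be the CMK used by Sign
		Message:          message,
		Signature:        signature,
		SigningAlgorithm: types.SigningAlgorithmSpecEcdsaSha256, // was SigningAlgorithmSpecEcdsa_sha_256
	})
}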
To add HSMs, use the AWS CloudHSM CreateHsm // (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html) // operation. For the CreateCustomKeyStore, UpdateCustomKeyStore, and CreateKey // operations, the AWS CloudHSM cluster must have at least two active HSMs, each in @@ -205,16 +205,16 @@ func (e *CustomKeyStoreHasCMKsException) ErrorFault() smithy.ErrorFault { return // DescribeCustomKeyStores operation. This exception is thrown under the following // conditions: // -// * You requested the CreateKey or GenerateRandom operation in a +// * You requested the CreateKey or GenerateRandom operation in a // custom key store that is not connected. These operations are valid only when the // custom key store ConnectionState is CONNECTED. // -// * You requested the +// * You requested the // UpdateCustomKeyStore or DeleteCustomKeyStore operation on a custom key store // that is not disconnected. This operation is valid only when the custom key store // ConnectionState is DISCONNECTED. // -// * You requested the ConnectCustomKeyStore +// * You requested the ConnectCustomKeyStore // operation on a custom key store with a ConnectionState of DISCONNECTING or // FAILED. This operation is valid for all other ConnectionState values. type CustomKeyStoreInvalidStateException struct { @@ -501,19 +501,19 @@ func (e *InvalidImportTokenException) ErrorFault() smithy.ErrorFault { return sm // The request was rejected for one of the following reasons: // -// * The KeyUsage -// value of the CMK is incompatible with the API operation. +// * The KeyUsage value +// of the CMK is incompatible with the API operation. // -// * The encryption -// algorithm or signing algorithm specified for the operation is incompatible with -// the type of key material in the CMK (CustomerMasterKeySpec). +// * The encryption algorithm +// or signing algorithm specified for the operation is incompatible with the type +// of key material in the CMK (CustomerMasterKeySpec). // -// For encrypting, -// decrypting, re-encrypting, and generating data keys, the KeyUsage must be -// ENCRYPT_DECRYPT. For signing and verifying, the KeyUsage must be SIGN_VERIFY. To -// find the KeyUsage of a CMK, use the DescribeKey operation. To find the -// encryption or signing algorithms supported for a particular CMK, use the -// DescribeKey operation. +// For encrypting, decrypting, +// re-encrypting, and generating data keys, the KeyUsage must be ENCRYPT_DECRYPT. +// For signing and verifying, the KeyUsage must be SIGN_VERIFY. To find the +// KeyUsage of a CMK, use the DescribeKey operation. To find the encryption or +// signing algorithms supported for a particular CMK, use the DescribeKey +// operation. type InvalidKeyUsageException struct { Message *string } diff --git a/service/kms/types/types.go b/service/kms/types/types.go index 8f7932cb820..e629efc94d2 100644 --- a/service/kms/types/types.go +++ b/service/kms/types/types.go @@ -32,59 +32,60 @@ type CustomKeyStoresListEntry struct { // (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-failed) // in AWS Key Management Service Developer Guide. Valid values are: // - // * + // * // CLUSTER_NOT_FOUND - AWS KMS cannot find the AWS CloudHSM cluster with the // specified cluster ID. // - // * INSUFFICIENT_CLOUDHSM_HSMS - The associated AWS + // * INSUFFICIENT_CLOUDHSM_HSMS - The associated AWS // CloudHSM cluster does not contain any active HSMs. 
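The exception types in errors.go are concrete Go error types, so callers can branch on them with errors.As. A sketch around ConnectCustomKeyStore; the key store ID is a placeholder, and the CloudHsmClusterInvalidConfigurationException type name is an assumption matching the configuration-requirements error documented above rather than something shown verbatim in this hunk.

package examples

import (
	"context"
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

// connectKeyStore connects a custom key store and reports the two failure modes
// documented above: a key store in the wrong ConnectionState and a CloudHSM
// cluster that does not meet the configuration requirements.
func connectKeyStore(ctx context.Context, client *kms.Client, keyStoreID string) error {
	_, err := client.ConnectCustomKeyStore(ctx, &kms.ConnectCustomKeyStoreInput{
		CustomKeyStoreId: aws.String(keyStoreID),
	})
	var invalidState *types.CustomKeyStoreInvalidStateException
	var badCluster *types.CloudHsmClusterInvalidConfigurationException
	switch {
	case errors.As(err, &invalidState):
		return fmt.Errorf("key store is not in a connectable state: %w", err)
	case errors.As(err, &badCluster):
		return fmt.Errorf("associated CloudHSM cluster is misconfigured: %w", err)
	default:
		return err
	}
}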
To connect a custom key store - // to its AWS CloudHSM cluster, the cluster must contain at least one active HSM. + // to its AWS CloudHSM cluster, the cluster must contain at least one active + // HSM. // + // * INTERNAL_ERROR - AWS KMS could not complete the request due to an + // internal error. Retry the request. For ConnectCustomKeyStore requests, + // disconnect the custom key store before trying to connect again. // - // * INTERNAL_ERROR - AWS KMS could not complete the request due to an internal - // error. Retry the request. For ConnectCustomKeyStore requests, disconnect the - // custom key store before trying to connect again. - // - // * INVALID_CREDENTIALS - - // AWS KMS does not have the correct password for the kmsuser crypto user in the - // AWS CloudHSM cluster. Before you can connect your custom key store to its AWS - // CloudHSM cluster, you must change the kmsuser account password and update the - // key store password value for the custom key store. - // - // * NETWORK_ERRORS - - // Network errors are preventing AWS KMS from connecting to the custom key store. + // * + // INVALID_CREDENTIALS - AWS KMS does not have the correct password for the kmsuser + // crypto user in the AWS CloudHSM cluster. Before you can connect your custom key + // store to its AWS CloudHSM cluster, you must change the kmsuser account password + // and update the key store password value for the custom key store. // + // * + // NETWORK_ERRORS - Network errors are preventing AWS KMS from connecting to the + // custom key store. // - // * SUBNET_NOT_FOUND - A subnet in the AWS CloudHSM cluster configuration was - // deleted. If AWS KMS cannot find all of the subnets in the cluster configuration, - // attempts to connect the custom key store to the AWS CloudHSM cluster fail. To - // fix this error, create a cluster from a recent backup and associate it with your - // custom key store. (This process creates a new cluster configuration with a VPC - // and private subnets.) For details, see How to Fix a Connection Failure + // * SUBNET_NOT_FOUND - A subnet in the AWS CloudHSM cluster + // configuration was deleted. If AWS KMS cannot find all of the subnets in the + // cluster configuration, attempts to connect the custom key store to the AWS + // CloudHSM cluster fail. To fix this error, create a cluster from a recent backup + // and associate it with your custom key store. (This process creates a new cluster + // configuration with a VPC and private subnets.) For details, see How to Fix a + // Connection Failure // (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-failed) // in the AWS Key Management Service Developer Guide. // - // * USER_LOCKED_OUT - The + // * USER_LOCKED_OUT - The // kmsuser CU account is locked out of the associated AWS CloudHSM cluster due to // too many failed password attempts. Before you can connect your custom key store // to its AWS CloudHSM cluster, you must change the kmsuser account password and // update the key store password value for the custom key store. // - // * - // USER_LOGGED_IN - The kmsuser CU account is logged into the the associated AWS - // CloudHSM cluster. This prevents AWS KMS from rotating the kmsuser account - // password and logging into the cluster. Before you can connect your custom key - // store to its AWS CloudHSM cluster, you must log the kmsuser CU out of the - // cluster. 
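The ConnectionErrorCode values documented above surface on CustomKeyStoresListEntry, which DescribeCustomKeyStores returns. A sketch that maps a few of the codes to a short remediation hint, using the CamelCase constants this patch introduces; the output field name and the hint wording are assumptions.

package examples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

// keyStoreConnectionHints returns a short hint for each custom key store whose
// last connection attempt failed, keyed on the ConnectionErrorCode values above.
func keyStoreConnectionHints(ctx context.Context, client *kms.Client) ([]string, error) {
	out, err := client.DescribeCustomKeyStores(ctx, &kms.DescribeCustomKeyStoresInput{})
	if err != nil {
		return nil, err
	}
	var hints []string
	for _, ks := range out.CustomKeyStores {
		switch ks.ConnectionErrorCode {
		case types.ConnectionErrorCodeTypeInvalidCredentials:
			hints = append(hints, "update the kmsuser password stored for this key store")
		case types.ConnectionErrorCodeTypeUserLoggedIn:
			hints = append(hints, "log the kmsuser CU out of the CloudHSM cluster, then reconnect")
		case types.ConnectionErrorCodeTypeSubnetNotFound:
			hints = append(hints, "restore the cluster from a recent backup and re-associate it")
		case "":
			// no connection error recorded for this key store
		default:
			hints = append(hints, "see the How to Fix a Connection Failure guide")
		}
	}
	return hints, nil
}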
If you changed the kmsuser password to log into the cluster, you must - // also and update the key store password value for the custom key store. For help, - // see How to Log Out and Reconnect + // * USER_LOGGED_IN + // - The kmsuser CU account is logged into the the associated AWS CloudHSM cluster. + // This prevents AWS KMS from rotating the kmsuser account password and logging + // into the cluster. Before you can connect your custom key store to its AWS + // CloudHSM cluster, you must log the kmsuser CU out of the cluster. If you changed + // the kmsuser password to log into the cluster, you must also and update the key + // store password value for the custom key store. For help, see How to Log Out and + // Reconnect // (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#login-kmsuser-2) // in the AWS Key Management Service Developer Guide. // - // * USER_NOT_FOUND - AWS - // KMS cannot find a kmsuser CU account in the associated AWS CloudHSM cluster. - // Before you can connect your custom key store to its AWS CloudHSM cluster, you - // must create a kmsuser CU account in the cluster, and then update the key store + // * USER_NOT_FOUND - AWS KMS + // cannot find a kmsuser CU account in the associated AWS CloudHSM cluster. Before + // you can connect your custom key store to its AWS CloudHSM cluster, you must + // create a kmsuser CU account in the cluster, and then update the key store // password value for the custom key store. ConnectionErrorCode ConnectionErrorCodeType diff --git a/service/lakeformation/types/enums.go b/service/lakeformation/types/enums.go index 6c0737c8818..7257ccb0189 100644 --- a/service/lakeformation/types/enums.go +++ b/service/lakeformation/types/enums.go @@ -6,17 +6,17 @@ type ComparisonOperator string // Enum values for ComparisonOperator const ( - ComparisonOperatorEq ComparisonOperator = "EQ" - ComparisonOperatorNe ComparisonOperator = "NE" - ComparisonOperatorLe ComparisonOperator = "LE" - ComparisonOperatorLt ComparisonOperator = "LT" - ComparisonOperatorGe ComparisonOperator = "GE" - ComparisonOperatorGt ComparisonOperator = "GT" - ComparisonOperatorContains ComparisonOperator = "CONTAINS" - ComparisonOperatorNot_contains ComparisonOperator = "NOT_CONTAINS" - ComparisonOperatorBegins_with ComparisonOperator = "BEGINS_WITH" - ComparisonOperatorIn ComparisonOperator = "IN" - ComparisonOperatorBetween ComparisonOperator = "BETWEEN" + ComparisonOperatorEq ComparisonOperator = "EQ" + ComparisonOperatorNe ComparisonOperator = "NE" + ComparisonOperatorLe ComparisonOperator = "LE" + ComparisonOperatorLt ComparisonOperator = "LT" + ComparisonOperatorGe ComparisonOperator = "GE" + ComparisonOperatorGt ComparisonOperator = "GT" + ComparisonOperatorContains ComparisonOperator = "CONTAINS" + ComparisonOperatorNotContains ComparisonOperator = "NOT_CONTAINS" + ComparisonOperatorBeginsWith ComparisonOperator = "BEGINS_WITH" + ComparisonOperatorIn ComparisonOperator = "IN" + ComparisonOperatorBetween ComparisonOperator = "BETWEEN" ) // Values returns all known values for ComparisonOperator. 
Note that this can be @@ -42,10 +42,10 @@ type DataLakeResourceType string // Enum values for DataLakeResourceType const ( - DataLakeResourceTypeCatalog DataLakeResourceType = "CATALOG" - DataLakeResourceTypeDatabase DataLakeResourceType = "DATABASE" - DataLakeResourceTypeTable DataLakeResourceType = "TABLE" - DataLakeResourceTypeData_location DataLakeResourceType = "DATA_LOCATION" + DataLakeResourceTypeCatalog DataLakeResourceType = "CATALOG" + DataLakeResourceTypeDatabase DataLakeResourceType = "DATABASE" + DataLakeResourceTypeTable DataLakeResourceType = "TABLE" + DataLakeResourceTypeDataLocation DataLakeResourceType = "DATA_LOCATION" ) // Values returns all known values for DataLakeResourceType. Note that this can be @@ -64,9 +64,9 @@ type FieldNameString string // Enum values for FieldNameString const ( - FieldNameStringResource_arn FieldNameString = "RESOURCE_ARN" - FieldNameStringRole_arn FieldNameString = "ROLE_ARN" - FieldNameStringLast_modified FieldNameString = "LAST_MODIFIED" + FieldNameStringResourceArn FieldNameString = "RESOURCE_ARN" + FieldNameStringRoleArn FieldNameString = "ROLE_ARN" + FieldNameStringLastModified FieldNameString = "LAST_MODIFIED" ) // Values returns all known values for FieldNameString. Note that this can be @@ -84,16 +84,16 @@ type Permission string // Enum values for Permission const ( - PermissionAll Permission = "ALL" - PermissionSelect Permission = "SELECT" - PermissionAlter Permission = "ALTER" - PermissionDrop Permission = "DROP" - PermissionDelete Permission = "DELETE" - PermissionInsert Permission = "INSERT" - PermissionDescribe Permission = "DESCRIBE" - PermissionCreate_database Permission = "CREATE_DATABASE" - PermissionCreate_table Permission = "CREATE_TABLE" - PermissionData_location_access Permission = "DATA_LOCATION_ACCESS" + PermissionAll Permission = "ALL" + PermissionSelect Permission = "SELECT" + PermissionAlter Permission = "ALTER" + PermissionDrop Permission = "DROP" + PermissionDelete Permission = "DELETE" + PermissionInsert Permission = "INSERT" + PermissionDescribe Permission = "DESCRIBE" + PermissionCreateDatabase Permission = "CREATE_DATABASE" + PermissionCreateTable Permission = "CREATE_TABLE" + PermissionDataLocationAccess Permission = "DATA_LOCATION_ACCESS" ) // Values returns all known values for Permission. Note that this can be expanded diff --git a/service/lambda/api_op_AddPermission.go b/service/lambda/api_op_AddPermission.go index c311e632ff1..5e56db8bd86 100644 --- a/service/lambda/api_op_AddPermission.go +++ b/service/lambda/api_op_AddPermission.go @@ -49,18 +49,18 @@ type AddPermissionInput struct { // The name of the Lambda function, version, or alias. Name formats // - // * Function + // * Function // name - my-function (name-only), my-function:v1 (with alias). // - // * Function ARN - // - arn:aws:lambda:us-west-2:123456789012:function:my-function. + // * Function ARN - + // arn:aws:lambda:us-west-2:123456789012:function:my-function. // - // * Partial ARN - // - 123456789012:function:my-function. + // * Partial ARN - + // 123456789012:function:my-function. // - // You can append a version number or alias - // to any of the formats. The length constraint applies only to the full ARN. If - // you specify only the function name, it is limited to 64 characters in length. + // You can append a version number or alias to + // any of the formats. The length constraint applies only to the full ARN. If you + // specify only the function name, it is limited to 64 characters in length. // // This member is required. 
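The FunctionName formats documented above apply to AddPermission like every other Lambda operation. A sketch that grants an S3 bucket permission to invoke a function; the bucket ARN, statement ID, and function name are placeholders.

package examples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/lambda"
)

// allowS3Invoke lets a specific S3 bucket invoke the function. Any of the name
// formats listed above (name, full ARN, partial ARN) is accepted for FunctionName.
func allowS3Invoke(ctx context.Context, client *lambda.Client) error {
	_, err := client.AddPermission(ctx, &lambda.AddPermissionInput{
		FunctionName: aws.String("my-function"),                 // name-only format
		StatementId:  aws.String("AllowExampleBucket"),          // placeholder statement ID
		Action:       aws.String("lambda:InvokeFunction"),
		Principal:    aws.String("s3.amazonaws.com"),
		SourceArn:    aws.String("arn:aws:s3:::example-bucket"), // placeholder bucket ARN
	})
	return err
}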
FunctionName *string diff --git a/service/lambda/api_op_CreateAlias.go b/service/lambda/api_op_CreateAlias.go index 3884b355232..477f1d05531 100644 --- a/service/lambda/api_op_CreateAlias.go +++ b/service/lambda/api_op_CreateAlias.go @@ -37,18 +37,17 @@ type CreateAliasInput struct { // The name of the Lambda function. Name formats // - // * Function name - - // MyFunction. + // * Function name - MyFunction. // - // * Function ARN - - // arn:aws:lambda:us-west-2:123456789012:function:MyFunction. + // * + // Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction. // - // * Partial ARN - - // 123456789012:function:MyFunction. + // * + // Partial ARN - 123456789012:function:MyFunction. // - // The length constraint applies only to the - // full ARN. If you specify only the function name, it is limited to 64 characters - // in length. + // The length constraint applies + // only to the full ARN. If you specify only the function name, it is limited to 64 + // characters in length. // // This member is required. FunctionName *string diff --git a/service/lambda/api_op_CreateEventSourceMapping.go b/service/lambda/api_op_CreateEventSourceMapping.go index 37817716630..f6646643f68 100644 --- a/service/lambda/api_op_CreateEventSourceMapping.go +++ b/service/lambda/api_op_CreateEventSourceMapping.go @@ -16,42 +16,41 @@ import ( // reads items from the event source and triggers the function. For details about // each event source type, see the following topics. // -// * Using AWS Lambda with +// * Using AWS Lambda with // Amazon DynamoDB (https://docs.aws.amazon.com/lambda/latest/dg/with-ddb.html) // -// -// * Using AWS Lambda with Amazon Kinesis +// * +// Using AWS Lambda with Amazon Kinesis // (https://docs.aws.amazon.com/lambda/latest/dg/with-kinesis.html) // -// * Using -// AWS Lambda with Amazon SQS +// * Using AWS +// Lambda with Amazon SQS // (https://docs.aws.amazon.com/lambda/latest/dg/with-sqs.html) // -// * Using AWS -// Lambda with Amazon MSK +// * Using AWS Lambda +// with Amazon MSK // (https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html) // // The following // error handling options are only available for stream sources (DynamoDB and // Kinesis): // -// * BisectBatchOnFunctionError - If the function returns an error, +// * BisectBatchOnFunctionError - If the function returns an error, // split the batch in two and retry. // -// * DestinationConfig - Send discarded -// records to an Amazon SQS queue or Amazon SNS topic. +// * DestinationConfig - Send discarded records +// to an Amazon SQS queue or Amazon SNS topic. // -// * -// MaximumRecordAgeInSeconds - Discard records older than the specified age. -// Default -1 (infinite). Minimum 60. Maximum 604800. +// * MaximumRecordAgeInSeconds - +// Discard records older than the specified age. Default -1 (infinite). Minimum 60. +// Maximum 604800. // -// * MaximumRetryAttempts - -// Discard records after the specified number of retries. Default -1 (infinite). -// Minimum 0. Maximum 10000. When infinite, failed records will be retried until -// the record expires. +// * MaximumRetryAttempts - Discard records after the specified +// number of retries. Default -1 (infinite). Minimum 0. Maximum 10000. When +// infinite, failed records will be retried until the record expires. // -// * ParallelizationFactor - Process multiple batches from -// each shard concurrently. +// * +// ParallelizationFactor - Process multiple batches from each shard concurrently. 
func (c *Client) CreateEventSourceMapping(ctx context.Context, params *CreateEventSourceMappingInput, optFns ...func(*Options)) (*CreateEventSourceMappingOutput, error) { if params == nil { params = &CreateEventSourceMappingInput{} @@ -71,54 +70,53 @@ type CreateEventSourceMappingInput struct { // The Amazon Resource Name (ARN) of the event source. // - // * Amazon Kinesis - The - // ARN of the data stream or a stream consumer. + // * Amazon Kinesis - The ARN + // of the data stream or a stream consumer. // - // * Amazon DynamoDB Streams - - // The ARN of the stream. + // * Amazon DynamoDB Streams - The ARN of + // the stream. // - // * Amazon Simple Queue Service - The ARN of the - // queue. + // * Amazon Simple Queue Service - The ARN of the queue. // - // * Amazon Managed Streaming for Apache Kafka - The ARN of the - // cluster. + // * Amazon + // Managed Streaming for Apache Kafka - The ARN of the cluster. // // This member is required. EventSourceArn *string // The name of the Lambda function. Name formats // - // * Function name - - // MyFunction. - // - // * Function ARN - - // arn:aws:lambda:us-west-2:123456789012:function:MyFunction. + // * Function name - MyFunction. // - // * Version or - // Alias ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction:PROD. + // * + // Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction. // + // * + // Version or Alias ARN - + // arn:aws:lambda:us-west-2:123456789012:function:MyFunction:PROD. // - // * Partial ARN - 123456789012:function:MyFunction. + // * Partial ARN - + // 123456789012:function:MyFunction. // - // The length constraint applies - // only to the full ARN. If you specify only the function name, it's limited to 64 - // characters in length. + // The length constraint applies only to the + // full ARN. If you specify only the function name, it's limited to 64 characters + // in length. // // This member is required. FunctionName *string // The maximum number of items to retrieve in a single batch. // - // * Amazon Kinesis - // - Default 100. Max 10,000. + // * Amazon Kinesis - + // Default 100. Max 10,000. // - // * Amazon DynamoDB Streams - Default 100. Max - // 1,000. + // * Amazon DynamoDB Streams - Default 100. Max 1,000. // - // * Amazon Simple Queue Service - Default 10. Max 10. + // * + // Amazon Simple Queue Service - Default 10. Max 10. // - // * Amazon - // Managed Streaming for Apache Kafka - Default 100. Max 10,000. + // * Amazon Managed Streaming + // for Apache Kafka - Default 100. Max 10,000. BatchSize *int32 // (Streams) If the function returns an error, split the batch in two and retry. diff --git a/service/lambda/api_op_CreateFunction.go b/service/lambda/api_op_CreateFunction.go index 8a52032c103..c352d0fa79d 100644 --- a/service/lambda/api_op_CreateFunction.go +++ b/service/lambda/api_op_CreateFunction.go @@ -67,18 +67,17 @@ type CreateFunctionInput struct { // The name of the Lambda function. Name formats // - // * Function name - - // my-function. + // * Function name - my-function. // - // * Function ARN - - // arn:aws:lambda:us-west-2:123456789012:function:my-function. + // * + // Function ARN - arn:aws:lambda:us-west-2:123456789012:function:my-function. // - // * Partial ARN - - // 123456789012:function:my-function. + // * + // Partial ARN - 123456789012:function:my-function. // - // The length constraint applies only to the - // full ARN. If you specify only the function name, it is limited to 64 characters - // in length. 
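Mapping an SQS queue to a function uses the EventSourceArn, FunctionName, and BatchSize fields documented above. A sketch with placeholder ARNs, assuming the usual aws.String and aws.Int32 pointer helpers; BatchSize stays within the SQS maximum of 10.

package examples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/lambda"
)

// mapQueueToFunction creates an event source mapping that polls an SQS queue and
// invokes the function with batches of up to 10 messages.
func mapQueueToFunction(ctx context.Context, client *lambda.Client) (*lambda.CreateEventSourceMappingOutput, error) {
	return client.CreateEventSourceMapping(ctx, &lambda.CreateEventSourceMappingInput{
		EventSourceArn: aws.String("arn:aws:sqs:us-west-2:123456789012:example-queue"), // placeholder queue ARN
		FunctionName:   aws.String("my-function"),                                      // placeholder function name
		BatchSize:      aws.Int32(10),                                                  // SQS default and maximum is 10
	})
}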
+ // The length constraint applies + // only to the full ARN. If you specify only the function name, it is limited to 64 + // characters in length. // // This member is required. FunctionName *string diff --git a/service/lambda/api_op_DeleteAlias.go b/service/lambda/api_op_DeleteAlias.go index 4f717831148..aea1adf5831 100644 --- a/service/lambda/api_op_DeleteAlias.go +++ b/service/lambda/api_op_DeleteAlias.go @@ -31,18 +31,17 @@ type DeleteAliasInput struct { // The name of the Lambda function. Name formats // - // * Function name - - // MyFunction. + // * Function name - MyFunction. // - // * Function ARN - - // arn:aws:lambda:us-west-2:123456789012:function:MyFunction. + // * + // Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction. // - // * Partial ARN - - // 123456789012:function:MyFunction. + // * + // Partial ARN - 123456789012:function:MyFunction. // - // The length constraint applies only to the - // full ARN. If you specify only the function name, it is limited to 64 characters - // in length. + // The length constraint applies + // only to the full ARN. If you specify only the function name, it is limited to 64 + // characters in length. // // This member is required. FunctionName *string diff --git a/service/lambda/api_op_DeleteFunction.go b/service/lambda/api_op_DeleteFunction.go index a14c5f520bc..3a156a01b88 100644 --- a/service/lambda/api_op_DeleteFunction.go +++ b/service/lambda/api_op_DeleteFunction.go @@ -35,13 +35,13 @@ type DeleteFunctionInput struct { // The name of the Lambda function or version. Name formats // - // * Function name - + // * Function name - // my-function (name-only), my-function:1 (with version). // - // * Function ARN - + // * Function ARN - // arn:aws:lambda:us-west-2:123456789012:function:my-function. // - // * Partial ARN - + // * Partial ARN - // 123456789012:function:my-function. // // You can append a version number or alias to diff --git a/service/lambda/api_op_DeleteFunctionConcurrency.go b/service/lambda/api_op_DeleteFunctionConcurrency.go index d09e19d5f3a..f352e227cc7 100644 --- a/service/lambda/api_op_DeleteFunctionConcurrency.go +++ b/service/lambda/api_op_DeleteFunctionConcurrency.go @@ -30,18 +30,17 @@ type DeleteFunctionConcurrencyInput struct { // The name of the Lambda function. Name formats // - // * Function name - - // my-function. + // * Function name - my-function. // - // * Function ARN - - // arn:aws:lambda:us-west-2:123456789012:function:my-function. + // * + // Function ARN - arn:aws:lambda:us-west-2:123456789012:function:my-function. // - // * Partial ARN - - // 123456789012:function:my-function. + // * + // Partial ARN - 123456789012:function:my-function. // - // The length constraint applies only to the - // full ARN. If you specify only the function name, it is limited to 64 characters - // in length. + // The length constraint applies + // only to the full ARN. If you specify only the function name, it is limited to 64 + // characters in length. // // This member is required. FunctionName *string diff --git a/service/lambda/api_op_DeleteFunctionEventInvokeConfig.go b/service/lambda/api_op_DeleteFunctionEventInvokeConfig.go index 151af203b4c..855c3f9019d 100644 --- a/service/lambda/api_op_DeleteFunctionEventInvokeConfig.go +++ b/service/lambda/api_op_DeleteFunctionEventInvokeConfig.go @@ -32,18 +32,18 @@ type DeleteFunctionEventInvokeConfigInput struct { // The name of the Lambda function, version, or alias. 
Name formats // - // * Function + // * Function // name - my-function (name-only), my-function:v1 (with alias). // - // * Function ARN - // - arn:aws:lambda:us-west-2:123456789012:function:my-function. + // * Function ARN - + // arn:aws:lambda:us-west-2:123456789012:function:my-function. // - // * Partial ARN - // - 123456789012:function:my-function. + // * Partial ARN - + // 123456789012:function:my-function. // - // You can append a version number or alias - // to any of the formats. The length constraint applies only to the full ARN. If - // you specify only the function name, it is limited to 64 characters in length. + // You can append a version number or alias to + // any of the formats. The length constraint applies only to the full ARN. If you + // specify only the function name, it is limited to 64 characters in length. // // This member is required. FunctionName *string diff --git a/service/lambda/api_op_DeleteProvisionedConcurrencyConfig.go b/service/lambda/api_op_DeleteProvisionedConcurrencyConfig.go index 89d0b976f60..cc2f79c1b9c 100644 --- a/service/lambda/api_op_DeleteProvisionedConcurrencyConfig.go +++ b/service/lambda/api_op_DeleteProvisionedConcurrencyConfig.go @@ -30,18 +30,17 @@ type DeleteProvisionedConcurrencyConfigInput struct { // The name of the Lambda function. Name formats // - // * Function name - - // my-function. + // * Function name - my-function. // - // * Function ARN - - // arn:aws:lambda:us-west-2:123456789012:function:my-function. + // * + // Function ARN - arn:aws:lambda:us-west-2:123456789012:function:my-function. // - // * Partial ARN - - // 123456789012:function:my-function. + // * + // Partial ARN - 123456789012:function:my-function. // - // The length constraint applies only to the - // full ARN. If you specify only the function name, it is limited to 64 characters - // in length. + // The length constraint applies + // only to the full ARN. If you specify only the function name, it is limited to 64 + // characters in length. // // This member is required. FunctionName *string diff --git a/service/lambda/api_op_GetAlias.go b/service/lambda/api_op_GetAlias.go index f77d32cbec3..c697fcbc897 100644 --- a/service/lambda/api_op_GetAlias.go +++ b/service/lambda/api_op_GetAlias.go @@ -32,18 +32,17 @@ type GetAliasInput struct { // The name of the Lambda function. Name formats // - // * Function name - - // MyFunction. + // * Function name - MyFunction. // - // * Function ARN - - // arn:aws:lambda:us-west-2:123456789012:function:MyFunction. + // * + // Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction. // - // * Partial ARN - - // 123456789012:function:MyFunction. + // * + // Partial ARN - 123456789012:function:MyFunction. // - // The length constraint applies only to the - // full ARN. If you specify only the function name, it is limited to 64 characters - // in length. + // The length constraint applies + // only to the full ARN. If you specify only the function name, it is limited to 64 + // characters in length. // // This member is required. FunctionName *string diff --git a/service/lambda/api_op_GetFunction.go b/service/lambda/api_op_GetFunction.go index b34a21c0ef1..3d6fe5cebf7 100644 --- a/service/lambda/api_op_GetFunction.go +++ b/service/lambda/api_op_GetFunction.go @@ -33,18 +33,18 @@ type GetFunctionInput struct { // The name of the Lambda function, version, or alias. Name formats // - // * Function + // * Function // name - my-function (name-only), my-function:v1 (with alias). 
// - // * Function ARN - // - arn:aws:lambda:us-west-2:123456789012:function:my-function. + // * Function ARN - + // arn:aws:lambda:us-west-2:123456789012:function:my-function. // - // * Partial ARN - // - 123456789012:function:my-function. + // * Partial ARN - + // 123456789012:function:my-function. // - // You can append a version number or alias - // to any of the formats. The length constraint applies only to the full ARN. If - // you specify only the function name, it is limited to 64 characters in length. + // You can append a version number or alias to + // any of the formats. The length constraint applies only to the full ARN. If you + // specify only the function name, it is limited to 64 characters in length. // // This member is required. FunctionName *string diff --git a/service/lambda/api_op_GetFunctionConcurrency.go b/service/lambda/api_op_GetFunctionConcurrency.go index 1dd003834b5..c210c389e89 100644 --- a/service/lambda/api_op_GetFunctionConcurrency.go +++ b/service/lambda/api_op_GetFunctionConcurrency.go @@ -31,18 +31,17 @@ type GetFunctionConcurrencyInput struct { // The name of the Lambda function. Name formats // - // * Function name - - // my-function. + // * Function name - my-function. // - // * Function ARN - - // arn:aws:lambda:us-west-2:123456789012:function:my-function. + // * + // Function ARN - arn:aws:lambda:us-west-2:123456789012:function:my-function. // - // * Partial ARN - - // 123456789012:function:my-function. + // * + // Partial ARN - 123456789012:function:my-function. // - // The length constraint applies only to the - // full ARN. If you specify only the function name, it is limited to 64 characters - // in length. + // The length constraint applies + // only to the full ARN. If you specify only the function name, it is limited to 64 + // characters in length. // // This member is required. FunctionName *string diff --git a/service/lambda/api_op_GetFunctionConfiguration.go b/service/lambda/api_op_GetFunctionConfiguration.go index 6846191e25f..3ac0f97bae0 100644 --- a/service/lambda/api_op_GetFunctionConfiguration.go +++ b/service/lambda/api_op_GetFunctionConfiguration.go @@ -34,18 +34,18 @@ type GetFunctionConfigurationInput struct { // The name of the Lambda function, version, or alias. Name formats // - // * Function + // * Function // name - my-function (name-only), my-function:v1 (with alias). // - // * Function ARN - // - arn:aws:lambda:us-west-2:123456789012:function:my-function. + // * Function ARN - + // arn:aws:lambda:us-west-2:123456789012:function:my-function. // - // * Partial ARN - // - 123456789012:function:my-function. + // * Partial ARN - + // 123456789012:function:my-function. // - // You can append a version number or alias - // to any of the formats. The length constraint applies only to the full ARN. If - // you specify only the function name, it is limited to 64 characters in length. + // You can append a version number or alias to + // any of the formats. The length constraint applies only to the full ARN. If you + // specify only the function name, it is limited to 64 characters in length. // // This member is required. 
FunctionName *string diff --git a/service/lambda/api_op_GetFunctionEventInvokeConfig.go b/service/lambda/api_op_GetFunctionEventInvokeConfig.go index 49f8f868c37..d07044800a3 100644 --- a/service/lambda/api_op_GetFunctionEventInvokeConfig.go +++ b/service/lambda/api_op_GetFunctionEventInvokeConfig.go @@ -34,18 +34,18 @@ type GetFunctionEventInvokeConfigInput struct { // The name of the Lambda function, version, or alias. Name formats // - // * Function + // * Function // name - my-function (name-only), my-function:v1 (with alias). // - // * Function ARN - // - arn:aws:lambda:us-west-2:123456789012:function:my-function. + // * Function ARN - + // arn:aws:lambda:us-west-2:123456789012:function:my-function. // - // * Partial ARN - // - 123456789012:function:my-function. + // * Partial ARN - + // 123456789012:function:my-function. // - // You can append a version number or alias - // to any of the formats. The length constraint applies only to the full ARN. If - // you specify only the function name, it is limited to 64 characters in length. + // You can append a version number or alias to + // any of the formats. The length constraint applies only to the full ARN. If you + // specify only the function name, it is limited to 64 characters in length. // // This member is required. FunctionName *string @@ -59,15 +59,15 @@ type GetFunctionEventInvokeConfigOutput struct { // A destination for events after they have been sent to a function for processing. // Destinations // - // * Function - The Amazon Resource Name (ARN) of a Lambda + // * Function - The Amazon Resource Name (ARN) of a Lambda // function. // - // * Queue - The ARN of an SQS queue. + // * Queue - The ARN of an SQS queue. // - // * Topic - The ARN of an - // SNS topic. + // * Topic - The ARN of an SNS + // topic. // - // * Event Bus - The ARN of an Amazon EventBridge event bus. + // * Event Bus - The ARN of an Amazon EventBridge event bus. DestinationConfig *types.DestinationConfig // The Amazon Resource Name (ARN) of the function. diff --git a/service/lambda/api_op_GetPolicy.go b/service/lambda/api_op_GetPolicy.go index 10ce40b5ed6..8e26c9a4a92 100644 --- a/service/lambda/api_op_GetPolicy.go +++ b/service/lambda/api_op_GetPolicy.go @@ -32,18 +32,18 @@ type GetPolicyInput struct { // The name of the Lambda function, version, or alias. Name formats // - // * Function + // * Function // name - my-function (name-only), my-function:v1 (with alias). // - // * Function ARN - // - arn:aws:lambda:us-west-2:123456789012:function:my-function. + // * Function ARN - + // arn:aws:lambda:us-west-2:123456789012:function:my-function. // - // * Partial ARN - // - 123456789012:function:my-function. + // * Partial ARN - + // 123456789012:function:my-function. // - // You can append a version number or alias - // to any of the formats. The length constraint applies only to the full ARN. If - // you specify only the function name, it is limited to 64 characters in length. + // You can append a version number or alias to + // any of the formats. The length constraint applies only to the full ARN. If you + // specify only the function name, it is limited to 64 characters in length. // // This member is required. 
FunctionName *string diff --git a/service/lambda/api_op_GetProvisionedConcurrencyConfig.go b/service/lambda/api_op_GetProvisionedConcurrencyConfig.go index e079cf08628..5ca4c36e1b9 100644 --- a/service/lambda/api_op_GetProvisionedConcurrencyConfig.go +++ b/service/lambda/api_op_GetProvisionedConcurrencyConfig.go @@ -32,18 +32,17 @@ type GetProvisionedConcurrencyConfigInput struct { // The name of the Lambda function. Name formats // - // * Function name - - // my-function. + // * Function name - my-function. // - // * Function ARN - - // arn:aws:lambda:us-west-2:123456789012:function:my-function. + // * + // Function ARN - arn:aws:lambda:us-west-2:123456789012:function:my-function. // - // * Partial ARN - - // 123456789012:function:my-function. + // * + // Partial ARN - 123456789012:function:my-function. // - // The length constraint applies only to the - // full ARN. If you specify only the function name, it is limited to 64 characters - // in length. + // The length constraint applies + // only to the full ARN. If you specify only the function name, it is limited to 64 + // characters in length. // // This member is required. FunctionName *string diff --git a/service/lambda/api_op_Invoke.go b/service/lambda/api_op_Invoke.go index c124a830147..7c763d81b35 100644 --- a/service/lambda/api_op_Invoke.go +++ b/service/lambda/api_op_Invoke.go @@ -66,18 +66,18 @@ type InvokeInput struct { // The name of the Lambda function, version, or alias. Name formats // - // * Function + // * Function // name - my-function (name-only), my-function:v1 (with alias). // - // * Function ARN - // - arn:aws:lambda:us-west-2:123456789012:function:my-function. + // * Function ARN - + // arn:aws:lambda:us-west-2:123456789012:function:my-function. // - // * Partial ARN - // - 123456789012:function:my-function. + // * Partial ARN - + // 123456789012:function:my-function. // - // You can append a version number or alias - // to any of the formats. The length constraint applies only to the full ARN. If - // you specify only the function name, it is limited to 64 characters in length. + // You can append a version number or alias to + // any of the formats. The length constraint applies only to the full ARN. If you + // specify only the function name, it is limited to 64 characters in length. // // This member is required. FunctionName *string @@ -88,18 +88,17 @@ type InvokeInput struct { // Choose from the following options. // - // * RequestResponse (default) - Invoke the + // * RequestResponse (default) - Invoke the // function synchronously. Keep the connection open until the function returns a // response or times out. The API response includes the function response and // additional data. // - // * Event - Invoke the function asynchronously. Send events - // that fail multiple times to the function's dead-letter queue (if it's - // configured). The API response only includes a status code. + // * Event - Invoke the function asynchronously. Send events that + // fail multiple times to the function's dead-letter queue (if it's configured). + // The API response only includes a status code. // - // * DryRun - - // Validate parameter values and verify that the user or role has permission to - // invoke the function. + // * DryRun - Validate parameter + // values and verify that the user or role has permission to invoke the function. InvocationType types.InvocationType // Set to Tail to include the execution log in the response. 
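The invocation types described above (RequestResponse, Event, DryRun) are selected on InvokeInput. This sketch fires an asynchronous event invocation; the function name and payload are placeholders, and a typed-string conversion is used instead of assuming a specific enum constant name.

package examples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/lambda"
	"github.com/aws/aws-sdk-go-v2/service/lambda/types"
)

// invokeAsync sends an event to the function without waiting for it to finish;
// the API responds with only a status code, as described above.
func invokeAsync(ctx context.Context, client *lambda.Client, payload []byte) error {
	_, err := client.Invoke(ctx, &lambda.InvokeInput{
		FunctionName:   aws.String("my-function"),     // placeholder; any documented name format works
		Payload:        payload,                       // JSON event body
		InvocationType: types.InvocationType("Event"), // asynchronous; failures go to the DLQ if configured
	})
	return err
}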
diff --git a/service/lambda/api_op_InvokeAsync.go b/service/lambda/api_op_InvokeAsync.go index 8f67b68aa1d..46b10519c66 100644 --- a/service/lambda/api_op_InvokeAsync.go +++ b/service/lambda/api_op_InvokeAsync.go @@ -32,18 +32,17 @@ type InvokeAsyncInput struct { // The name of the Lambda function. Name formats // - // * Function name - - // my-function. + // * Function name - my-function. // - // * Function ARN - - // arn:aws:lambda:us-west-2:123456789012:function:my-function. + // * + // Function ARN - arn:aws:lambda:us-west-2:123456789012:function:my-function. // - // * Partial ARN - - // 123456789012:function:my-function. + // * + // Partial ARN - 123456789012:function:my-function. // - // The length constraint applies only to the - // full ARN. If you specify only the function name, it is limited to 64 characters - // in length. + // The length constraint applies + // only to the full ARN. If you specify only the function name, it is limited to 64 + // characters in length. // // This member is required. FunctionName *string diff --git a/service/lambda/api_op_ListAliases.go b/service/lambda/api_op_ListAliases.go index 85ca5102b3a..e9f108e803c 100644 --- a/service/lambda/api_op_ListAliases.go +++ b/service/lambda/api_op_ListAliases.go @@ -33,18 +33,17 @@ type ListAliasesInput struct { // The name of the Lambda function. Name formats // - // * Function name - - // MyFunction. + // * Function name - MyFunction. // - // * Function ARN - - // arn:aws:lambda:us-west-2:123456789012:function:MyFunction. + // * + // Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction. // - // * Partial ARN - - // 123456789012:function:MyFunction. + // * + // Partial ARN - 123456789012:function:MyFunction. // - // The length constraint applies only to the - // full ARN. If you specify only the function name, it is limited to 64 characters - // in length. + // The length constraint applies + // only to the full ARN. If you specify only the function name, it is limited to 64 + // characters in length. // // This member is required. FunctionName *string diff --git a/service/lambda/api_op_ListEventSourceMappings.go b/service/lambda/api_op_ListEventSourceMappings.go index 16a7aedd571..cf6b422107d 100644 --- a/service/lambda/api_op_ListEventSourceMappings.go +++ b/service/lambda/api_op_ListEventSourceMappings.go @@ -32,36 +32,35 @@ type ListEventSourceMappingsInput struct { // The Amazon Resource Name (ARN) of the event source. // - // * Amazon Kinesis - The - // ARN of the data stream or a stream consumer. + // * Amazon Kinesis - The ARN + // of the data stream or a stream consumer. // - // * Amazon DynamoDB Streams - - // The ARN of the stream. + // * Amazon DynamoDB Streams - The ARN of + // the stream. // - // * Amazon Simple Queue Service - The ARN of the - // queue. + // * Amazon Simple Queue Service - The ARN of the queue. // - // * Amazon Managed Streaming for Apache Kafka - The ARN of the - // cluster. + // * Amazon + // Managed Streaming for Apache Kafka - The ARN of the cluster. EventSourceArn *string // The name of the Lambda function. Name formats // - // * Function name - - // MyFunction. + // * Function name - MyFunction. // - // * Function ARN - - // arn:aws:lambda:us-west-2:123456789012:function:MyFunction. + // * + // Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction. // - // * Version or - // Alias ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction:PROD. 
+ // * + // Version or Alias ARN - + // arn:aws:lambda:us-west-2:123456789012:function:MyFunction:PROD. // + // * Partial ARN - + // 123456789012:function:MyFunction. // - // * Partial ARN - 123456789012:function:MyFunction. - // - // The length constraint applies - // only to the full ARN. If you specify only the function name, it's limited to 64 - // characters in length. + // The length constraint applies only to the + // full ARN. If you specify only the function name, it's limited to 64 characters + // in length. FunctionName *string // A pagination token returned by a previous call. diff --git a/service/lambda/api_op_ListFunctionEventInvokeConfigs.go b/service/lambda/api_op_ListFunctionEventInvokeConfigs.go index 7c432814ee6..a0f731cf4be 100644 --- a/service/lambda/api_op_ListFunctionEventInvokeConfigs.go +++ b/service/lambda/api_op_ListFunctionEventInvokeConfigs.go @@ -33,18 +33,17 @@ type ListFunctionEventInvokeConfigsInput struct { // The name of the Lambda function. Name formats // - // * Function name - - // my-function. + // * Function name - my-function. // - // * Function ARN - - // arn:aws:lambda:us-west-2:123456789012:function:my-function. + // * + // Function ARN - arn:aws:lambda:us-west-2:123456789012:function:my-function. // - // * Partial ARN - - // 123456789012:function:my-function. + // * + // Partial ARN - 123456789012:function:my-function. // - // The length constraint applies only to the - // full ARN. If you specify only the function name, it is limited to 64 characters - // in length. + // The length constraint applies + // only to the full ARN. If you specify only the function name, it is limited to 64 + // characters in length. // // This member is required. FunctionName *string diff --git a/service/lambda/api_op_ListProvisionedConcurrencyConfigs.go b/service/lambda/api_op_ListProvisionedConcurrencyConfigs.go index bfd17c9e0fb..f08e45ef293 100644 --- a/service/lambda/api_op_ListProvisionedConcurrencyConfigs.go +++ b/service/lambda/api_op_ListProvisionedConcurrencyConfigs.go @@ -31,18 +31,17 @@ type ListProvisionedConcurrencyConfigsInput struct { // The name of the Lambda function. Name formats // - // * Function name - - // my-function. + // * Function name - my-function. // - // * Function ARN - - // arn:aws:lambda:us-west-2:123456789012:function:my-function. + // * + // Function ARN - arn:aws:lambda:us-west-2:123456789012:function:my-function. // - // * Partial ARN - - // 123456789012:function:my-function. + // * + // Partial ARN - 123456789012:function:my-function. // - // The length constraint applies only to the - // full ARN. If you specify only the function name, it is limited to 64 characters - // in length. + // The length constraint applies + // only to the full ARN. If you specify only the function name, it is limited to 64 + // characters in length. // // This member is required. FunctionName *string diff --git a/service/lambda/api_op_ListVersionsByFunction.go b/service/lambda/api_op_ListVersionsByFunction.go index c5527a20eaf..3fea2ddc48f 100644 --- a/service/lambda/api_op_ListVersionsByFunction.go +++ b/service/lambda/api_op_ListVersionsByFunction.go @@ -34,18 +34,17 @@ type ListVersionsByFunctionInput struct { // The name of the Lambda function. Name formats // - // * Function name - - // MyFunction. + // * Function name - MyFunction. // - // * Function ARN - - // arn:aws:lambda:us-west-2:123456789012:function:MyFunction. + // * + // Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction. 
// - // * Partial ARN - - // 123456789012:function:MyFunction. + // * + // Partial ARN - 123456789012:function:MyFunction. // - // The length constraint applies only to the - // full ARN. If you specify only the function name, it is limited to 64 characters - // in length. + // The length constraint applies + // only to the full ARN. If you specify only the function name, it is limited to 64 + // characters in length. // // This member is required. FunctionName *string diff --git a/service/lambda/api_op_PublishLayerVersion.go b/service/lambda/api_op_PublishLayerVersion.go index 36f12aea277..2a95acc6ac8 100644 --- a/service/lambda/api_op_PublishLayerVersion.go +++ b/service/lambda/api_op_PublishLayerVersion.go @@ -53,14 +53,14 @@ type PublishLayerVersionInput struct { // The layer's software license. It can be any of the following: // - // * An SPDX - // license identifier (https://spdx.org/licenses/). For example, MIT. + // * An SPDX license + // identifier (https://spdx.org/licenses/). For example, MIT. // - // * The - // URL of a license hosted on the internet. For example, + // * The URL of a + // license hosted on the internet. For example, // https://opensource.org/licenses/MIT. // - // * The full text of the license. + // * The full text of the license. LicenseInfo *string } diff --git a/service/lambda/api_op_PublishVersion.go b/service/lambda/api_op_PublishVersion.go index e55b28767a9..f4f666c6dc1 100644 --- a/service/lambda/api_op_PublishVersion.go +++ b/service/lambda/api_op_PublishVersion.go @@ -38,18 +38,17 @@ type PublishVersionInput struct { // The name of the Lambda function. Name formats // - // * Function name - - // MyFunction. + // * Function name - MyFunction. // - // * Function ARN - - // arn:aws:lambda:us-west-2:123456789012:function:MyFunction. + // * + // Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction. // - // * Partial ARN - - // 123456789012:function:MyFunction. + // * + // Partial ARN - 123456789012:function:MyFunction. // - // The length constraint applies only to the - // full ARN. If you specify only the function name, it is limited to 64 characters - // in length. + // The length constraint applies + // only to the full ARN. If you specify only the function name, it is limited to 64 + // characters in length. // // This member is required. FunctionName *string diff --git a/service/lambda/api_op_PutFunctionConcurrency.go b/service/lambda/api_op_PutFunctionConcurrency.go index 6246251dd1e..24d1c41e072 100644 --- a/service/lambda/api_op_PutFunctionConcurrency.go +++ b/service/lambda/api_op_PutFunctionConcurrency.go @@ -40,18 +40,17 @@ type PutFunctionConcurrencyInput struct { // The name of the Lambda function. Name formats // - // * Function name - - // my-function. + // * Function name - my-function. // - // * Function ARN - - // arn:aws:lambda:us-west-2:123456789012:function:my-function. + // * + // Function ARN - arn:aws:lambda:us-west-2:123456789012:function:my-function. // - // * Partial ARN - - // 123456789012:function:my-function. + // * + // Partial ARN - 123456789012:function:my-function. // - // The length constraint applies only to the - // full ARN. If you specify only the function name, it is limited to 64 characters - // in length. + // The length constraint applies + // only to the full ARN. If you specify only the function name, it is limited to 64 + // characters in length. // // This member is required. 
FunctionName *string diff --git a/service/lambda/api_op_PutFunctionEventInvokeConfig.go b/service/lambda/api_op_PutFunctionEventInvokeConfig.go index 381cc9cffc9..ad488a60e5d 100644 --- a/service/lambda/api_op_PutFunctionEventInvokeConfig.go +++ b/service/lambda/api_op_PutFunctionEventInvokeConfig.go @@ -47,18 +47,18 @@ type PutFunctionEventInvokeConfigInput struct { // The name of the Lambda function, version, or alias. Name formats // - // * Function + // * Function // name - my-function (name-only), my-function:v1 (with alias). // - // * Function ARN - // - arn:aws:lambda:us-west-2:123456789012:function:my-function. + // * Function ARN - + // arn:aws:lambda:us-west-2:123456789012:function:my-function. // - // * Partial ARN - // - 123456789012:function:my-function. + // * Partial ARN - + // 123456789012:function:my-function. // - // You can append a version number or alias - // to any of the formats. The length constraint applies only to the full ARN. If - // you specify only the function name, it is limited to 64 characters in length. + // You can append a version number or alias to + // any of the formats. The length constraint applies only to the full ARN. If you + // specify only the function name, it is limited to 64 characters in length. // // This member is required. FunctionName *string @@ -66,15 +66,15 @@ type PutFunctionEventInvokeConfigInput struct { // A destination for events after they have been sent to a function for processing. // Destinations // - // * Function - The Amazon Resource Name (ARN) of a Lambda + // * Function - The Amazon Resource Name (ARN) of a Lambda // function. // - // * Queue - The ARN of an SQS queue. + // * Queue - The ARN of an SQS queue. // - // * Topic - The ARN of an - // SNS topic. + // * Topic - The ARN of an SNS + // topic. // - // * Event Bus - The ARN of an Amazon EventBridge event bus. + // * Event Bus - The ARN of an Amazon EventBridge event bus. DestinationConfig *types.DestinationConfig // The maximum age of a request that Lambda sends to a function for processing. @@ -92,15 +92,15 @@ type PutFunctionEventInvokeConfigOutput struct { // A destination for events after they have been sent to a function for processing. // Destinations // - // * Function - The Amazon Resource Name (ARN) of a Lambda + // * Function - The Amazon Resource Name (ARN) of a Lambda // function. // - // * Queue - The ARN of an SQS queue. + // * Queue - The ARN of an SQS queue. // - // * Topic - The ARN of an - // SNS topic. + // * Topic - The ARN of an SNS + // topic. // - // * Event Bus - The ARN of an Amazon EventBridge event bus. + // * Event Bus - The ARN of an Amazon EventBridge event bus. DestinationConfig *types.DestinationConfig // The Amazon Resource Name (ARN) of the function. diff --git a/service/lambda/api_op_PutProvisionedConcurrencyConfig.go b/service/lambda/api_op_PutProvisionedConcurrencyConfig.go index 94391a7743c..63b6b68f50e 100644 --- a/service/lambda/api_op_PutProvisionedConcurrencyConfig.go +++ b/service/lambda/api_op_PutProvisionedConcurrencyConfig.go @@ -31,18 +31,17 @@ type PutProvisionedConcurrencyConfigInput struct { // The name of the Lambda function. Name formats // - // * Function name - - // my-function. + // * Function name - my-function. // - // * Function ARN - - // arn:aws:lambda:us-west-2:123456789012:function:my-function. + // * + // Function ARN - arn:aws:lambda:us-west-2:123456789012:function:my-function. // - // * Partial ARN - - // 123456789012:function:my-function. + // * + // Partial ARN - 123456789012:function:my-function. 
// - // The length constraint applies only to the - // full ARN. If you specify only the function name, it is limited to 64 characters - // in length. + // The length constraint applies + // only to the full ARN. If you specify only the function name, it is limited to 64 + // characters in length. // // This member is required. FunctionName *string diff --git a/service/lambda/api_op_RemovePermission.go b/service/lambda/api_op_RemovePermission.go index 164c8f4cc4a..bbd355537b5 100644 --- a/service/lambda/api_op_RemovePermission.go +++ b/service/lambda/api_op_RemovePermission.go @@ -31,18 +31,18 @@ type RemovePermissionInput struct { // The name of the Lambda function, version, or alias. Name formats // - // * Function + // * Function // name - my-function (name-only), my-function:v1 (with alias). // - // * Function ARN - // - arn:aws:lambda:us-west-2:123456789012:function:my-function. + // * Function ARN - + // arn:aws:lambda:us-west-2:123456789012:function:my-function. // - // * Partial ARN - // - 123456789012:function:my-function. + // * Partial ARN - + // 123456789012:function:my-function. // - // You can append a version number or alias - // to any of the formats. The length constraint applies only to the full ARN. If - // you specify only the function name, it is limited to 64 characters in length. + // You can append a version number or alias to + // any of the formats. The length constraint applies only to the full ARN. If you + // specify only the function name, it is limited to 64 characters in length. // // This member is required. FunctionName *string diff --git a/service/lambda/api_op_UpdateAlias.go b/service/lambda/api_op_UpdateAlias.go index c7958535017..4299cb91f47 100644 --- a/service/lambda/api_op_UpdateAlias.go +++ b/service/lambda/api_op_UpdateAlias.go @@ -32,18 +32,17 @@ type UpdateAliasInput struct { // The name of the Lambda function. Name formats // - // * Function name - - // MyFunction. + // * Function name - MyFunction. // - // * Function ARN - - // arn:aws:lambda:us-west-2:123456789012:function:MyFunction. + // * + // Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction. // - // * Partial ARN - - // 123456789012:function:MyFunction. + // * + // Partial ARN - 123456789012:function:MyFunction. // - // The length constraint applies only to the - // full ARN. If you specify only the function name, it is limited to 64 characters - // in length. + // The length constraint applies + // only to the full ARN. If you specify only the function name, it is limited to 64 + // characters in length. // // This member is required. FunctionName *string diff --git a/service/lambda/api_op_UpdateEventSourceMapping.go b/service/lambda/api_op_UpdateEventSourceMapping.go index 0a83c68b7f3..77c8ab217e5 100644 --- a/service/lambda/api_op_UpdateEventSourceMapping.go +++ b/service/lambda/api_op_UpdateEventSourceMapping.go @@ -17,23 +17,22 @@ import ( // following error handling options are only available for stream sources (DynamoDB // and Kinesis): // -// * BisectBatchOnFunctionError - If the function returns an -// error, split the batch in two and retry. +// * BisectBatchOnFunctionError - If the function returns an error, +// split the batch in two and retry. // -// * DestinationConfig - Send -// discarded records to an Amazon SQS queue or Amazon SNS topic. +// * DestinationConfig - Send discarded records +// to an Amazon SQS queue or Amazon SNS topic. // -// * -// MaximumRecordAgeInSeconds - Discard records older than the specified age. -// Default -1 (infinite). 
Minimum 60. Maximum 604800. +// * MaximumRecordAgeInSeconds - +// Discard records older than the specified age. Default -1 (infinite). Minimum 60. +// Maximum 604800. // -// * MaximumRetryAttempts - -// Discard records after the specified number of retries. Default -1 (infinite). -// Minimum 0. Maximum 10000. When infinite, failed records will be retried until -// the record expires. +// * MaximumRetryAttempts - Discard records after the specified +// number of retries. Default -1 (infinite). Minimum 0. Maximum 10000. When +// infinite, failed records will be retried until the record expires. // -// * ParallelizationFactor - Process multiple batches from -// each shard concurrently. +// * +// ParallelizationFactor - Process multiple batches from each shard concurrently. func (c *Client) UpdateEventSourceMapping(ctx context.Context, params *UpdateEventSourceMappingInput, optFns ...func(*Options)) (*UpdateEventSourceMappingOutput, error) { if params == nil { params = &UpdateEventSourceMappingInput{} @@ -58,16 +57,16 @@ type UpdateEventSourceMappingInput struct { // The maximum number of items to retrieve in a single batch. // - // * Amazon Kinesis - // - Default 100. Max 10,000. + // * Amazon Kinesis - + // Default 100. Max 10,000. // - // * Amazon DynamoDB Streams - Default 100. Max - // 1,000. + // * Amazon DynamoDB Streams - Default 100. Max 1,000. // - // * Amazon Simple Queue Service - Default 10. Max 10. + // * + // Amazon Simple Queue Service - Default 10. Max 10. // - // * Amazon - // Managed Streaming for Apache Kafka - Default 100. Max 10,000. + // * Amazon Managed Streaming + // for Apache Kafka - Default 100. Max 10,000. BatchSize *int32 // (Streams) If the function returns an error, split the batch in two and retry. @@ -83,21 +82,21 @@ type UpdateEventSourceMappingInput struct { // The name of the Lambda function. Name formats // - // * Function name - - // MyFunction. + // * Function name - MyFunction. // - // * Function ARN - - // arn:aws:lambda:us-west-2:123456789012:function:MyFunction. + // * + // Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction. // - // * Version or - // Alias ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction:PROD. + // * + // Version or Alias ARN - + // arn:aws:lambda:us-west-2:123456789012:function:MyFunction:PROD. // + // * Partial ARN - + // 123456789012:function:MyFunction. // - // * Partial ARN - 123456789012:function:MyFunction. - // - // The length constraint applies - // only to the full ARN. If you specify only the function name, it's limited to 64 - // characters in length. + // The length constraint applies only to the + // full ARN. If you specify only the function name, it's limited to 64 characters + // in length. FunctionName *string // (Streams) The maximum amount of time to gather records before invoking the diff --git a/service/lambda/api_op_UpdateFunctionCode.go b/service/lambda/api_op_UpdateFunctionCode.go index a1d8db0547a..c8a1d8cd176 100644 --- a/service/lambda/api_op_UpdateFunctionCode.go +++ b/service/lambda/api_op_UpdateFunctionCode.go @@ -33,18 +33,17 @@ type UpdateFunctionCodeInput struct { // The name of the Lambda function. Name formats // - // * Function name - - // my-function. + // * Function name - my-function. // - // * Function ARN - - // arn:aws:lambda:us-west-2:123456789012:function:my-function. + // * + // Function ARN - arn:aws:lambda:us-west-2:123456789012:function:my-function. // - // * Partial ARN - - // 123456789012:function:my-function. 
+ // * + // Partial ARN - 123456789012:function:my-function. // - // The length constraint applies only to the - // full ARN. If you specify only the function name, it is limited to 64 characters - // in length. + // The length constraint applies + // only to the full ARN. If you specify only the function name, it is limited to 64 + // characters in length. // // This member is required. FunctionName *string diff --git a/service/lambda/api_op_UpdateFunctionConfiguration.go b/service/lambda/api_op_UpdateFunctionConfiguration.go index 76fba5c02d0..9e791f71bb1 100644 --- a/service/lambda/api_op_UpdateFunctionConfiguration.go +++ b/service/lambda/api_op_UpdateFunctionConfiguration.go @@ -44,18 +44,17 @@ type UpdateFunctionConfigurationInput struct { // The name of the Lambda function. Name formats // - // * Function name - - // my-function. + // * Function name - my-function. // - // * Function ARN - - // arn:aws:lambda:us-west-2:123456789012:function:my-function. + // * + // Function ARN - arn:aws:lambda:us-west-2:123456789012:function:my-function. // - // * Partial ARN - - // 123456789012:function:my-function. + // * + // Partial ARN - 123456789012:function:my-function. // - // The length constraint applies only to the - // full ARN. If you specify only the function name, it is limited to 64 characters - // in length. + // The length constraint applies + // only to the full ARN. If you specify only the function name, it is limited to 64 + // characters in length. // // This member is required. FunctionName *string diff --git a/service/lambda/api_op_UpdateFunctionEventInvokeConfig.go b/service/lambda/api_op_UpdateFunctionEventInvokeConfig.go index 50ef8c17023..bcd86672718 100644 --- a/service/lambda/api_op_UpdateFunctionEventInvokeConfig.go +++ b/service/lambda/api_op_UpdateFunctionEventInvokeConfig.go @@ -34,18 +34,18 @@ type UpdateFunctionEventInvokeConfigInput struct { // The name of the Lambda function, version, or alias. Name formats // - // * Function + // * Function // name - my-function (name-only), my-function:v1 (with alias). // - // * Function ARN - // - arn:aws:lambda:us-west-2:123456789012:function:my-function. + // * Function ARN - + // arn:aws:lambda:us-west-2:123456789012:function:my-function. // - // * Partial ARN - // - 123456789012:function:my-function. + // * Partial ARN - + // 123456789012:function:my-function. // - // You can append a version number or alias - // to any of the formats. The length constraint applies only to the full ARN. If - // you specify only the function name, it is limited to 64 characters in length. + // You can append a version number or alias to + // any of the formats. The length constraint applies only to the full ARN. If you + // specify only the function name, it is limited to 64 characters in length. // // This member is required. FunctionName *string @@ -53,15 +53,15 @@ type UpdateFunctionEventInvokeConfigInput struct { // A destination for events after they have been sent to a function for processing. // Destinations // - // * Function - The Amazon Resource Name (ARN) of a Lambda + // * Function - The Amazon Resource Name (ARN) of a Lambda // function. // - // * Queue - The ARN of an SQS queue. + // * Queue - The ARN of an SQS queue. // - // * Topic - The ARN of an - // SNS topic. + // * Topic - The ARN of an SNS + // topic. // - // * Event Bus - The ARN of an Amazon EventBridge event bus. + // * Event Bus - The ARN of an Amazon EventBridge event bus. 
DestinationConfig *types.DestinationConfig // The maximum age of a request that Lambda sends to a function for processing. @@ -79,15 +79,15 @@ type UpdateFunctionEventInvokeConfigOutput struct { // A destination for events after they have been sent to a function for processing. // Destinations // - // * Function - The Amazon Resource Name (ARN) of a Lambda + // * Function - The Amazon Resource Name (ARN) of a Lambda // function. // - // * Queue - The ARN of an SQS queue. + // * Queue - The ARN of an SQS queue. // - // * Topic - The ARN of an - // SNS topic. + // * Topic - The ARN of an SNS + // topic. // - // * Event Bus - The ARN of an Amazon EventBridge event bus. + // * Event Bus - The ARN of an Amazon EventBridge event bus. DestinationConfig *types.DestinationConfig // The Amazon Resource Name (ARN) of the function. diff --git a/service/lambda/types/enums.go b/service/lambda/types/enums.go index b06b990b104..18b67299476 100644 --- a/service/lambda/types/enums.go +++ b/service/lambda/types/enums.go @@ -6,9 +6,9 @@ type EventSourcePosition string // Enum values for EventSourcePosition const ( - EventSourcePositionTrim_horizon EventSourcePosition = "TRIM_HORIZON" - EventSourcePositionLatest EventSourcePosition = "LATEST" - EventSourcePositionAt_timestamp EventSourcePosition = "AT_TIMESTAMP" + EventSourcePositionTrimHorizon EventSourcePosition = "TRIM_HORIZON" + EventSourcePositionLatest EventSourcePosition = "LATEST" + EventSourcePositionAtTimestamp EventSourcePosition = "AT_TIMESTAMP" ) // Values returns all known values for EventSourcePosition. Note that this can be @@ -128,9 +128,9 @@ type ProvisionedConcurrencyStatusEnum string // Enum values for ProvisionedConcurrencyStatusEnum const ( - ProvisionedConcurrencyStatusEnumIn_progress ProvisionedConcurrencyStatusEnum = "IN_PROGRESS" - ProvisionedConcurrencyStatusEnumReady ProvisionedConcurrencyStatusEnum = "READY" - ProvisionedConcurrencyStatusEnumFailed ProvisionedConcurrencyStatusEnum = "FAILED" + ProvisionedConcurrencyStatusEnumInProgress ProvisionedConcurrencyStatusEnum = "IN_PROGRESS" + ProvisionedConcurrencyStatusEnumReady ProvisionedConcurrencyStatusEnum = "READY" + ProvisionedConcurrencyStatusEnumFailed ProvisionedConcurrencyStatusEnum = "FAILED" ) // Values returns all known values for ProvisionedConcurrencyStatusEnum. Note that diff --git a/service/lambda/types/types.go b/service/lambda/types/types.go index 5d4245e9085..3d1fc761d67 100644 --- a/service/lambda/types/types.go +++ b/service/lambda/types/types.go @@ -330,15 +330,15 @@ type FunctionEventInvokeConfig struct { // A destination for events after they have been sent to a function for processing. // Destinations // - // * Function - The Amazon Resource Name (ARN) of a Lambda + // * Function - The Amazon Resource Name (ARN) of a Lambda // function. // - // * Queue - The ARN of an SQS queue. + // * Queue - The ARN of an SQS queue. // - // * Topic - The ARN of an - // SNS topic. + // * Topic - The ARN of an SNS + // topic. // - // * Event Bus - The ARN of an Amazon EventBridge event bus. + // * Event Bus - The ARN of an Amazon EventBridge event bus. DestinationConfig *DestinationConfig // The Amazon Resource Name (ARN) of the function. 
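[editor note] Besides the comment re-wrapping, the lambda/types/enums.go hunk above renames the generated constants from underscore-separated to CamelCase (for example EventSourcePositionTrim_horizon becomes EventSourcePositionTrimHorizon, ProvisionedConcurrencyStatusEnumIn_progress becomes ProvisionedConcurrencyStatusEnumInProgress), presumably to follow Go naming conventions for generated enum symbols. A small, hypothetical consumer-side sketch of the new spelling, assuming the CreateEventSourceMapping input type and the aws / lambda / types packages as published; newKinesisMapping and its arguments are illustrative only.

package example

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/lambda"
	"github.com/aws/aws-sdk-go-v2/service/lambda/types"
)

// newKinesisMapping shows the renamed constant in use: callers now write
// types.EventSourcePositionTrimHorizon instead of the old
// types.EventSourcePositionTrim_horizon spelling.
func newKinesisMapping(streamARN, functionName string) *lambda.CreateEventSourceMappingInput {
	return &lambda.CreateEventSourceMappingInput{
		EventSourceArn:   aws.String(streamARN),
		FunctionName:     aws.String(functionName),
		StartingPosition: types.EventSourcePositionTrimHorizon,
	}
}

The string values of the enums ("TRIM_HORIZON", "IN_PROGRESS", and so on) are unchanged, so the rename only affects the Go identifiers that consumers reference.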
diff --git a/service/lexmodelbuildingservice/api_op_GetBotChannelAssociation.go b/service/lexmodelbuildingservice/api_op_GetBotChannelAssociation.go index 78dbd9bf7f3..28880af7695 100644 --- a/service/lexmodelbuildingservice/api_op_GetBotChannelAssociation.go +++ b/service/lexmodelbuildingservice/api_op_GetBotChannelAssociation.go @@ -78,14 +78,14 @@ type GetBotChannelAssociationOutput struct { // The status of the bot channel. // - // * CREATED - The channel has been created and - // is ready for use. + // * CREATED - The channel has been created and is + // ready for use. // - // * IN_PROGRESS - Channel creation is in progress. + // * IN_PROGRESS - Channel creation is in progress. // - // * - // FAILED - There was an error creating the channel. For information about the - // reason for the failure, see the failureReason field. + // * FAILED - + // There was an error creating the channel. For information about the reason for + // the failure, see the failureReason field. Status types.ChannelStatus // The type of the messaging platform. diff --git a/service/lexmodelbuildingservice/api_op_GetBots.go b/service/lexmodelbuildingservice/api_op_GetBots.go index 01a016f09d5..a25f2da2307 100644 --- a/service/lexmodelbuildingservice/api_op_GetBots.go +++ b/service/lexmodelbuildingservice/api_op_GetBots.go @@ -13,16 +13,15 @@ import ( // Returns bot information as follows: // -// * If you provide the nameContains -// field, the response includes information for the $LATEST version of all bots -// whose name contains the specified string. +// * If you provide the nameContains field, +// the response includes information for the $LATEST version of all bots whose name +// contains the specified string. // -// * If you don't specify the -// nameContains field, the operation returns information about the $LATEST version -// of all of your bots. +// * If you don't specify the nameContains field, +// the operation returns information about the $LATEST version of all of your +// bots. // -// This operation requires permission for the lex:GetBots -// action. +// This operation requires permission for the lex:GetBots action. func (c *Client) GetBots(ctx context.Context, params *GetBotsInput, optFns ...func(*Options)) (*GetBotsOutput, error) { if params == nil { params = &GetBotsInput{} diff --git a/service/lexmodelbuildingservice/api_op_GetExport.go b/service/lexmodelbuildingservice/api_op_GetExport.go index e218f7fbac2..020b6ce3c60 100644 --- a/service/lexmodelbuildingservice/api_op_GetExport.go +++ b/service/lexmodelbuildingservice/api_op_GetExport.go @@ -54,13 +54,12 @@ type GetExportOutput struct { // The status of the export. // - // * IN_PROGRESS - The export is in progress. + // * IN_PROGRESS - The export is in progress. // - // * - // READY - The export is complete. + // * READY - + // The export is complete. // - // * FAILED - The export could not be - // completed. + // * FAILED - The export could not be completed. ExportStatus types.ExportStatus // The format of the exported data. diff --git a/service/lexmodelbuildingservice/api_op_GetIntents.go b/service/lexmodelbuildingservice/api_op_GetIntents.go index b645c49ef6c..dbef4142119 100644 --- a/service/lexmodelbuildingservice/api_op_GetIntents.go +++ b/service/lexmodelbuildingservice/api_op_GetIntents.go @@ -13,15 +13,15 @@ import ( // Returns intent information as follows: // -// * If you specify the nameContains -// field, returns the $LATEST version of all intents that contain the specified -// string. 
+// * If you specify the nameContains field, +// returns the $LATEST version of all intents that contain the specified string. // -// * If you don't specify the nameContains field, returns information -// about the $LATEST version of all intents. +// * +// If you don't specify the nameContains field, returns information about the +// $LATEST version of all intents. // -// The operation requires permission for -// the lex:GetIntents action. +// The operation requires permission for the +// lex:GetIntents action. func (c *Client) GetIntents(ctx context.Context, params *GetIntentsInput, optFns ...func(*Options)) (*GetIntentsOutput, error) { if params == nil { params = &GetIntentsInput{} diff --git a/service/lexmodelbuildingservice/api_op_GetSlotTypes.go b/service/lexmodelbuildingservice/api_op_GetSlotTypes.go index 6af1861c985..cb552fa6c84 100644 --- a/service/lexmodelbuildingservice/api_op_GetSlotTypes.go +++ b/service/lexmodelbuildingservice/api_op_GetSlotTypes.go @@ -13,11 +13,11 @@ import ( // Returns slot type information as follows: // -// * If you specify the nameContains +// * If you specify the nameContains // field, returns the $LATEST version of all slot types that contain the specified // string. // -// * If you don't specify the nameContains field, returns information +// * If you don't specify the nameContains field, returns information // about the $LATEST version of all slot types. // // The operation requires permission diff --git a/service/lexmodelbuildingservice/api_op_PutBot.go b/service/lexmodelbuildingservice/api_op_PutBot.go index d637f8ac779..2acd9aa1d19 100644 --- a/service/lexmodelbuildingservice/api_op_PutBot.go +++ b/service/lexmodelbuildingservice/api_op_PutBot.go @@ -117,18 +117,18 @@ type PutBotInput struct { // you don't define a clarification prompt, at runtime Amazon Lex will return a 400 // Bad Request exception in three cases: // - // * Follow-up prompt - When the user + // * Follow-up prompt - When the user // responds to a follow-up prompt but does not provide an intent. For example, in // response to a follow-up prompt that says "Would you like anything else today?" // the user says "Yes." Amazon Lex will return a 400 Bad Request exception because // it does not have a clarification prompt to send to the user to get an intent. // - // - // * Lambda function - When using a Lambda function, you return an ElicitIntent + // * + // Lambda function - When using a Lambda function, you return an ElicitIntent // dialog type. Since Amazon Lex does not have a clarification prompt to get an // intent from the user, it returns a 400 Bad Request exception. // - // * PutSession + // * PutSession // operation - When using the PutSession operation, you send an ElicitIntent dialog // type. Since Amazon Lex does not have a clarification prompt to get an intent // from the user, it returns a 400 Bad Request exception. @@ -155,30 +155,29 @@ type PutBotInput struct { // parameter to true, your bot has access to accuracy improvements. The Regions // where you can set the enableModelImprovements parameter to true are: // - // * US - // East (N. Virginia) (us-east-1) + // * US East + // (N. Virginia) (us-east-1) // - // * US West (Oregon) (us-west-2) + // * US West (Oregon) (us-west-2) // - // * Asia - // Pacific (Sydney) (ap-southeast-2) + // * Asia Pacific + // (Sydney) (ap-southeast-2) // - // * EU (Ireland) (eu-west-1) + // * EU (Ireland) (eu-west-1) // - // In other - // Regions, the enableModelImprovements parameter is set to true by default. 
In - // these Regions setting the parameter to false throws a ValidationException - // exception. + // In other Regions, the + // enableModelImprovements parameter is set to true by default. In these Regions + // setting the parameter to false throws a ValidationException exception. // - // * Asia Pacific (Singapore) (ap-southeast-1) + // * Asia + // Pacific (Singapore) (ap-southeast-1) // - // * Asia Pacific - // (Tokyo) (ap-northeast-1) + // * Asia Pacific (Tokyo) (ap-northeast-1) // - // * EU (Frankfurt) (eu-central-1) + // * + // EU (Frankfurt) (eu-central-1) // - // * EU (London) - // (eu-west-2) + // * EU (London) (eu-west-2) EnableModelImprovements *bool // The maximum time in seconds that Amazon Lex retains the data gathered in a @@ -207,15 +206,15 @@ type PutBotInput struct { // if they are configured for the bot. You must set the enableModelImprovements // parameter to true to use confidence scores. // - // * US East (N. Virginia) + // * US East (N. Virginia) // (us-east-1) // - // * US West (Oregon) (us-west-2) + // * US West (Oregon) (us-west-2) // - // * Asia Pacific (Sydney) + // * Asia Pacific (Sydney) // (ap-southeast-2) // - // * EU (Ireland) (eu-west-1) + // * EU (Ireland) (eu-west-1) // // In other Regions, the // enableModelImprovements parameter is set to true by default. For example, @@ -224,14 +223,14 @@ type PutBotInput struct { // following confidence scores: IntentA (0.70), IntentB (0.60), IntentC (0.50). The // response from the PostText operation would be: // - // * AMAZON.FallbackIntent - // + // * AMAZON.FallbackIntent // - // * IntentA + // * + // IntentA // - // * IntentB + // * IntentB // - // * IntentC + // * IntentC NluIntentConfidenceThreshold *float64 // If you set the processBehavior element to BUILD, Amazon Lex builds the bot so diff --git a/service/lexmodelbuildingservice/api_op_PutIntent.go b/service/lexmodelbuildingservice/api_op_PutIntent.go index 9b1922373e8..a3e500b6322 100644 --- a/service/lexmodelbuildingservice/api_op_PutIntent.go +++ b/service/lexmodelbuildingservice/api_op_PutIntent.go @@ -17,46 +17,45 @@ import ( // bot, for example, you would create an OrderPizza intent. To create an intent or // replace an existing intent, you must provide the following: // -// * Intent name. -// For example, OrderPizza. +// * Intent name. For +// example, OrderPizza. // -// * Sample utterances. For example, "Can I order a -// pizza, please." and "I want to order a pizza." +// * Sample utterances. For example, "Can I order a pizza, +// please." and "I want to order a pizza." // -// * Information to be -// gathered. You specify slot types for the information that your bot will request -// from the user. You can specify standard slot types, such as a date or a time, or -// custom slot types such as the size and crust of a pizza. +// * Information to be gathered. You +// specify slot types for the information that your bot will request from the user. +// You can specify standard slot types, such as a date or a time, or custom slot +// types such as the size and crust of a pizza. // -// * How the intent -// will be fulfilled. You can provide a Lambda function or configure the intent to -// return the intent information to the client application. If you use a Lambda -// function, when all of the intent information is available, Amazon Lex invokes -// your Lambda function. If you configure your intent to return the intent -// information to the client application. +// * How the intent will be +// fulfilled. 
You can provide a Lambda function or configure the intent to return +// the intent information to the client application. If you use a Lambda function, +// when all of the intent information is available, Amazon Lex invokes your Lambda +// function. If you configure your intent to return the intent information to the +// client application. // -// You can specify other optional -// information in the request, such as: +// You can specify other optional information in the request, +// such as: // -// * A confirmation prompt to ask the -// user to confirm an intent. For example, "Shall I order your pizza?" +// * A confirmation prompt to ask the user to confirm an intent. For +// example, "Shall I order your pizza?" // -// * A -// conclusion statement to send to the user after the intent has been fulfilled. -// For example, "I placed your pizza order." +// * A conclusion statement to send to the +// user after the intent has been fulfilled. For example, "I placed your pizza +// order." // -// * A follow-up prompt that asks -// the user for additional activity. For example, asking "Do you want to order a -// drink with your pizza?" +// * A follow-up prompt that asks the user for additional activity. For +// example, asking "Do you want to order a drink with your pizza?" // -// If you specify an existing intent name to update the -// intent, Amazon Lex replaces the values in the $LATEST version of the intent with -// the values in the request. Amazon Lex removes fields that you don't provide in -// the request. If you don't specify the required fields, Amazon Lex throws an -// exception. When you update the $LATEST version of an intent, the status field of -// any bot that uses the $LATEST version of the intent is set to NOT_BUILT. For -// more information, see how-it-works. This operation requires permissions for the -// lex:PutIntent action. +// If you specify +// an existing intent name to update the intent, Amazon Lex replaces the values in +// the $LATEST version of the intent with the values in the request. Amazon Lex +// removes fields that you don't provide in the request. If you don't specify the +// required fields, Amazon Lex throws an exception. When you update the $LATEST +// version of an intent, the status field of any bot that uses the $LATEST version +// of the intent is set to NOT_BUILT. For more information, see how-it-works. This +// operation requires permissions for the lex:PutIntent action. func (c *Client) PutIntent(ctx context.Context, params *PutIntentInput, optFns ...func(*Options)) (*PutIntentOutput, error) { if params == nil { params = &PutIntentInput{} @@ -132,22 +131,22 @@ type PutIntentInput struct { // the user to order a drink. The action that Amazon Lex takes depends on the // user's response, as follows: // - // * If the user says "Yes" it responds with the + // * If the user says "Yes" it responds with the // clarification prompt that is configured for the bot. // - // * if the user says - // "Yes" and continues with an utterance that triggers an intent it starts a - // conversation for the intent. + // * if the user says "Yes" + // and continues with an utterance that triggers an intent it starts a conversation + // for the intent. // - // * If the user says "No" it responds with the - // rejection statement configured for the the follow-up prompt. + // * If the user says "No" it responds with the rejection + // statement configured for the the follow-up prompt. // - // * If it - // doesn't recognize the utterance it repeats the follow-up prompt again. 
+ // * If it doesn't recognize + // the utterance it repeats the follow-up prompt again. // - // The - // followUpPrompt field and the conclusionStatement field are mutually exclusive. - // You can specify only one. + // The followUpPrompt field + // and the conclusionStatement field are mutually exclusive. You can specify only + // one. FollowUpPrompt *types.FollowUpPrompt // Required. Describes how the intent is fulfilled. For example, after a user diff --git a/service/lexmodelbuildingservice/api_op_PutSlotType.go b/service/lexmodelbuildingservice/api_op_PutSlotType.go index 9d24fa6271f..95e1c972ce6 100644 --- a/service/lexmodelbuildingservice/api_op_PutSlotType.go +++ b/service/lexmodelbuildingservice/api_op_PutSlotType.go @@ -91,16 +91,16 @@ type PutSlotTypeInput struct { // Determines the slot resolution strategy that Amazon Lex uses to return slot type // values. The field can be set to one of the following values: // - // * - // ORIGINAL_VALUE - Returns the value entered by the user, if the user value is - // similar to the slot value. + // * ORIGINAL_VALUE - + // Returns the value entered by the user, if the user value is similar to the slot + // value. // - // * TOP_RESOLUTION - If there is a resolution list - // for the slot, return the first value in the resolution list as the slot type - // value. If there is no resolution list, null is returned. + // * TOP_RESOLUTION - If there is a resolution list for the slot, return + // the first value in the resolution list as the slot type value. If there is no + // resolution list, null is returned. // - // If you don't specify - // the valueSelectionStrategy, the default is ORIGINAL_VALUE. + // If you don't specify the + // valueSelectionStrategy, the default is ORIGINAL_VALUE. ValueSelectionStrategy types.SlotValueSelectionStrategy } diff --git a/service/lexmodelbuildingservice/api_op_StartImport.go b/service/lexmodelbuildingservice/api_op_StartImport.go index 72188ae75e2..94f3b6d9082 100644 --- a/service/lexmodelbuildingservice/api_op_StartImport.go +++ b/service/lexmodelbuildingservice/api_op_StartImport.go @@ -33,13 +33,13 @@ type StartImportInput struct { // Specifies the action that the StartImport operation should take when there is an // existing resource with the same name. // - // * FAIL_ON_CONFLICT - The import - // operation is stopped on the first conflict between a resource in the import file - // and an existing resource. The name of the resource causing the conflict is in - // the failureReason field of the response to the GetImport operation. - // OVERWRITE_LATEST - The import operation proceeds even if there is a conflict - // with an existing resource. The $LASTEST version of the existing resource is - // overwritten with the data from the import file. + // * FAIL_ON_CONFLICT - The import operation + // is stopped on the first conflict between a resource in the import file and an + // existing resource. The name of the resource causing the conflict is in the + // failureReason field of the response to the GetImport operation. OVERWRITE_LATEST + // - The import operation proceeds even if there is a conflict with an existing + // resource. The $LASTEST version of the existing resource is overwritten with the + // data from the import file. // // This member is required. MergeStrategy types.MergeStrategy @@ -54,10 +54,10 @@ type StartImportInput struct { // Specifies the type of resource to export. Each resource also exports any // resources that it depends on. // - // * A bot exports dependent intents. 
+ // * A bot exports dependent intents. // - // * An - // intent exports dependent slot types. + // * An intent + // exports dependent slot types. // // This member is required. ResourceType types.ResourceType diff --git a/service/lexmodelbuildingservice/types/enums.go b/service/lexmodelbuildingservice/types/enums.go index eeb566bee18..fbd0ea53d66 100644 --- a/service/lexmodelbuildingservice/types/enums.go +++ b/service/lexmodelbuildingservice/types/enums.go @@ -6,9 +6,9 @@ type ChannelStatus string // Enum values for ChannelStatus const ( - ChannelStatusIn_progress ChannelStatus = "IN_PROGRESS" - ChannelStatusCreated ChannelStatus = "CREATED" - ChannelStatusFailed ChannelStatus = "FAILED" + ChannelStatusInProgress ChannelStatus = "IN_PROGRESS" + ChannelStatusCreated ChannelStatus = "CREATED" + ChannelStatusFailed ChannelStatus = "FAILED" ) // Values returns all known values for ChannelStatus. Note that this can be @@ -26,10 +26,10 @@ type ChannelType string // Enum values for ChannelType const ( - ChannelTypeFacebook ChannelType = "Facebook" - ChannelTypeSlack ChannelType = "Slack" - ChannelTypeTwilio_sms ChannelType = "Twilio-Sms" - ChannelTypeKik ChannelType = "Kik" + ChannelTypeFacebook ChannelType = "Facebook" + ChannelTypeSlack ChannelType = "Slack" + ChannelTypeTwilioSms ChannelType = "Twilio-Sms" + ChannelTypeKik ChannelType = "Kik" ) // Values returns all known values for ChannelType. Note that this can be expanded @@ -48,9 +48,9 @@ type ContentType string // Enum values for ContentType const ( - ContentTypePlain_text ContentType = "PlainText" - ContentTypeSsml ContentType = "SSML" - ContentTypeCustom_payload ContentType = "CustomPayload" + ContentTypePlainText ContentType = "PlainText" + ContentTypeSsml ContentType = "SSML" + ContentTypeCustomPayload ContentType = "CustomPayload" ) // Values returns all known values for ContentType. Note that this can be expanded @@ -68,8 +68,8 @@ type Destination string // Enum values for Destination const ( - DestinationCloudwatch_logs Destination = "CLOUDWATCH_LOGS" - DestinationS3 Destination = "S3" + DestinationCloudwatchLogs Destination = "CLOUDWATCH_LOGS" + DestinationS3 Destination = "S3" ) // Values returns all known values for Destination. Note that this can be expanded @@ -86,9 +86,9 @@ type ExportStatus string // Enum values for ExportStatus const ( - ExportStatusIn_progress ExportStatus = "IN_PROGRESS" - ExportStatusReady ExportStatus = "READY" - ExportStatusFailed ExportStatus = "FAILED" + ExportStatusInProgress ExportStatus = "IN_PROGRESS" + ExportStatusReady ExportStatus = "READY" + ExportStatusFailed ExportStatus = "FAILED" ) // Values returns all known values for ExportStatus. Note that this can be expanded @@ -106,8 +106,8 @@ type ExportType string // Enum values for ExportType const ( - ExportTypeAlexa_skills_kit ExportType = "ALEXA_SKILLS_KIT" - ExportTypeLex ExportType = "LEX" + ExportTypeAlexaSkillsKit ExportType = "ALEXA_SKILLS_KIT" + ExportTypeLex ExportType = "LEX" ) // Values returns all known values for ExportType. Note that this can be expanded @@ -124,8 +124,8 @@ type FulfillmentActivityType string // Enum values for FulfillmentActivityType const ( - FulfillmentActivityTypeReturn_intent FulfillmentActivityType = "ReturnIntent" - FulfillmentActivityTypeCode_hook FulfillmentActivityType = "CodeHook" + FulfillmentActivityTypeReturnIntent FulfillmentActivityType = "ReturnIntent" + FulfillmentActivityTypeCodeHook FulfillmentActivityType = "CodeHook" ) // Values returns all known values for FulfillmentActivityType. 
Note that this can @@ -142,9 +142,9 @@ type ImportStatus string // Enum values for ImportStatus const ( - ImportStatusIn_progress ImportStatus = "IN_PROGRESS" - ImportStatusComplete ImportStatus = "COMPLETE" - ImportStatusFailed ImportStatus = "FAILED" + ImportStatusInProgress ImportStatus = "IN_PROGRESS" + ImportStatusComplete ImportStatus = "COMPLETE" + ImportStatusFailed ImportStatus = "FAILED" ) // Values returns all known values for ImportStatus. Note that this can be expanded @@ -162,11 +162,11 @@ type Locale string // Enum values for Locale const ( - LocaleDe_de Locale = "de-DE" - LocaleEn_au Locale = "en-AU" - LocaleEn_gb Locale = "en-GB" - LocaleEn_us Locale = "en-US" - LocaleEs_us Locale = "es-US" + LocaleDeDe Locale = "de-DE" + LocaleEnAu Locale = "en-AU" + LocaleEnGb Locale = "en-GB" + LocaleEnUs Locale = "en-US" + LocaleEsUs Locale = "es-US" ) // Values returns all known values for Locale. Note that this can be expanded in @@ -204,8 +204,8 @@ type MergeStrategy string // Enum values for MergeStrategy const ( - MergeStrategyOverwrite_latest MergeStrategy = "OVERWRITE_LATEST" - MergeStrategyFail_on_conflict MergeStrategy = "FAIL_ON_CONFLICT" + MergeStrategyOverwriteLatest MergeStrategy = "OVERWRITE_LATEST" + MergeStrategyFailOnConflict MergeStrategy = "FAIL_ON_CONFLICT" ) // Values returns all known values for MergeStrategy. Note that this can be @@ -222,8 +222,8 @@ type ObfuscationSetting string // Enum values for ObfuscationSetting const ( - ObfuscationSettingNone ObfuscationSetting = "NONE" - ObfuscationSettingDefault_obfuscation ObfuscationSetting = "DEFAULT_OBFUSCATION" + ObfuscationSettingNone ObfuscationSetting = "NONE" + ObfuscationSettingDefaultObfuscation ObfuscationSetting = "DEFAULT_OBFUSCATION" ) // Values returns all known values for ObfuscationSetting. Note that this can be @@ -280,9 +280,9 @@ type ResourceType string // Enum values for ResourceType const ( - ResourceTypeBot ResourceType = "BOT" - ResourceTypeIntent ResourceType = "INTENT" - ResourceTypeSlot_type ResourceType = "SLOT_TYPE" + ResourceTypeBot ResourceType = "BOT" + ResourceTypeIntent ResourceType = "INTENT" + ResourceTypeSlotType ResourceType = "SLOT_TYPE" ) // Values returns all known values for ResourceType. Note that this can be expanded @@ -318,8 +318,8 @@ type SlotValueSelectionStrategy string // Enum values for SlotValueSelectionStrategy const ( - SlotValueSelectionStrategyOriginal_value SlotValueSelectionStrategy = "ORIGINAL_VALUE" - SlotValueSelectionStrategyTop_resolution SlotValueSelectionStrategy = "TOP_RESOLUTION" + SlotValueSelectionStrategyOriginalValue SlotValueSelectionStrategy = "ORIGINAL_VALUE" + SlotValueSelectionStrategyTopResolution SlotValueSelectionStrategy = "TOP_RESOLUTION" ) // Values returns all known values for SlotValueSelectionStrategy. Note that this @@ -336,11 +336,11 @@ type Status string // Enum values for Status const ( - StatusBuilding Status = "BUILDING" - StatusReady Status = "READY" - StatusReady_basic_testing Status = "READY_BASIC_TESTING" - StatusFailed Status = "FAILED" - StatusNot_built Status = "NOT_BUILT" + StatusBuilding Status = "BUILDING" + StatusReady Status = "READY" + StatusReadyBasicTesting Status = "READY_BASIC_TESTING" + StatusFailed Status = "FAILED" + StatusNotBuilt Status = "NOT_BUILT" ) // Values returns all known values for Status. 
Note that this can be expanded in diff --git a/service/lexmodelbuildingservice/types/types.go b/service/lexmodelbuildingservice/types/types.go index 7c5877ac5d3..5ba7d75b3a1 100644 --- a/service/lexmodelbuildingservice/types/types.go +++ b/service/lexmodelbuildingservice/types/types.go @@ -66,14 +66,14 @@ type BotChannelAssociation struct { // The status of the bot channel. // - // * CREATED - The channel has been created and - // is ready for use. + // * CREATED - The channel has been created and is + // ready for use. // - // * IN_PROGRESS - Channel creation is in progress. + // * IN_PROGRESS - Channel creation is in progress. // - // * - // FAILED - There was an error creating the channel. For information about the - // reason for the failure, see the failureReason field. + // * FAILED - + // There was an error creating the channel. For information about the reason for + // the failure, see the failureReason field. Status ChannelStatus // Specifies the type of association by indicating the type of channel being @@ -189,11 +189,11 @@ type ConversationLogsResponse struct { // slot type that specifies the type of crust that the pizza should have. The slot // type could include the values // -// * thick +// * thick // -// * thin +// * thin // -// * stuffed +// * stuffed type EnumerationValue struct { // The value of the slot type. @@ -231,14 +231,14 @@ type FollowUpPrompt struct { // function; you don't need to upgrade your client application. Consider the // following examples: // -// * In a pizza ordering application, after the user -// provides all of the information for placing an order, you use a Lambda function -// to place an order with a pizzeria. +// * In a pizza ordering application, after the user provides +// all of the information for placing an order, you use a Lambda function to place +// an order with a pizzeria. // -// * In a gaming application, when a user -// says "pick up a rock," this information must go back to the client application -// so that it can perform the operation and update the graphics. In this case, you -// want Amazon Lex to return the intent data to the client. +// * In a gaming application, when a user says "pick up +// a rock," this information must go back to the client application so that it can +// perform the operation and update the graphics. In this case, you want Amazon Lex +// to return the intent data to the client. type FulfillmentActivity struct { // How the intent should be fulfilled, either by running a Lambda function or by @@ -506,20 +506,20 @@ type SlotTypeRegexConfiguration struct { // regular expression. Amazon Lex supports the following characters in the regular // expression: // - // * A-Z, a-z + // * A-Z, a-z // - // * 0-9 + // * 0-9 // - // * Unicode characters ("\ - // u") + // * Unicode characters ("\ u") // - // Represent Unicode characters with four digits, for example "\u0041" or - // "\u005A". The following regular expression operators are not supported: + // Represent Unicode + // characters with four digits, for example "\u0041" or "\u005A". The following + // regular expression operators are not supported: // - // * - // Infinite repeaters: *, +, or {x,} with no upper bound. + // * Infinite repeaters: *, +, or + // {x,} with no upper bound. // - // * Wild card (.) + // * Wild card (.) // // This member is required. 
Pattern *string diff --git a/service/lexruntimeservice/api_op_PostContent.go b/service/lexruntimeservice/api_op_PostContent.go index 08b4597834c..6c4613133cb 100644 --- a/service/lexruntimeservice/api_op_PostContent.go +++ b/service/lexruntimeservice/api_op_PostContent.go @@ -20,47 +20,46 @@ import ( // applications. In response, Amazon Lex returns the next message to convey to the // user. Consider the following example messages: // -// * For a user input "I would -// like a pizza," Amazon Lex might return a response with a message eliciting slot -// data (for example, PizzaSize): "What size pizza would you like?". +// * For a user input "I would like +// a pizza," Amazon Lex might return a response with a message eliciting slot data +// (for example, PizzaSize): "What size pizza would you like?". // -// * After -// the user provides all of the pizza order information, Amazon Lex might return a -// response with a message to get user confirmation: "Order the pizza?". +// * After the user +// provides all of the pizza order information, Amazon Lex might return a response +// with a message to get user confirmation: "Order the pizza?". // -// * -// After the user replies "Yes" to the confirmation prompt, Amazon Lex might return -// a conclusion statement: "Thank you, your cheese pizza has been ordered.". +// * After the user +// replies "Yes" to the confirmation prompt, Amazon Lex might return a conclusion +// statement: "Thank you, your cheese pizza has been ordered.". // -// Not -// all Amazon Lex messages require a response from the user. For example, -// conclusion statements do not require a response. Some messages require only a -// yes or no response. In addition to the message, Amazon Lex provides additional -// context about the message in the response that you can use to enhance client -// behavior, such as displaying the appropriate client user interface. Consider the -// following examples: +// Not all Amazon Lex +// messages require a response from the user. For example, conclusion statements do +// not require a response. Some messages require only a yes or no response. In +// addition to the message, Amazon Lex provides additional context about the +// message in the response that you can use to enhance client behavior, such as +// displaying the appropriate client user interface. Consider the following +// examples: // -// * If the message is to elicit slot data, Amazon Lex -// returns the following context information: +// * If the message is to elicit slot data, Amazon Lex returns the +// following context information: // -// * x-amz-lex-dialog-state -// header set to ElicitSlot +// * x-amz-lex-dialog-state header set to +// ElicitSlot // -// * x-amz-lex-intent-name header set to the -// intent name in the current context +// * x-amz-lex-intent-name header set to the intent name in the current +// context // -// * x-amz-lex-slot-to-elicit header -// set to the slot name for which the message is eliciting information +// * x-amz-lex-slot-to-elicit header set to the slot name for which the +// message is eliciting information // -// * -// x-amz-lex-slots header set to a map of slots configured for the intent with -// their current values +// * x-amz-lex-slots header set to a map of slots +// configured for the intent with their current values // -// * If the message is a confirmation prompt, the -// x-amz-lex-dialog-state header is set to Confirmation and the -// x-amz-lex-slot-to-elicit header is omitted. 
+// * If the message is a +// confirmation prompt, the x-amz-lex-dialog-state header is set to Confirmation +// and the x-amz-lex-slot-to-elicit header is omitted. // -// * If the message is a +// * If the message is a // clarification prompt configured for the intent, indicating that the user intent // is not understood, the x-amz-dialog-state header is set to ElicitIntent and the // x-amz-slot-to-elicit header is omitted. @@ -99,28 +98,27 @@ type PostContentInput struct { // You pass this value as the Content-Type HTTP header. Indicates the audio format // or text. The header value must start with one of the following prefixes: // - // * - // PCM format, audio data must be in little-endian byte order. + // * PCM + // format, audio data must be in little-endian byte order. // - // * - // audio/l16; rate=16000; channels=1 + // * audio/l16; + // rate=16000; channels=1 // - // * audio/x-l16; sample-rate=16000; - // channel-count=1 + // * audio/x-l16; sample-rate=16000; channel-count=1 // - // * audio/lpcm; sample-rate=8000; sample-size-bits=16; - // channel-count=1; is-big-endian=false + // * + // audio/lpcm; sample-rate=8000; sample-size-bits=16; channel-count=1; + // is-big-endian=false // - // * Opus format + // * Opus format // - // * - // audio/x-cbr-opus-with-preamble; preamble-size=0; bit-rate=256000; - // frame-size-milliseconds=4 + // * audio/x-cbr-opus-with-preamble; + // preamble-size=0; bit-rate=256000; frame-size-milliseconds=4 // - // * Text format + // * Text format // - // * text/plain; - // charset=utf-8 + // * + // text/plain; charset=utf-8 // // This member is required. ContentType *string @@ -139,19 +137,19 @@ type PostContentInput struct { // field. To decide the user ID to use for your application, consider the following // factors. // - // * The userID field must not contain any personally identifiable + // * The userID field must not contain any personally identifiable // information of the user, for example, name, personal identification numbers, or // other end user personal information. // - // * If you want a user to start a + // * If you want a user to start a // conversation on one device and continue on another device, use a user-specific // identifier. // - // * If you want the same user to be able to have two independent + // * If you want the same user to be able to have two independent // conversations on two different devices, choose a device-specific identifier. // - // - // * A user can't have two independent conversations with two different versions of + // * + // A user can't have two independent conversations with two different versions of // the same bot. For example, a user can't have a conversation with the PROD and // BETA versions of the same bot. If you anticipate that a user will need to have // conversation with two different versions, for example, while testing, include @@ -164,31 +162,31 @@ type PostContentInput struct { // the response can be either text or speech based on the Accept HTTP header value // in the request. // - // * If the value is text/plain; charset=utf-8, Amazon Lex - // returns text in the response. - // - // * If the value begins with audio/, Amazon Lex - // returns speech in the response. Amazon Lex uses Amazon Polly to generate the - // speech (using the configuration you specified in the Accept header). For - // example, if you specify audio/mpeg as the value, Amazon Lex returns speech in - // the MPEG format. + // * If the value is text/plain; charset=utf-8, Amazon Lex returns + // text in the response. 
// - // * If the value is audio/pcm, the speech returned is - // audio/pcm in 16-bit, little endian format. + // * If the value begins with audio/, Amazon Lex returns + // speech in the response. Amazon Lex uses Amazon Polly to generate the speech + // (using the configuration you specified in the Accept header). For example, if + // you specify audio/mpeg as the value, Amazon Lex returns speech in the MPEG + // format. // - // * The following are the accepted - // values: + // * If the value is audio/pcm, the speech returned is audio/pcm in + // 16-bit, little endian format. // - // * audio/mpeg + // * The following are the accepted values: // - // * audio/ogg + // * + // audio/mpeg // - // * audio/pcm + // * audio/ogg // + // * audio/pcm // // * text/plain; charset=utf-8 // - // * audio/* (defaults to mpeg) + // * audio/* + // (defaults to mpeg) Accept *string // You pass this value as the x-amz-lex-request-attributes HTTP header. @@ -248,13 +246,13 @@ type PostContentOutput struct { // the following values as dialogState. The client can optionally use this // information to customize the user interface. // - // * ElicitIntent - Amazon Lex - // wants to elicit the user's intent. Consider the following examples: For example, - // a user might utter an intent ("I want to order a pizza"). If Amazon Lex cannot + // * ElicitIntent - Amazon Lex wants + // to elicit the user's intent. Consider the following examples: For example, a + // user might utter an intent ("I want to order a pizza"). If Amazon Lex cannot // infer the user intent from this utterance, it will return this dialog state. // - // - // * ConfirmIntent - Amazon Lex is expecting a "yes" or "no" response. For example, + // * + // ConfirmIntent - Amazon Lex is expecting a "yes" or "no" response. For example, // Amazon Lex wants user confirmation before fulfilling an intent. Instead of a // simple "yes" or "no" response, a user might respond with additional information. // For example, "yes, but make it a thick crust pizza" or "no, I want to order a @@ -262,21 +260,21 @@ type PostContentOutput struct { // update the crust type slot or change the intent from OrderPizza to // OrderDrink). // - // * ElicitSlot - Amazon Lex is expecting the value of a slot for - // the current intent. For example, suppose that in the response Amazon Lex sends - // this message: "What size pizza would you like?". A user might reply with the - // slot value (e.g., "medium"). The user might also provide additional information - // in the response (e.g., "medium thick crust pizza"). Amazon Lex can process such + // * ElicitSlot - Amazon Lex is expecting the value of a slot for the + // current intent. For example, suppose that in the response Amazon Lex sends this + // message: "What size pizza would you like?". A user might reply with the slot + // value (e.g., "medium"). The user might also provide additional information in + // the response (e.g., "medium thick crust pizza"). Amazon Lex can process such // additional information appropriately. // - // * Fulfilled - Conveys that the Lambda + // * Fulfilled - Conveys that the Lambda // function has successfully fulfilled the intent. // - // * ReadyForFulfillment - - // Conveys that the client has to fulfill the request. + // * ReadyForFulfillment - Conveys + // that the client has to fulfill the request. // - // * Failed - Conveys that - // the conversation with the user failed. This can happen for various reasons, + // * Failed - Conveys that the + // conversation with the user failed. 
This can happen for various reasons, // including that the user does not provide an appropriate response to prompts from // the service (you can configure how many times Amazon Lex can prompt a user for // specific information), or if the Lambda function fails to fulfill the intent. @@ -309,18 +307,18 @@ type PostContentOutput struct { // The format of the response message. One of the following values: // - // * - // PlainText - The message contains plain UTF-8 text. + // * PlainText - + // The message contains plain UTF-8 text. // - // * CustomPayload - The - // message is a custom format for the client. + // * CustomPayload - The message is a + // custom format for the client. // - // * SSML - The message contains - // text formatted for voice output. + // * SSML - The message contains text formatted for + // voice output. // - // * Composite - The message contains an - // escaped JSON object containing one or more messages from the groups that - // messages were assigned to when the intent was created. + // * Composite - The message contains an escaped JSON object + // containing one or more messages from the groups that messages were assigned to + // when the intent was created. MessageFormat types.MessageFormatType // Provides a score that indicates how confident Amazon Lex is that the returned diff --git a/service/lexruntimeservice/api_op_PostText.go b/service/lexruntimeservice/api_op_PostText.go index abdfe340fc6..d9c21ac8e43 100644 --- a/service/lexruntimeservice/api_op_PostText.go +++ b/service/lexruntimeservice/api_op_PostText.go @@ -17,53 +17,53 @@ import ( // returns the next message to convey to the user an optional responseCard to // display. Consider the following example messages: // -// * For a user input "I -// would like a pizza", Amazon Lex might return a response with a message eliciting -// slot data (for example, PizzaSize): "What size pizza would you like?" +// * For a user input "I would +// like a pizza", Amazon Lex might return a response with a message eliciting slot +// data (for example, PizzaSize): "What size pizza would you like?" // -// * -// After the user provides all of the pizza order information, Amazon Lex might -// return a response with a message to obtain user confirmation "Proceed with the -// pizza order?". +// * After the +// user provides all of the pizza order information, Amazon Lex might return a +// response with a message to obtain user confirmation "Proceed with the pizza +// order?". // -// * After the user replies to a confirmation prompt with a -// "yes", Amazon Lex might return a conclusion statement: "Thank you, your cheese -// pizza has been ordered.". +// * After the user replies to a confirmation prompt with a "yes", Amazon +// Lex might return a conclusion statement: "Thank you, your cheese pizza has been +// ordered.". // -// Not all Amazon Lex messages require a user response. -// For example, a conclusion statement does not require a response. Some messages -// require only a "yes" or "no" user response. In addition to the message, Amazon -// Lex provides additional context about the message in the response that you might -// use to enhance client behavior, for example, to display the appropriate client -// user interface. These are the slotToElicit, dialogState, intentName, and slots -// fields in the response. Consider the following examples: +// Not all Amazon Lex messages require a user response. For example, a +// conclusion statement does not require a response. 
Some messages require only a +// "yes" or "no" user response. In addition to the message, Amazon Lex provides +// additional context about the message in the response that you might use to +// enhance client behavior, for example, to display the appropriate client user +// interface. These are the slotToElicit, dialogState, intentName, and slots fields +// in the response. Consider the following examples: // -// * If the message -// is to elicit slot data, Amazon Lex returns the following context information: +// * If the message is to elicit +// slot data, Amazon Lex returns the following context information: // +// * dialogState +// set to ElicitSlot // -// * dialogState set to ElicitSlot +// * intentName set to the intent name in the current context // -// * intentName set to the intent name in -// the current context +// * +// slotToElicit set to the slot name for which the message is eliciting +// information // -// * slotToElicit set to the slot name for which the -// message is eliciting information +// * slots set to a map of slots, configured for the intent, with +// currently known values // -// * slots set to a map of slots, -// configured for the intent, with currently known values +// * If the message is a confirmation prompt, the +// dialogState is set to ConfirmIntent and SlotToElicit is set to null. // -// * If the message is -// a confirmation prompt, the dialogState is set to ConfirmIntent and SlotToElicit -// is set to null. +// * If the +// message is a clarification prompt (configured for the intent) that indicates +// that user intent is not understood, the dialogState is set to ElicitIntent and +// slotToElicit is set to null. // -// * If the message is a clarification prompt (configured for -// the intent) that indicates that user intent is not understood, the dialogState -// is set to ElicitIntent and slotToElicit is set to null. -// -// In addition, Amazon Lex -// also returns your application-specific sessionAttributes. For more information, -// see Managing Conversation Context +// In addition, Amazon Lex also returns your +// application-specific sessionAttributes. For more information, see Managing +// Conversation Context // (https://docs.aws.amazon.com/lex/latest/dg/context-mgmt.html). func (c *Client) PostText(ctx context.Context, params *PostTextInput, optFns ...func(*Options)) (*PostTextOutput, error) { if params == nil { @@ -102,19 +102,19 @@ type PostTextInput struct { // field. To decide the user ID to use for your application, consider the following // factors. // - // * The userID field must not contain any personally identifiable + // * The userID field must not contain any personally identifiable // information of the user, for example, name, personal identification numbers, or // other end user personal information. // - // * If you want a user to start a + // * If you want a user to start a // conversation on one device and continue on another device, use a user-specific // identifier. // - // * If you want the same user to be able to have two independent + // * If you want the same user to be able to have two independent // conversations on two different devices, choose a device-specific identifier. // - // - // * A user can't have two independent conversations with two different versions of + // * + // A user can't have two independent conversations with two different versions of // the same bot. For example, a user can't have a conversation with the PROD and // BETA versions of the same bot. 
If you anticipate that a user will need to have // conversation with two different versions, for example, while testing, include @@ -158,38 +158,38 @@ type PostTextOutput struct { // the following values as dialogState. The client can optionally use this // information to customize the user interface. // - // * ElicitIntent - Amazon Lex - // wants to elicit user intent. For example, a user might utter an intent ("I want - // to order a pizza"). If Amazon Lex cannot infer the user intent from this - // utterance, it will return this dialogState. - // - // * ConfirmIntent - Amazon Lex is - // expecting a "yes" or "no" response. For example, Amazon Lex wants user - // confirmation before fulfilling an intent. Instead of a simple "yes" or "no," a - // user might respond with additional information. For example, "yes, but make it - // thick crust pizza" or "no, I want to order a drink". Amazon Lex can process such - // additional information (in these examples, update the crust type slot value, or - // change intent from OrderPizza to OrderDrink). + // * ElicitIntent - Amazon Lex wants + // to elicit user intent. For example, a user might utter an intent ("I want to + // order a pizza"). If Amazon Lex cannot infer the user intent from this utterance, + // it will return this dialogState. // - // * ElicitSlot - Amazon Lex is - // expecting a slot value for the current intent. For example, suppose that in the - // response Amazon Lex sends this message: "What size pizza would you like?". A - // user might reply with the slot value (e.g., "medium"). The user might also - // provide additional information in the response (e.g., "medium thick crust - // pizza"). Amazon Lex can process such additional information appropriately. + // * ConfirmIntent - Amazon Lex is expecting a + // "yes" or "no" response. For example, Amazon Lex wants user confirmation before + // fulfilling an intent. Instead of a simple "yes" or "no," a user might respond + // with additional information. For example, "yes, but make it thick crust pizza" + // or "no, I want to order a drink". Amazon Lex can process such additional + // information (in these examples, update the crust type slot value, or change + // intent from OrderPizza to OrderDrink). // + // * ElicitSlot - Amazon Lex is expecting a + // slot value for the current intent. For example, suppose that in the response + // Amazon Lex sends this message: "What size pizza would you like?". A user might + // reply with the slot value (e.g., "medium"). The user might also provide + // additional information in the response (e.g., "medium thick crust pizza"). + // Amazon Lex can process such additional information appropriately. // - // * Fulfilled - Conveys that the Lambda function configured for the intent has - // successfully fulfilled the intent. + // * Fulfilled - + // Conveys that the Lambda function configured for the intent has successfully + // fulfilled the intent. // - // * ReadyForFulfillment - Conveys that the - // client has to fulfill the intent. + // * ReadyForFulfillment - Conveys that the client has to + // fulfill the intent. // - // * Failed - Conveys that the conversation - // with the user failed. This can happen for various reasons including that the - // user did not provide an appropriate response to prompts from the service (you - // can configure how many times Amazon Lex can prompt a user for specific - // information), or the Lambda function failed to fulfill the intent. + // * Failed - Conveys that the conversation with the user + // failed. 
This can happen for various reasons including that the user did not + // provide an appropriate response to prompts from the service (you can configure + // how many times Amazon Lex can prompt a user for specific information), or the + // Lambda function failed to fulfill the intent. DialogState types.DialogState // The current user intent that Amazon Lex is aware of. @@ -211,18 +211,18 @@ type PostTextOutput struct { // The format of the response message. One of the following values: // - // * - // PlainText - The message contains plain UTF-8 text. + // * PlainText - + // The message contains plain UTF-8 text. // - // * CustomPayload - The - // message is a custom format defined by the Lambda function. + // * CustomPayload - The message is a + // custom format defined by the Lambda function. // - // * SSML - The - // message contains text formatted for voice output. + // * SSML - The message contains + // text formatted for voice output. // - // * Composite - The message - // contains an escaped JSON object containing one or more messages from the groups - // that messages were assigned to when the intent was created. + // * Composite - The message contains an escaped + // JSON object containing one or more messages from the groups that messages were + // assigned to when the intent was created. MessageFormat types.MessageFormatType // Provides a score that indicates how confident Amazon Lex is that the returned diff --git a/service/lexruntimeservice/api_op_PutSession.go b/service/lexruntimeservice/api_op_PutSession.go index 9ea099956e0..046b968fd8b 100644 --- a/service/lexruntimeservice/api_op_PutSession.go +++ b/service/lexruntimeservice/api_op_PutSession.go @@ -52,31 +52,31 @@ type PutSessionInput struct { // The message that Amazon Lex returns in the response can be either text or speech // based depending on the value of this field. // - // * If the value is text/plain; + // * If the value is text/plain; // charset=utf-8, Amazon Lex returns text in the response. // - // * If the value - // begins with audio/, Amazon Lex returns speech in the response. Amazon Lex uses - // Amazon Polly to generate the speech in the configuration that you specify. For - // example, if you specify audio/mpeg as the value, Amazon Lex returns speech in - // the MPEG format. + // * If the value begins + // with audio/, Amazon Lex returns speech in the response. Amazon Lex uses Amazon + // Polly to generate the speech in the configuration that you specify. For example, + // if you specify audio/mpeg as the value, Amazon Lex returns speech in the MPEG + // format. // - // * If the value is audio/pcm, the speech is returned as - // audio/pcm in 16-bit, little endian format. + // * If the value is audio/pcm, the speech is returned as audio/pcm in + // 16-bit, little endian format. // - // * The following are the accepted - // values: + // * The following are the accepted values: // - // * audio/mpeg + // * + // audio/mpeg // - // * audio/ogg - // - // * audio/pcm + // * audio/ogg // + // * audio/pcm // // * audio/* (defaults to mpeg) // - // * text/plain; charset=utf-8 + // * + // text/plain; charset=utf-8 Accept *string // Sets the next action that the bot should take to fulfill the conversation. @@ -88,19 +88,18 @@ type PutSessionInput struct { // you modify or add to the list must make sense for the bot. For example, the // intent name must be valid for the bot. 
You must provide valid values for: // - // * + // * // intentName // - // * slot names + // * slot names // - // * slotToElict + // * slotToElict // - // If you send the - // recentIntentSummaryView parameter in a PutSession request, the contents of the - // new summary view replaces the old summary view. For example, if a GetSession - // request returns three intents in the summary view and you call PutSession with - // one intent in the summary view, the next call to GetSession will only return one - // intent. + // If you send the recentIntentSummaryView + // parameter in a PutSession request, the contents of the new summary view replaces + // the old summary view. For example, if a GetSession request returns three intents + // in the summary view and you call PutSession with one intent in the summary view, + // the next call to GetSession will only return one intent. RecentIntentSummaryView []*types.IntentSummary // Map of key/value pairs representing the session-specific context information. It @@ -120,22 +119,22 @@ type PutSessionOutput struct { // * ConfirmIntent - Amazon Lex is expecting a "yes" or "no" response to confirm // the intent before fulfilling an intent. // - // * ElicitIntent - Amazon Lex wants - // to elicit the user's intent. + // * ElicitIntent - Amazon Lex wants to + // elicit the user's intent. // - // * ElicitSlot - Amazon Lex is expecting the - // value of a slot for the current intent. + // * ElicitSlot - Amazon Lex is expecting the value of a + // slot for the current intent. // - // * Failed - Conveys that the - // conversation with the user has failed. This can happen for various reasons, - // including the user does not provide an appropriate response to prompts from the - // service, or if the Lambda function fails to fulfill the intent. + // * Failed - Conveys that the conversation with the + // user has failed. This can happen for various reasons, including the user does + // not provide an appropriate response to prompts from the service, or if the + // Lambda function fails to fulfill the intent. // - // * Fulfilled - // - Conveys that the Lambda function has sucessfully fulfilled the intent. + // * Fulfilled - Conveys that the + // Lambda function has sucessfully fulfilled the intent. // - // * - // ReadyForFulfillment - Conveys that the client has to fulfill the intent. + // * ReadyForFulfillment - + // Conveys that the client has to fulfill the intent. DialogState types.DialogState // The name of the current intent. @@ -146,18 +145,18 @@ type PutSessionOutput struct { // The format of the response message. One of the following values: // - // * - // PlainText - The message contains plain UTF-8 text. + // * PlainText - + // The message contains plain UTF-8 text. // - // * CustomPayload - The - // message is a custom format for the client. + // * CustomPayload - The message is a + // custom format for the client. // - // * SSML - The message contains - // text formatted for voice output. + // * SSML - The message contains text formatted for + // voice output. // - // * Composite - The message contains an - // escaped JSON object containing one or more messages from the groups that - // messages were assigned to when the intent was created. + // * Composite - The message contains an escaped JSON object + // containing one or more messages from the groups that messages were assigned to + // when the intent was created. MessageFormat types.MessageFormatType // Map of key/value pairs representing session-specific context information. 
diff --git a/service/lexruntimeservice/types/enums.go b/service/lexruntimeservice/types/enums.go index 68634d1d045..f0c86c23c8b 100644 --- a/service/lexruntimeservice/types/enums.go +++ b/service/lexruntimeservice/types/enums.go @@ -42,11 +42,11 @@ type DialogActionType string // Enum values for DialogActionType const ( - DialogActionTypeElicit_intent DialogActionType = "ElicitIntent" - DialogActionTypeConfirm_intent DialogActionType = "ConfirmIntent" - DialogActionTypeElicit_slot DialogActionType = "ElicitSlot" - DialogActionTypeClose DialogActionType = "Close" - DialogActionTypeDelegate DialogActionType = "Delegate" + DialogActionTypeElicitIntent DialogActionType = "ElicitIntent" + DialogActionTypeConfirmIntent DialogActionType = "ConfirmIntent" + DialogActionTypeElicitSlot DialogActionType = "ElicitSlot" + DialogActionTypeClose DialogActionType = "Close" + DialogActionTypeDelegate DialogActionType = "Delegate" ) // Values returns all known values for DialogActionType. Note that this can be @@ -66,12 +66,12 @@ type DialogState string // Enum values for DialogState const ( - DialogStateElicit_intent DialogState = "ElicitIntent" - DialogStateConfirm_intent DialogState = "ConfirmIntent" - DialogStateElicit_slot DialogState = "ElicitSlot" - DialogStateFulfilled DialogState = "Fulfilled" - DialogStateReady_for_fulfillment DialogState = "ReadyForFulfillment" - DialogStateFailed DialogState = "Failed" + DialogStateElicitIntent DialogState = "ElicitIntent" + DialogStateConfirmIntent DialogState = "ConfirmIntent" + DialogStateElicitSlot DialogState = "ElicitSlot" + DialogStateFulfilled DialogState = "Fulfilled" + DialogStateReadyForFulfillment DialogState = "ReadyForFulfillment" + DialogStateFailed DialogState = "Failed" ) // Values returns all known values for DialogState. Note that this can be expanded @@ -92,9 +92,9 @@ type FulfillmentState string // Enum values for FulfillmentState const ( - FulfillmentStateFulfilled FulfillmentState = "Fulfilled" - FulfillmentStateFailed FulfillmentState = "Failed" - FulfillmentStateReady_for_fulfillment FulfillmentState = "ReadyForFulfillment" + FulfillmentStateFulfilled FulfillmentState = "Fulfilled" + FulfillmentStateFailed FulfillmentState = "Failed" + FulfillmentStateReadyForFulfillment FulfillmentState = "ReadyForFulfillment" ) // Values returns all known values for FulfillmentState. Note that this can be @@ -112,10 +112,10 @@ type MessageFormatType string // Enum values for MessageFormatType const ( - MessageFormatTypePlain_text MessageFormatType = "PlainText" - MessageFormatTypeCustom_payload MessageFormatType = "CustomPayload" - MessageFormatTypeSsml MessageFormatType = "SSML" - MessageFormatTypeComposite MessageFormatType = "Composite" + MessageFormatTypePlainText MessageFormatType = "PlainText" + MessageFormatTypeCustomPayload MessageFormatType = "CustomPayload" + MessageFormatTypeSsml MessageFormatType = "SSML" + MessageFormatTypeComposite MessageFormatType = "Composite" ) // Values returns all known values for MessageFormatType. Note that this can be diff --git a/service/lexruntimeservice/types/errors.go b/service/lexruntimeservice/types/errors.go index f4dfec100d4..ef3e34a98bc 100644 --- a/service/lexruntimeservice/types/errors.go +++ b/service/lexruntimeservice/types/errors.go @@ -63,14 +63,14 @@ func (e *ConflictException) ErrorFault() smithy.ErrorFault { return smithy.Fault // One of the dependencies, such as AWS Lambda or Amazon Polly, threw an exception. 
// For example, // -// * If Amazon Lex does not have sufficient permissions to call a +// * If Amazon Lex does not have sufficient permissions to call a // Lambda function. // -// * If a Lambda function takes longer than 30 seconds to +// * If a Lambda function takes longer than 30 seconds to // execute. // -// * If a fulfillment Lambda function returns a Delegate dialog -// action without removing any slot values. +// * If a fulfillment Lambda function returns a Delegate dialog action +// without removing any slot values. type DependencyFailedException struct { Message *string } diff --git a/service/lexruntimeservice/types/types.go b/service/lexruntimeservice/types/types.go index 250b57f7b91..156f0b1eb1e 100644 --- a/service/lexruntimeservice/types/types.go +++ b/service/lexruntimeservice/types/types.go @@ -27,38 +27,37 @@ type DialogAction struct { // The next action that the bot should take in its interaction with the user. The // possible values are: // - // * ConfirmIntent - The next action is asking the user - // if the intent is complete and ready to be fulfilled. This is a yes/no question - // such as "Place the order?" + // * ConfirmIntent - The next action is asking the user if + // the intent is complete and ready to be fulfilled. This is a yes/no question such + // as "Place the order?" // - // * Close - Indicates that the there will not be a - // response from the user. For example, the statement "Your order has been placed" - // does not require a response. + // * Close - Indicates that the there will not be a response + // from the user. For example, the statement "Your order has been placed" does not + // require a response. // - // * Delegate - The next action is determined by - // Amazon Lex. + // * Delegate - The next action is determined by Amazon + // Lex. // - // * ElicitIntent - The next action is to determine the intent - // that the user wants to fulfill. + // * ElicitIntent - The next action is to determine the intent that the user + // wants to fulfill. // - // * ElicitSlot - The next action is to elicit - // a slot value from the user. + // * ElicitSlot - The next action is to elicit a slot value from + // the user. // // This member is required. Type DialogActionType // The fulfillment state of the intent. The possible values are: // - // * Failed - - // The Lambda function associated with the intent failed to fulfill the intent. - // + // * Failed - The + // Lambda function associated with the intent failed to fulfill the intent. // - // * Fulfilled - The intent has fulfilled by the Lambda function associated with - // the intent. + // * + // Fulfilled - The intent has fulfilled by the Lambda function associated with the + // intent. // - // * ReadyForFulfillment - All of the information necessary for - // the intent is present and the intent ready to be fulfilled by the client - // application. + // * ReadyForFulfillment - All of the information necessary for the intent + // is present and the intent ready to be fulfilled by the client application. FulfillmentState FulfillmentState // The name of the intent. @@ -70,15 +69,14 @@ type DialogAction struct { // * PlainText - The message contains plain UTF-8 text. // - // * CustomPayload - The + // * CustomPayload - The // message is a custom format for the client. // - // * SSML - The message contains - // text formatted for voice output. + // * SSML - The message contains text + // formatted for voice output. // - // * Composite - The message contains an - // escaped JSON object containing one or more messages. 
For more information, see - // Message Groups + // * Composite - The message contains an escaped JSON + // object containing one or more messages. For more information, see Message Groups // (https://docs.aws.amazon.com/lex/latest/dg/howitworks-manage-prompts.html). MessageFormat MessageFormatType @@ -127,19 +125,19 @@ type IntentSummary struct { // The next action that the bot should take in its interaction with the user. The // possible values are: // - // * ConfirmIntent - The next action is asking the user - // if the intent is complete and ready to be fulfilled. This is a yes/no question - // such as "Place the order?" + // * ConfirmIntent - The next action is asking the user if + // the intent is complete and ready to be fulfilled. This is a yes/no question such + // as "Place the order?" // - // * Close - Indicates that the there will not be a - // response from the user. For example, the statement "Your order has been placed" - // does not require a response. + // * Close - Indicates that the there will not be a response + // from the user. For example, the statement "Your order has been placed" does not + // require a response. // - // * ElicitIntent - The next action is to - // determine the intent that the user wants to fulfill. + // * ElicitIntent - The next action is to determine the intent + // that the user wants to fulfill. // - // * ElicitSlot - The - // next action is to elicit a slot value from the user. + // * ElicitSlot - The next action is to elicit a + // slot value from the user. // // This member is required. DialogActionType DialogActionType @@ -155,29 +153,28 @@ type IntentSummary struct { // user denies the intent, Amazon Lex sets this value to Denied. The possible // values are: // - // * Confirmed - The user has responded "Yes" to the confirmation + // * Confirmed - The user has responded "Yes" to the confirmation // prompt, confirming that the intent is complete and that it is ready to be // fulfilled. // - // * Denied - The user has responded "No" to the confirmation + // * Denied - The user has responded "No" to the confirmation // prompt. // - // * None - The user has never been prompted for confirmation; or, the + // * None - The user has never been prompted for confirmation; or, the // user was prompted but did not confirm or deny the prompt. ConfirmationStatus ConfirmationStatus // The fulfillment state of the intent. The possible values are: // - // * Failed - - // The Lambda function associated with the intent failed to fulfill the intent. - // + // * Failed - The + // Lambda function associated with the intent failed to fulfill the intent. // - // * Fulfilled - The intent has fulfilled by the Lambda function associated with - // the intent. + // * + // Fulfilled - The intent has fulfilled by the Lambda function associated with the + // intent. // - // * ReadyForFulfillment - All of the information necessary for - // the intent is present and the intent ready to be fulfilled by the client - // application. + // * ReadyForFulfillment - All of the information necessary for the intent + // is present and the intent ready to be fulfilled by the client application. FulfillmentState FulfillmentState // The name of the intent. 
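The DialogAction and IntentSummary hunks above only reflow field documentation; the behavioral change for callers is the earlier constant rename (for example MessageFormatTypePlain_text becomes MessageFormatTypePlainText). A hedged sketch of how calling code adapts, using only the Type, FulfillmentState, and MessageFormat fields that appear in this diff; any message-text field is left unset.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/lexruntimeservice/types"
)

// closeAction builds a DialogAction that ends the conversation after a
// successful fulfillment. DialogActionTypeClose and FulfillmentStateFulfilled
// keep their old spellings; MessageFormatTypePlainText is the renamed form of
// MessageFormatTypePlain_text from this patch.
func closeAction() types.DialogAction {
	return types.DialogAction{
		Type:             types.DialogActionTypeClose,
		FulfillmentState: types.FulfillmentStateFulfilled,
		MessageFormat:    types.MessageFormatTypePlainText, // was MessageFormatTypePlain_text
	}
}

func main() {
	a := closeAction()
	fmt.Println(a.Type, a.FulfillmentState, a.MessageFormat)
}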
diff --git a/service/licensemanager/api_op_CreateLicenseConfiguration.go b/service/licensemanager/api_op_CreateLicenseConfiguration.go index d9e518495d3..939b89d77a8 100644 --- a/service/licensemanager/api_op_CreateLicenseConfiguration.go +++ b/service/licensemanager/api_op_CreateLicenseConfiguration.go @@ -58,17 +58,17 @@ type CreateLicenseConfigurationInput struct { // #allowedTenancy=EC2-DedicatedHost). The available rules vary by dimension, as // follows. // - // * Cores dimension: allowedTenancy | licenseAffinityToHost | + // * Cores dimension: allowedTenancy | licenseAffinityToHost | // maximumCores | minimumCores // - // * Instances dimension: allowedTenancy | + // * Instances dimension: allowedTenancy | // maximumCores | minimumCores | maximumSockets | minimumSockets | maximumVcpus | // minimumVcpus // - // * Sockets dimension: allowedTenancy | licenseAffinityToHost | + // * Sockets dimension: allowedTenancy | licenseAffinityToHost | // maximumSockets | minimumSockets // - // * vCPUs dimension: allowedTenancy | + // * vCPUs dimension: allowedTenancy | // honorVcpuOptimization | maximumVcpus | minimumVcpus // // The unit for diff --git a/service/licensemanager/api_op_ListLicenseConfigurations.go b/service/licensemanager/api_op_ListLicenseConfigurations.go index 6b4936e8dd2..e166c7fa07d 100644 --- a/service/licensemanager/api_op_ListLicenseConfigurations.go +++ b/service/licensemanager/api_op_ListLicenseConfigurations.go @@ -32,16 +32,16 @@ type ListLicenseConfigurationsInput struct { // Filters to scope the results. The following filters and logical operators are // supported: // - // * licenseCountingType - The dimension on which licenses are - // counted. Possible values are vCPU | Instance | Core | Socket. Logical operators - // are EQUALS | NOT_EQUALS. + // * licenseCountingType - The dimension on which licenses are counted. + // Possible values are vCPU | Instance | Core | Socket. Logical operators are + // EQUALS | NOT_EQUALS. // - // * enforceLicenseCount - A Boolean value that - // indicates whether hard license enforcement is used. Logical operators are EQUALS - // | NOT_EQUALS. + // * enforceLicenseCount - A Boolean value that indicates + // whether hard license enforcement is used. Logical operators are EQUALS | + // NOT_EQUALS. // - // * usagelimitExceeded - A Boolean value that indicates whether - // the available licenses have been exceeded. Logical operators are EQUALS | + // * usagelimitExceeded - A Boolean value that indicates whether the + // available licenses have been exceeded. Logical operators are EQUALS | // NOT_EQUALS. Filters []*types.Filter diff --git a/service/licensemanager/api_op_ListResourceInventory.go b/service/licensemanager/api_op_ListResourceInventory.go index a92298fcf62..2cb3ab6bf7f 100644 --- a/service/licensemanager/api_op_ListResourceInventory.go +++ b/service/licensemanager/api_op_ListResourceInventory.go @@ -32,22 +32,22 @@ type ListResourceInventoryInput struct { // Filters to scope the results. The following filters and logical operators are // supported: // - // * account_id - The ID of the AWS account that owns the resource. + // * account_id - The ID of the AWS account that owns the resource. // Logical operators are EQUALS | NOT_EQUALS. // - // * application_name - The name of - // the application. Logical operators are EQUALS | BEGINS_WITH. + // * application_name - The name of the + // application. Logical operators are EQUALS | BEGINS_WITH. // - // * - // license_included - The type of license included. 
Logical operators are EQUALS | - // NOT_EQUALS. Possible values are sql-server-enterprise | sql-server-standard | - // sql-server-web | windows-server-datacenter. + // * license_included - + // The type of license included. Logical operators are EQUALS | NOT_EQUALS. + // Possible values are sql-server-enterprise | sql-server-standard | sql-server-web + // | windows-server-datacenter. // - // * platform - The platform of - // the resource. Logical operators are EQUALS | BEGINS_WITH. + // * platform - The platform of the resource. Logical + // operators are EQUALS | BEGINS_WITH. // - // * resource_id - - // The ID of the resource. Logical operators are EQUALS | NOT_EQUALS. + // * resource_id - The ID of the resource. + // Logical operators are EQUALS | NOT_EQUALS. Filters []*types.InventoryFilter // Maximum number of results to return in a single call. diff --git a/service/licensemanager/api_op_ListUsageForLicenseConfiguration.go b/service/licensemanager/api_op_ListUsageForLicenseConfiguration.go index 6db6b882090..966f510b6ad 100644 --- a/service/licensemanager/api_op_ListUsageForLicenseConfiguration.go +++ b/service/licensemanager/api_op_ListUsageForLicenseConfiguration.go @@ -40,15 +40,15 @@ type ListUsageForLicenseConfigurationInput struct { // Filters to scope the results. The following filters and logical operators are // supported: // - // * resourceArn - The ARN of the license configuration resource. + // * resourceArn - The ARN of the license configuration resource. // Logical operators are EQUALS | NOT_EQUALS. // - // * resourceType - The resource - // type (EC2_INSTANCE | EC2_HOST | EC2_AMI | SYSTEMS_MANAGER_MANAGED_INSTANCE). - // Logical operators are EQUALS | NOT_EQUALS. + // * resourceType - The resource type + // (EC2_INSTANCE | EC2_HOST | EC2_AMI | SYSTEMS_MANAGER_MANAGED_INSTANCE). Logical + // operators are EQUALS | NOT_EQUALS. // - // * resourceAccount - The ID of - // the account that owns the resource. Logical operators are EQUALS | NOT_EQUALS. + // * resourceAccount - The ID of the account + // that owns the resource. Logical operators are EQUALS | NOT_EQUALS. Filters []*types.Filter // Maximum number of results to return in a single call. diff --git a/service/licensemanager/types/enums.go b/service/licensemanager/types/enums.go index 9bc12bc47da..6473a9453aa 100644 --- a/service/licensemanager/types/enums.go +++ b/service/licensemanager/types/enums.go @@ -6,10 +6,10 @@ type InventoryFilterCondition string // Enum values for InventoryFilterCondition const ( - InventoryFilterConditionEquals InventoryFilterCondition = "EQUALS" - InventoryFilterConditionNot_equals InventoryFilterCondition = "NOT_EQUALS" - InventoryFilterConditionBegins_with InventoryFilterCondition = "BEGINS_WITH" - InventoryFilterConditionContains InventoryFilterCondition = "CONTAINS" + InventoryFilterConditionEquals InventoryFilterCondition = "EQUALS" + InventoryFilterConditionNotEquals InventoryFilterCondition = "NOT_EQUALS" + InventoryFilterConditionBeginsWith InventoryFilterCondition = "BEGINS_WITH" + InventoryFilterConditionContains InventoryFilterCondition = "CONTAINS" ) // Values returns all known values for InventoryFilterCondition. 
Note that this can @@ -68,11 +68,11 @@ type ResourceType string // Enum values for ResourceType const ( - ResourceTypeEc2_instance ResourceType = "EC2_INSTANCE" - ResourceTypeEc2_host ResourceType = "EC2_HOST" - ResourceTypeEc2_ami ResourceType = "EC2_AMI" - ResourceTypeRds ResourceType = "RDS" - ResourceTypeSystems_manager_managed_instance ResourceType = "SYSTEMS_MANAGER_MANAGED_INSTANCE" + ResourceTypeEc2Instance ResourceType = "EC2_INSTANCE" + ResourceTypeEc2Host ResourceType = "EC2_HOST" + ResourceTypeEc2Ami ResourceType = "EC2_AMI" + ResourceTypeRds ResourceType = "RDS" + ResourceTypeSystemsManagerManagedInstance ResourceType = "SYSTEMS_MANAGER_MANAGED_INSTANCE" ) // Values returns all known values for ResourceType. Note that this can be expanded diff --git a/service/licensemanager/types/types.go b/service/licensemanager/types/types.go index 190aa25409a..9e707b43a1c 100644 --- a/service/licensemanager/types/types.go +++ b/service/licensemanager/types/types.go @@ -216,36 +216,36 @@ type ProductInformation struct { // Product information filters. The following filters and logical operators are // supported when the resource type is SSM_MANAGED: // - // * Application Name - The - // name of the application. Logical operator is EQUALS. + // * Application Name - The name + // of the application. Logical operator is EQUALS. // - // * Application - // Publisher - The publisher of the application. Logical operator is EQUALS. + // * Application Publisher - The + // publisher of the application. Logical operator is EQUALS. // - // * - // Application Version - The version of the application. Logical operator is - // EQUALS. + // * Application Version + // - The version of the application. Logical operator is EQUALS. // - // * Platform Name - The name of the platform. Logical operator is - // EQUALS. + // * Platform Name - + // The name of the platform. Logical operator is EQUALS. // - // * Platform Type - The platform type. Logical operator is EQUALS. + // * Platform Type - The + // platform type. Logical operator is EQUALS. // + // * License Included - The type of + // license included. Logical operators are EQUALS and NOT_EQUALS. Possible values + // are: sql-server-enterprise | sql-server-standard | sql-server-web | + // windows-server-datacenter. // - // * License Included - The type of license included. Logical operators are EQUALS - // and NOT_EQUALS. Possible values are: sql-server-enterprise | sql-server-standard - // | sql-server-web | windows-server-datacenter. + // The following filters and logical operators are + // supported when the resource type is RDS: // - // The following filters and logical - // operators are supported when the resource type is RDS: + // * Engine Edition - The edition of the + // database engine. Logical operator is EQUALS. Possible values are: oracle-ee | + // oracle-se | oracle-se1 | oracle-se2. // - // * Engine Edition - - // The edition of the database engine. Logical operator is EQUALS. Possible values - // are: oracle-ee | oracle-se | oracle-se1 | oracle-se2. - // - // * License Pack - The - // license pack. Logical operator is EQUALS. Possible values are: data guard | - // diagnostic pack sqlt | tuning pack sqlt | ols | olap. + // * License Pack - The license pack. Logical + // operator is EQUALS. Possible values are: data guard | diagnostic pack sqlt | + // tuning pack sqlt | ols | olap. // // This member is required. 
ProductInformationFilterList []*ProductInformationFilter diff --git a/service/lightsail/api_op_CopySnapshot.go b/service/lightsail/api_op_CopySnapshot.go index 8b53b83eb26..d8291a3d76c 100644 --- a/service/lightsail/api_op_CopySnapshot.go +++ b/service/lightsail/api_op_CopySnapshot.go @@ -50,44 +50,42 @@ type CopySnapshotInput struct { // operation to identify the dates of the available automatic snapshots. // Constraints: // - // * Must be specified in YYYY-MM-DD format. + // * Must be specified in YYYY-MM-DD format. // - // * This - // parameter cannot be defined together with the use latest restorable auto - // snapshot parameter. The restore date and use latest restorable auto snapshot - // parameters are mutually exclusive. + // * This parameter cannot + // be defined together with the use latest restorable auto snapshot parameter. The + // restore date and use latest restorable auto snapshot parameters are mutually + // exclusive. // - // * Define this parameter only when - // copying an automatic snapshot as a manual snapshot. For more information, see - // the Lightsail Dev Guide + // * Define this parameter only when copying an automatic snapshot as a + // manual snapshot. For more information, see the Lightsail Dev Guide // (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-keeping-automatic-snapshots). RestoreDate *string // The name of the source instance or disk from which the source automatic snapshot // was created. Constraint: // - // * Define this parameter only when copying an - // automatic snapshot as a manual snapshot. For more information, see the Lightsail - // Dev Guide + // * Define this parameter only when copying an automatic + // snapshot as a manual snapshot. For more information, see the Lightsail Dev Guide // (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-keeping-automatic-snapshots). SourceResourceName *string // The name of the source manual snapshot to copy. Constraint: // - // * Define this + // * Define this // parameter only when copying a manual snapshot as another manual snapshot. SourceSnapshotName *string // A Boolean value to indicate whether to use the latest available automatic // snapshot of the specified source instance or disk. Constraints: // - // * This + // * This // parameter cannot be defined together with the restore date parameter. The use // latest restorable auto snapshot and restore date parameters are mutually // exclusive. // - // * Define this parameter only when copying an automatic snapshot - // as a manual snapshot. For more information, see the Lightsail Dev Guide + // * Define this parameter only when copying an automatic snapshot as a + // manual snapshot. For more information, see the Lightsail Dev Guide // (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-keeping-automatic-snapshots). UseLatestRestorableAutoSnapshot *bool } diff --git a/service/lightsail/api_op_CreateContactMethod.go b/service/lightsail/api_op_CreateContactMethod.go index 3d3b0b23675..32f8f0b0ae4 100644 --- a/service/lightsail/api_op_CreateContactMethod.go +++ b/service/lightsail/api_op_CreateContactMethod.go @@ -50,25 +50,25 @@ type CreateContactMethodInput struct { // The protocol of the contact method, such as Email or SMS (text messaging). The // SMS protocol is supported only in the following AWS Regions. // - // * US East (N. + // * US East (N. 
// Virginia) (us-east-1) // - // * US West (Oregon) (us-west-2) + // * US West (Oregon) (us-west-2) // - // * Europe - // (Ireland) (eu-west-1) + // * Europe (Ireland) + // (eu-west-1) // - // * Asia Pacific (Tokyo) (ap-northeast-1) + // * Asia Pacific (Tokyo) (ap-northeast-1) // - // * Asia - // Pacific (Singapore) (ap-southeast-1) + // * Asia Pacific (Singapore) + // (ap-southeast-1) // - // * Asia Pacific (Sydney) - // (ap-southeast-2) + // * Asia Pacific (Sydney) (ap-southeast-2) // - // For a list of countries/regions where SMS text messages can be - // sent, and the latest AWS Regions where SMS text messaging is supported, see - // Supported Regions and Countries + // For a list of + // countries/regions where SMS text messages can be sent, and the latest AWS + // Regions where SMS text messaging is supported, see Supported Regions and + // Countries // (https://docs.aws.amazon.com/sns/latest/dg/sns-supported-regions-countries.html) // in the Amazon SNS Developer Guide. For more information about notifications in // Amazon Lightsail, see Notifications in Amazon Lightsail diff --git a/service/lightsail/api_op_CreateDiskFromSnapshot.go b/service/lightsail/api_op_CreateDiskFromSnapshot.go index 379554d9572..18b2c328444 100644 --- a/service/lightsail/api_op_CreateDiskFromSnapshot.go +++ b/service/lightsail/api_op_CreateDiskFromSnapshot.go @@ -59,8 +59,8 @@ type CreateDiskFromSnapshotInput struct { // The name of the disk snapshot (e.g., my-snapshot) from which to create the new // storage disk. Constraint: // - // * This parameter cannot be defined together with - // the source disk name parameter. The disk snapshot name and source disk name + // * This parameter cannot be defined together with the + // source disk name parameter. The disk snapshot name and source disk name // parameters are mutually exclusive. DiskSnapshotName *string @@ -68,29 +68,27 @@ type CreateDiskFromSnapshotInput struct { // snapshots operation to identify the dates of the available automatic snapshots. // Constraints: // - // * Must be specified in YYYY-MM-DD format. + // * Must be specified in YYYY-MM-DD format. // - // * This - // parameter cannot be defined together with the use latest restorable auto - // snapshot parameter. The restore date and use latest restorable auto snapshot - // parameters are mutually exclusive. + // * This parameter cannot + // be defined together with the use latest restorable auto snapshot parameter. The + // restore date and use latest restorable auto snapshot parameters are mutually + // exclusive. // - // * Define this parameter only when - // creating a new disk from an automatic snapshot. For more information, see the - // Lightsail Dev Guide + // * Define this parameter only when creating a new disk from an + // automatic snapshot. For more information, see the Lightsail Dev Guide // (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-configuring-automatic-snapshots). RestoreDate *string // The name of the source disk from which the source automatic snapshot was // created. Constraints: // - // * This parameter cannot be defined together with the - // disk snapshot name parameter. The source disk name and disk snapshot name - // parameters are mutually exclusive. + // * This parameter cannot be defined together with the disk + // snapshot name parameter. The source disk name and disk snapshot name parameters + // are mutually exclusive. // - // * Define this parameter only when - // creating a new disk from an automatic snapshot. 
For more information, see the - // Lightsail Dev Guide + // * Define this parameter only when creating a new disk + // from an automatic snapshot. For more information, see the Lightsail Dev Guide // (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-configuring-automatic-snapshots). SourceDiskName *string @@ -101,13 +99,13 @@ type CreateDiskFromSnapshotInput struct { // A Boolean value to indicate whether to use the latest available automatic // snapshot. Constraints: // - // * This parameter cannot be defined together with the + // * This parameter cannot be defined together with the // restore date parameter. The use latest restorable auto snapshot and restore date // parameters are mutually exclusive. // - // * Define this parameter only when - // creating a new disk from an automatic snapshot. For more information, see the - // Lightsail Dev Guide + // * Define this parameter only when creating a + // new disk from an automatic snapshot. For more information, see the Lightsail Dev + // Guide // (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-configuring-automatic-snapshots). UseLatestRestorableAutoSnapshot *bool } diff --git a/service/lightsail/api_op_CreateInstancesFromSnapshot.go b/service/lightsail/api_op_CreateInstancesFromSnapshot.go index ada1da2a83f..c42a8dd8cd8 100644 --- a/service/lightsail/api_op_CreateInstancesFromSnapshot.go +++ b/service/lightsail/api_op_CreateInstancesFromSnapshot.go @@ -64,7 +64,7 @@ type CreateInstancesFromSnapshotInput struct { // Use the get instance snapshots operation to return information about your // existing snapshots. Constraint: // - // * This parameter cannot be defined together + // * This parameter cannot be defined together // with the source instance name parameter. The instance snapshot name and source // instance name parameters are mutually exclusive. InstanceSnapshotName *string @@ -76,27 +76,26 @@ type CreateInstancesFromSnapshotInput struct { // snapshots operation to identify the dates of the available automatic snapshots. // Constraints: // - // * Must be specified in YYYY-MM-DD format. + // * Must be specified in YYYY-MM-DD format. // - // * This - // parameter cannot be defined together with the use latest restorable auto - // snapshot parameter. The restore date and use latest restorable auto snapshot - // parameters are mutually exclusive. + // * This parameter cannot + // be defined together with the use latest restorable auto snapshot parameter. The + // restore date and use latest restorable auto snapshot parameters are mutually + // exclusive. // - // * Define this parameter only when - // creating a new instance from an automatic snapshot. For more information, see - // the Lightsail Dev Guide + // * Define this parameter only when creating a new instance from an + // automatic snapshot. For more information, see the Lightsail Dev Guide // (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-configuring-automatic-snapshots). RestoreDate *string // The name of the source instance from which the source automatic snapshot was // created. Constraints: // - // * This parameter cannot be defined together with the + // * This parameter cannot be defined together with the // instance snapshot name parameter. The source instance name and instance snapshot // name parameters are mutually exclusive. // - // * Define this parameter only when + // * Define this parameter only when // creating a new instance from an automatic snapshot. 
For more information, see // the Lightsail Dev Guide // (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-configuring-automatic-snapshots). @@ -109,13 +108,13 @@ type CreateInstancesFromSnapshotInput struct { // A Boolean value to indicate whether to use the latest available automatic // snapshot. Constraints: // - // * This parameter cannot be defined together with the + // * This parameter cannot be defined together with the // restore date parameter. The use latest restorable auto snapshot and restore date // parameters are mutually exclusive. // - // * Define this parameter only when - // creating a new instance from an automatic snapshot. For more information, see - // the Lightsail Dev Guide + // * Define this parameter only when creating a + // new instance from an automatic snapshot. For more information, see the Lightsail + // Dev Guide // (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-configuring-automatic-snapshots). UseLatestRestorableAutoSnapshot *bool diff --git a/service/lightsail/api_op_CreateRelationalDatabase.go b/service/lightsail/api_op_CreateRelationalDatabase.go index ba96065fedf..a4ba66090d9 100644 --- a/service/lightsail/api_op_CreateRelationalDatabase.go +++ b/service/lightsail/api_op_CreateRelationalDatabase.go @@ -35,27 +35,27 @@ type CreateRelationalDatabaseInput struct { // The name of the master database created when the Lightsail database resource is // created. Constraints: // - // * Must contain from 1 to 64 alphanumeric - // characters. + // * Must contain from 1 to 64 alphanumeric characters. // - // * Cannot be a word reserved by the specified database engine + // * + // Cannot be a word reserved by the specified database engine // // This member is required. MasterDatabaseName *string // The master user name for your new database. Constraints: // - // * Master user name - // is required. + // * Master user name is + // required. // - // * Must contain from 1 to 16 alphanumeric characters. + // * Must contain from 1 to 16 alphanumeric characters. // - // * - // The first character must be a letter. + // * The first + // character must be a letter. // - // * Cannot be a reserved word for the - // database engine you choose. For more information about reserved words in MySQL - // 5.6 or 5.7, see the Keywords and Reserved Words articles for MySQL 5.6 + // * Cannot be a reserved word for the database engine + // you choose. For more information about reserved words in MySQL 5.6 or 5.7, see + // the Keywords and Reserved Words articles for MySQL 5.6 // (https://dev.mysql.com/doc/refman/5.6/en/keywords.html) or MySQL 5.7 // (https://dev.mysql.com/doc/refman/5.7/en/keywords.html) respectively. // @@ -78,11 +78,11 @@ type CreateRelationalDatabaseInput struct { // The name to use for your new database. Constraints: // - // * Must contain from 2 - // to 255 alphanumeric characters, or hyphens. + // * Must contain from 2 to + // 255 alphanumeric characters, or hyphens. // - // * The first and last character - // must be a letter or number. + // * The first and last character must be + // a letter or number. // // This member is required. RelationalDatabaseName *string @@ -107,16 +107,15 @@ type CreateRelationalDatabaseInput struct { // guide in the Amazon Relational Database Service (Amazon RDS) documentation. // Constraints: // - // * Must be in the hh24:mi-hh24:mi format. Example: - // 16:00-16:30 + // * Must be in the hh24:mi-hh24:mi format. 
Example: 16:00-16:30 // - // * Specified in Coordinated Universal Time (UTC). + // * + // Specified in Coordinated Universal Time (UTC). // - // * Must - // not conflict with the preferred maintenance window. + // * Must not conflict with the + // preferred maintenance window. // - // * Must be at least 30 - // minutes. + // * Must be at least 30 minutes. PreferredBackupWindow *string // The weekly time range during which system maintenance can occur on your new @@ -124,18 +123,17 @@ type CreateRelationalDatabaseInput struct { // block of time for each AWS Region, occurring on a random day of the week. // Constraints: // - // * Must be in the ddd:hh24:mi-ddd:hh24:mi format. + // * Must be in the ddd:hh24:mi-ddd:hh24:mi format. // - // * Valid - // days: Mon, Tue, Wed, Thu, Fri, Sat, Sun. + // * Valid days: + // Mon, Tue, Wed, Thu, Fri, Sat, Sun. // - // * Must be at least 30 minutes. + // * Must be at least 30 minutes. // + // * Specified + // in Coordinated Universal Time (UTC). // - // * Specified in Coordinated Universal Time (UTC). - // - // * Example: - // Tue:17:00-Tue:17:30 + // * Example: Tue:17:00-Tue:17:30 PreferredMaintenanceWindow *string // Specifies the accessibility options for your new database. A value of true diff --git a/service/lightsail/api_op_CreateRelationalDatabaseFromSnapshot.go b/service/lightsail/api_op_CreateRelationalDatabaseFromSnapshot.go index 447801c305c..bd882d9099f 100644 --- a/service/lightsail/api_op_CreateRelationalDatabaseFromSnapshot.go +++ b/service/lightsail/api_op_CreateRelationalDatabaseFromSnapshot.go @@ -39,11 +39,11 @@ type CreateRelationalDatabaseFromSnapshotInput struct { // The name to use for your new database. Constraints: // - // * Must contain from 2 - // to 255 alphanumeric characters, or hyphens. + // * Must contain from 2 to + // 255 alphanumeric characters, or hyphens. // - // * The first and last character - // must be a letter or number. + // * The first and last character must be + // a letter or number. // // This member is required. RelationalDatabaseName *string @@ -72,18 +72,18 @@ type CreateRelationalDatabaseFromSnapshotInput struct { // The date and time to restore your database from. Constraints: // - // * Must be - // before the latest restorable time for the database. + // * Must be before + // the latest restorable time for the database. // - // * Cannot be specified - // if the use latest restorable time parameter is true. + // * Cannot be specified if the use + // latest restorable time parameter is true. // - // * Specified in - // Coordinated Universal Time (UTC). + // * Specified in Coordinated Universal + // Time (UTC). // - // * Specified in the Unix time format. For - // example, if you wish to use a restore time of October 1, 2018, at 8 PM UTC, then - // you input 1538424000 as the restore time. + // * Specified in the Unix time format. For example, if you wish to + // use a restore time of October 1, 2018, at 8 PM UTC, then you input 1538424000 as + // the restore time. RestoreTime *time.Time // The name of the source database. diff --git a/service/lightsail/api_op_CreateRelationalDatabaseSnapshot.go b/service/lightsail/api_op_CreateRelationalDatabaseSnapshot.go index 6fafc606a6d..3a636f4c74d 100644 --- a/service/lightsail/api_op_CreateRelationalDatabaseSnapshot.go +++ b/service/lightsail/api_op_CreateRelationalDatabaseSnapshot.go @@ -41,11 +41,11 @@ type CreateRelationalDatabaseSnapshotInput struct { // The name for your new database snapshot. 
Constraints: // - // * Must contain from 2 - // to 255 alphanumeric characters, or hyphens. + // * Must contain from 2 to + // 255 alphanumeric characters, or hyphens. // - // * The first and last character - // must be a letter or number. + // * The first and last character must be + // a letter or number. // // This member is required. RelationalDatabaseSnapshotName *string diff --git a/service/lightsail/api_op_DeleteRelationalDatabase.go b/service/lightsail/api_op_DeleteRelationalDatabase.go index 95cdabc6575..9fe5fd69f78 100644 --- a/service/lightsail/api_op_DeleteRelationalDatabase.go +++ b/service/lightsail/api_op_DeleteRelationalDatabase.go @@ -43,10 +43,10 @@ type DeleteRelationalDatabaseInput struct { // specifying the skip final snapshot parameter to true results in an error. // Constraints: // - // * Must contain from 2 to 255 alphanumeric characters, or + // * Must contain from 2 to 255 alphanumeric characters, or // hyphens. // - // * The first and last character must be a letter or number. + // * The first and last character must be a letter or number. FinalRelationalDatabaseSnapshotName *string // Determines whether a final database snapshot is created before your database is diff --git a/service/lightsail/api_op_GetDistributionMetricData.go b/service/lightsail/api_op_GetDistributionMetricData.go index cd3b2955983..6850edc58f1 100644 --- a/service/lightsail/api_op_GetDistributionMetricData.go +++ b/service/lightsail/api_op_GetDistributionMetricData.go @@ -43,10 +43,10 @@ type GetDistributionMetricDataInput struct { // The end of the time interval for which to get metric data. Constraints: // - // * + // * // Specified in Coordinated Universal Time (UTC). // - // * Specified in the Unix time + // * Specified in the Unix time // format. For example, if you wish to use an end time of October 1, 2018, at 9 PM // UTC, specify 1538427600 as the end time. // @@ -61,37 +61,37 @@ type GetDistributionMetricDataInput struct { // names are listed below, along with the most useful statistics to include in your // request, and the published unit value. // - // * Requests - The total number of - // viewer requests received by your Lightsail distribution, for all HTTP methods, - // and for both HTTP and HTTPS requests. Statistics: The most useful statistic is - // Sum. Unit: The published unit is None. - // - // * BytesDownloaded - The number of - // bytes downloaded by viewers for GET, HEAD, and OPTIONS requests. Statistics: The - // most useful statistic is Sum. Unit: The published unit is None. - // - // * - // BytesUploaded - The number of bytes uploaded to your origin by your Lightsail - // distribution, using POST and PUT requests. Statistics: The most useful statistic - // is Sum. Unit: The published unit is None. - // - // * TotalErrorRate - The percentage - // of all viewer requests for which the response's HTTP status code was 4xx or 5xx. - // Statistics: The most useful statistic is Average. Unit: The published unit is - // Percent. - // - // * 4xxErrorRate - The percentage of all viewer requests for which - // the response's HTTP status cod was 4xx. In these cases, the client or client - // viewer may have made an error. For example, a status code of 404 (Not Found) - // means that the client requested an object that could not be found. Statistics: + // * Requests - The total number of viewer + // requests received by your Lightsail distribution, for all HTTP methods, and for + // both HTTP and HTTPS requests. Statistics: The most useful statistic is Sum. 
+ // Unit: The published unit is None. + // + // * BytesDownloaded - The number of bytes + // downloaded by viewers for GET, HEAD, and OPTIONS requests. Statistics: The most + // useful statistic is Sum. Unit: The published unit is None. + // + // * BytesUploaded - + // The number of bytes uploaded to your origin by your Lightsail distribution, + // using POST and PUT requests. Statistics: The most useful statistic is Sum. Unit: + // The published unit is None. + // + // * TotalErrorRate - The percentage of all viewer + // requests for which the response's HTTP status code was 4xx or 5xx. Statistics: // The most useful statistic is Average. Unit: The published unit is Percent. // + // * + // 4xxErrorRate - The percentage of all viewer requests for which the response's + // HTTP status cod was 4xx. In these cases, the client or client viewer may have + // made an error. For example, a status code of 404 (Not Found) means that the + // client requested an object that could not be found. Statistics: The most useful + // statistic is Average. Unit: The published unit is Percent. // - // * 5xxErrorRate - The percentage of all viewer requests for which the response's - // HTTP status code was 5xx. In these cases, the origin server did not satisfy the - // requests. For example, a status code of 503 (Service Unavailable) means that the - // origin server is currently unavailable. Statistics: The most useful statistic is - // Average. Unit: The published unit is Percent. + // * 5xxErrorRate - The + // percentage of all viewer requests for which the response's HTTP status code was + // 5xx. In these cases, the origin server did not satisfy the requests. For + // example, a status code of 503 (Service Unavailable) means that the origin server + // is currently unavailable. Statistics: The most useful statistic is Average. + // Unit: The published unit is Percent. // // This member is required. MetricName types.DistributionMetricName @@ -103,10 +103,10 @@ type GetDistributionMetricDataInput struct { // The start of the time interval for which to get metric data. Constraints: // - // * + // * // Specified in Coordinated Universal Time (UTC). // - // * Specified in the Unix time + // * Specified in the Unix time // format. For example, if you wish to use a start time of October 1, 2018, at 8 PM // UTC, specify 1538424000 as the start time. // @@ -119,26 +119,26 @@ type GetDistributionMetricDataInput struct { // The statistic for the metric. The following statistics are available: // - // * - // Minimum - The lowest value observed during the specified period. Use this value - // to determine low volumes of activity for your application. + // * Minimum + // - The lowest value observed during the specified period. Use this value to + // determine low volumes of activity for your application. // - // * Maximum - The - // highest value observed during the specified period. Use this value to determine - // high volumes of activity for your application. + // * Maximum - The highest + // value observed during the specified period. Use this value to determine high + // volumes of activity for your application. // - // * Sum - All values submitted - // for the matching metric added together. You can use this statistic to determine - // the total volume of a metric. + // * Sum - All values submitted for the + // matching metric added together. You can use this statistic to determine the + // total volume of a metric. // - // * Average - The value of Sum / SampleCount - // during the specified period. 
By comparing this statistic with the Minimum and - // Maximum values, you can determine the full scope of a metric and how close the - // average use is to the Minimum and Maximum values. This comparison helps you to - // know when to increase or decrease your resources. + // * Average - The value of Sum / SampleCount during the + // specified period. By comparing this statistic with the Minimum and Maximum + // values, you can determine the full scope of a metric and how close the average + // use is to the Minimum and Maximum values. This comparison helps you to know when + // to increase or decrease your resources. // - // * SampleCount - The - // count, or number, of data points used for the statistical calculation. + // * SampleCount - The count, or number, + // of data points used for the statistical calculation. // // This member is required. Statistics []types.MetricStatistic diff --git a/service/lightsail/api_op_GetInstanceMetricData.go b/service/lightsail/api_op_GetInstanceMetricData.go index a075754663c..7c3aaafe834 100644 --- a/service/lightsail/api_op_GetInstanceMetricData.go +++ b/service/lightsail/api_op_GetInstanceMetricData.go @@ -47,7 +47,7 @@ type GetInstanceMetricDataInput struct { // are listed below, along with the most useful statistics to include in your // request, and the published unit value. // - // * BurstCapacityPercentage - The + // * BurstCapacityPercentage - The // percentage of CPU performance available for your instance to burst above its // baseline. Your instance continuously accrues and consumes burst capacity. Burst // capacity stops accruing when your instance's BurstCapacityPercentage reaches @@ -57,28 +57,28 @@ type GetInstanceMetricDataInput struct { // Statistics: The most useful statistics are Maximum and Average. Unit: The // published unit is Percent. // - // * BurstCapacityTime - The available amount of - // time for your instance to burst at 100% CPU utilization. Your instance - // continuously accrues and consumes burst capacity. Burst capacity time stops - // accruing when your instance's BurstCapacityPercentage metric reaches 100%. Burst - // capacity time is consumed at the full rate only when your instance operates at - // 100% CPU utilization. For example, if your instance operates at 50% CPU - // utilization in the burstable zone for a 5-minute period, then it consumes CPU - // burst capacity minutes at a 50% rate in that period. Your instance consumed 2 - // minutes and 30 seconds of CPU burst capacity minutes in the 5-minute period. For - // more information, see Viewing instance burst capacity in Amazon Lightsail + // * BurstCapacityTime - The available amount of time + // for your instance to burst at 100% CPU utilization. Your instance continuously + // accrues and consumes burst capacity. Burst capacity time stops accruing when + // your instance's BurstCapacityPercentage metric reaches 100%. Burst capacity time + // is consumed at the full rate only when your instance operates at 100% CPU + // utilization. For example, if your instance operates at 50% CPU utilization in + // the burstable zone for a 5-minute period, then it consumes CPU burst capacity + // minutes at a 50% rate in that period. Your instance consumed 2 minutes and 30 + // seconds of CPU burst capacity minutes in the 5-minute period. For more + // information, see Viewing instance burst capacity in Amazon Lightsail // (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-viewing-instance-burst-capacity). 
// Statistics: The most useful statistics are Maximum and Average. Unit: The // published unit is Seconds. // - // * CPUUtilization - The percentage of allocated + // * CPUUtilization - The percentage of allocated // compute units that are currently in use on the instance. This metric identifies // the processing power to run the applications on the instance. Tools in your // operating system can show a lower percentage than Lightsail when the instance is // not allocated a full processor core. Statistics: The most useful statistics are // Maximum and Average. Unit: The published unit is Percent. // - // * NetworkIn - The + // * NetworkIn - The // number of bytes received on all network interfaces by the instance. This metric // identifies the volume of incoming network traffic to the instance. The number // reported is the number of bytes received during the period. Because this metric @@ -86,31 +86,30 @@ type GetInstanceMetricDataInput struct { // Bytes/second. Statistics: The most useful statistic is Sum. Unit: The published // unit is Bytes. // - // * NetworkOut - The number of bytes sent out on all network + // * NetworkOut - The number of bytes sent out on all network // interfaces by the instance. This metric identifies the volume of outgoing // network traffic from the instance. The number reported is the number of bytes // sent during the period. Because this metric is reported in 5-minute intervals, // divide the reported number by 300 to find Bytes/second. Statistics: The most // useful statistic is Sum. Unit: The published unit is Bytes. // - // * - // StatusCheckFailed - Reports whether the instance passed or failed both the - // instance status check and the system status check. This metric can be either 0 - // (passed) or 1 (failed). This metric data is available in 1-minute (60 seconds) - // granularity. Statistics: The most useful statistic is Sum. Unit: The published - // unit is Count. - // - // * StatusCheckFailed_Instance - Reports whether the instance - // passed or failed the instance status check. This metric can be either 0 (passed) - // or 1 (failed). This metric data is available in 1-minute (60 seconds) - // granularity. Statistics: The most useful statistic is Sum. Unit: The published - // unit is Count. - // - // * StatusCheckFailed_System - Reports whether the instance - // passed or failed the system status check. This metric can be either 0 (passed) - // or 1 (failed). This metric data is available in 1-minute (60 seconds) - // granularity. Statistics: The most useful statistic is Sum. Unit: The published - // unit is Count. + // * StatusCheckFailed + // - Reports whether the instance passed or failed both the instance status check + // and the system status check. This metric can be either 0 (passed) or 1 (failed). + // This metric data is available in 1-minute (60 seconds) granularity. Statistics: + // The most useful statistic is Sum. Unit: The published unit is Count. + // + // * + // StatusCheckFailed_Instance - Reports whether the instance passed or failed the + // instance status check. This metric can be either 0 (passed) or 1 (failed). This + // metric data is available in 1-minute (60 seconds) granularity. Statistics: The + // most useful statistic is Sum. Unit: The published unit is Count. + // + // * + // StatusCheckFailed_System - Reports whether the instance passed or failed the + // system status check. This metric can be either 0 (passed) or 1 (failed). This + // metric data is available in 1-minute (60 seconds) granularity. 
Statistics: The + // most useful statistic is Sum. Unit: The published unit is Count. // // This member is required. MetricName types.InstanceMetricName @@ -130,26 +129,26 @@ type GetInstanceMetricDataInput struct { // The statistic for the metric. The following statistics are available: // - // * - // Minimum - The lowest value observed during the specified period. Use this value - // to determine low volumes of activity for your application. + // * Minimum + // - The lowest value observed during the specified period. Use this value to + // determine low volumes of activity for your application. // - // * Maximum - The - // highest value observed during the specified period. Use this value to determine - // high volumes of activity for your application. + // * Maximum - The highest + // value observed during the specified period. Use this value to determine high + // volumes of activity for your application. // - // * Sum - All values submitted - // for the matching metric added together. You can use this statistic to determine - // the total volume of a metric. + // * Sum - All values submitted for the + // matching metric added together. You can use this statistic to determine the + // total volume of a metric. // - // * Average - The value of Sum / SampleCount - // during the specified period. By comparing this statistic with the Minimum and - // Maximum values, you can determine the full scope of a metric and how close the - // average use is to the Minimum and Maximum values. This comparison helps you to - // know when to increase or decrease your resources. + // * Average - The value of Sum / SampleCount during the + // specified period. By comparing this statistic with the Minimum and Maximum + // values, you can determine the full scope of a metric and how close the average + // use is to the Minimum and Maximum values. This comparison helps you to know when + // to increase or decrease your resources. // - // * SampleCount - The - // count, or number, of data points used for the statistical calculation. + // * SampleCount - The count, or number, + // of data points used for the statistical calculation. // // This member is required. Statistics []types.MetricStatistic diff --git a/service/lightsail/api_op_GetLoadBalancerMetricData.go b/service/lightsail/api_op_GetLoadBalancerMetricData.go index f08c1ab6070..6d3b11f31c6 100644 --- a/service/lightsail/api_op_GetLoadBalancerMetricData.go +++ b/service/lightsail/api_op_GetLoadBalancerMetricData.go @@ -47,77 +47,77 @@ type GetLoadBalancerMetricDataInput struct { // names are listed below, along with the most useful statistics to include in your // request, and the published unit value. // - // * ClientTLSNegotiationErrorCount - - // The number of TLS connections initiated by the client that did not establish a + // * ClientTLSNegotiationErrorCount - The + // number of TLS connections initiated by the client that did not establish a // session with the load balancer due to a TLS error generated by the load // balancer. Possible causes include a mismatch of ciphers or protocols. // Statistics: The most useful statistic is Sum. Unit: The published unit is // Count. // - // * HealthyHostCount - The number of target instances that are - // considered healthy. Statistics: The most useful statistic are Average, Minimum, - // and Maximum. Unit: The published unit is Count. - // - // * - // HTTPCode_Instance_2XX_Count - The number of HTTP 2XX response codes generated by - // the target instances. 
This does not include any response codes generated by the - // load balancer. Statistics: The most useful statistic is Sum. Note that Minimum, - // Maximum, and Average all return 1. Unit: The published unit is Count. - // - // * - // HTTPCode_Instance_3XX_Count - The number of HTTP 3XX response codes generated by - // the target instances. This does not include any response codes generated by the - // load balancer. Statistics: The most useful statistic is Sum. Note that Minimum, - // Maximum, and Average all return 1. Unit: The published unit is Count. - // - // * - // HTTPCode_Instance_4XX_Count - The number of HTTP 4XX response codes generated by - // the target instances. This does not include any response codes generated by the - // load balancer. Statistics: The most useful statistic is Sum. Note that Minimum, - // Maximum, and Average all return 1. Unit: The published unit is Count. - // - // * - // HTTPCode_Instance_5XX_Count - The number of HTTP 5XX response codes generated by - // the target instances. This does not include any response codes generated by the - // load balancer. Statistics: The most useful statistic is Sum. Note that Minimum, - // Maximum, and Average all return 1. Unit: The published unit is Count. - // - // * - // HTTPCode_LB_4XX_Count - The number of HTTP 4XX client error codes that - // originated from the load balancer. Client errors are generated when requests are - // malformed or incomplete. These requests were not received by the target - // instance. This count does not include response codes generated by the target - // instances. Statistics: The most useful statistic is Sum. Note that Minimum, - // Maximum, and Average all return 1. Unit: The published unit is Count. - // - // * - // HTTPCode_LB_5XX_Count - The number of HTTP 5XX server error codes that - // originated from the load balancer. This does not include any response codes - // generated by the target instance. This metric is reported if there are no - // healthy instances attached to the load balancer, or if the request rate exceeds - // the capacity of the instances (spillover) or the load balancer. Statistics: The + // * HealthyHostCount - The number of target instances that are considered + // healthy. Statistics: The most useful statistic are Average, Minimum, and + // Maximum. Unit: The published unit is Count. + // + // * HTTPCode_Instance_2XX_Count - The + // number of HTTP 2XX response codes generated by the target instances. This does + // not include any response codes generated by the load balancer. Statistics: The // most useful statistic is Sum. Note that Minimum, Maximum, and Average all return // 1. Unit: The published unit is Count. // - // * InstanceResponseTime - The time - // elapsed, in seconds, after the request leaves the load balancer until a response - // from the target instance is received. Statistics: The most useful statistic is - // Average. Unit: The published unit is Seconds. + // * HTTPCode_Instance_3XX_Count - The + // number of HTTP 3XX response codes generated by the target instances. This does + // not include any response codes generated by the load balancer. Statistics: The + // most useful statistic is Sum. Note that Minimum, Maximum, and Average all return + // 1. Unit: The published unit is Count. // - // * RejectedConnectionCount - - // The number of connections that were rejected because the load balancer had - // reached its maximum number of connections. Statistics: The most useful statistic - // is Sum. Unit: The published unit is Count. 
+ // * HTTPCode_Instance_4XX_Count - The + // number of HTTP 4XX response codes generated by the target instances. This does + // not include any response codes generated by the load balancer. Statistics: The + // most useful statistic is Sum. Note that Minimum, Maximum, and Average all return + // 1. Unit: The published unit is Count. // - // * RequestCount - The number of - // requests processed over IPv4. This count includes only the requests with a - // response generated by a target instance of the load balancer. Statistics: The + // * HTTPCode_Instance_5XX_Count - The + // number of HTTP 5XX response codes generated by the target instances. This does + // not include any response codes generated by the load balancer. Statistics: The // most useful statistic is Sum. Note that Minimum, Maximum, and Average all return // 1. Unit: The published unit is Count. // - // * UnhealthyHostCount - The number of - // target instances that are considered unhealthy. Statistics: The most useful - // statistic are Average, Minimum, and Maximum. Unit: The published unit is Count. + // * HTTPCode_LB_4XX_Count - The number of + // HTTP 4XX client error codes that originated from the load balancer. Client + // errors are generated when requests are malformed or incomplete. These requests + // were not received by the target instance. This count does not include response + // codes generated by the target instances. Statistics: The most useful statistic + // is Sum. Note that Minimum, Maximum, and Average all return 1. Unit: The + // published unit is Count. + // + // * HTTPCode_LB_5XX_Count - The number of HTTP 5XX + // server error codes that originated from the load balancer. This does not include + // any response codes generated by the target instance. This metric is reported if + // there are no healthy instances attached to the load balancer, or if the request + // rate exceeds the capacity of the instances (spillover) or the load balancer. + // Statistics: The most useful statistic is Sum. Note that Minimum, Maximum, and + // Average all return 1. Unit: The published unit is Count. + // + // * InstanceResponseTime + // - The time elapsed, in seconds, after the request leaves the load balancer until + // a response from the target instance is received. Statistics: The most useful + // statistic is Average. Unit: The published unit is Seconds. + // + // * + // RejectedConnectionCount - The number of connections that were rejected because + // the load balancer had reached its maximum number of connections. Statistics: The + // most useful statistic is Sum. Unit: The published unit is Count. + // + // * RequestCount + // - The number of requests processed over IPv4. This count includes only the + // requests with a response generated by a target instance of the load balancer. + // Statistics: The most useful statistic is Sum. Note that Minimum, Maximum, and + // Average all return 1. Unit: The published unit is Count. + // + // * UnhealthyHostCount - + // The number of target instances that are considered unhealthy. Statistics: The + // most useful statistic are Average, Minimum, and Maximum. Unit: The published + // unit is Count. // // This member is required. MetricName types.LoadBalancerMetricName @@ -134,26 +134,26 @@ type GetLoadBalancerMetricDataInput struct { // The statistic for the metric. The following statistics are available: // - // * - // Minimum - The lowest value observed during the specified period. Use this value - // to determine low volumes of activity for your application. 
+ // * Minimum + // - The lowest value observed during the specified period. Use this value to + // determine low volumes of activity for your application. // - // * Maximum - The - // highest value observed during the specified period. Use this value to determine - // high volumes of activity for your application. + // * Maximum - The highest + // value observed during the specified period. Use this value to determine high + // volumes of activity for your application. // - // * Sum - All values submitted - // for the matching metric added together. You can use this statistic to determine - // the total volume of a metric. + // * Sum - All values submitted for the + // matching metric added together. You can use this statistic to determine the + // total volume of a metric. // - // * Average - The value of Sum / SampleCount - // during the specified period. By comparing this statistic with the Minimum and - // Maximum values, you can determine the full scope of a metric and how close the - // average use is to the Minimum and Maximum values. This comparison helps you to - // know when to increase or decrease your resources. + // * Average - The value of Sum / SampleCount during the + // specified period. By comparing this statistic with the Minimum and Maximum + // values, you can determine the full scope of a metric and how close the average + // use is to the Minimum and Maximum values. This comparison helps you to know when + // to increase or decrease your resources. // - // * SampleCount - The - // count, or number, of data points used for the statistical calculation. + // * SampleCount - The count, or number, + // of data points used for the statistical calculation. // // This member is required. Statistics []types.MetricStatistic diff --git a/service/lightsail/api_op_GetRelationalDatabaseLogEvents.go b/service/lightsail/api_op_GetRelationalDatabaseLogEvents.go index 63a53859e7b..7058a55dd5e 100644 --- a/service/lightsail/api_op_GetRelationalDatabaseLogEvents.go +++ b/service/lightsail/api_op_GetRelationalDatabaseLogEvents.go @@ -43,10 +43,10 @@ type GetRelationalDatabaseLogEventsInput struct { // The end of the time interval from which to get log events. Constraints: // - // * + // * // Specified in Coordinated Universal Time (UTC). // - // * Specified in the Unix time + // * Specified in the Unix time // format. For example, if you wish to use an end time of October 1, 2018, at 8 PM // UTC, then you input 1538424000 as the end time. EndTime *time.Time @@ -66,10 +66,10 @@ type GetRelationalDatabaseLogEventsInput struct { // The start of the time interval from which to get log events. Constraints: // - // * + // * // Specified in Coordinated Universal Time (UTC). // - // * Specified in the Unix time + // * Specified in the Unix time // format. For example, if you wish to use a start time of October 1, 2018, at 8 PM // UTC, then you input 1538424000 as the start time. StartTime *time.Time diff --git a/service/lightsail/api_op_GetRelationalDatabaseMetricData.go b/service/lightsail/api_op_GetRelationalDatabaseMetricData.go index 43911f923a5..30db8565882 100644 --- a/service/lightsail/api_op_GetRelationalDatabaseMetricData.go +++ b/service/lightsail/api_op_GetRelationalDatabaseMetricData.go @@ -35,10 +35,10 @@ type GetRelationalDatabaseMetricDataInput struct { // The end of the time interval from which to get metric data. Constraints: // - // * + // * // Specified in Coordinated Universal Time (UTC). // - // * Specified in the Unix time + // * Specified in the Unix time // format. 
For example, if you wish to use an end time of October 1, 2018, at 8 PM // UTC, then you input 1538424000 as the end time. // @@ -50,33 +50,33 @@ type GetRelationalDatabaseMetricDataInput struct { // in your request, and the published unit value. All relational database metric // data is available in 1-minute (60 seconds) granularity. // - // * CPUUtilization - - // The percentage of CPU utilization currently in use on the database. Statistics: - // The most useful statistics are Maximum and Average. Unit: The published unit is + // * CPUUtilization - The + // percentage of CPU utilization currently in use on the database. Statistics: The + // most useful statistics are Maximum and Average. Unit: The published unit is // Percent. // - // * DatabaseConnections - The number of database connections in use. + // * DatabaseConnections - The number of database connections in use. // Statistics: The most useful statistics are Maximum and Sum. Unit: The published // unit is Count. // - // * DiskQueueDepth - The number of outstanding IOs (read/write + // * DiskQueueDepth - The number of outstanding IOs (read/write // requests) that are waiting to access the disk. Statistics: The most useful // statistic is Sum. Unit: The published unit is Count. // - // * FreeStorageSpace - - // The amount of available storage space. Statistics: The most useful statistic is - // Sum. Unit: The published unit is Bytes. + // * FreeStorageSpace - The + // amount of available storage space. Statistics: The most useful statistic is Sum. + // Unit: The published unit is Bytes. // - // * NetworkReceiveThroughput - The - // incoming (Receive) network traffic on the database, including both customer - // database traffic and AWS traffic used for monitoring and replication. - // Statistics: The most useful statistic is Average. Unit: The published unit is - // Bytes/Second. + // * NetworkReceiveThroughput - The incoming + // (Receive) network traffic on the database, including both customer database + // traffic and AWS traffic used for monitoring and replication. Statistics: The + // most useful statistic is Average. Unit: The published unit is Bytes/Second. // - // * NetworkTransmitThroughput - The outgoing (Transmit) network - // traffic on the database, including both customer database traffic and AWS - // traffic used for monitoring and replication. Statistics: The most useful - // statistic is Average. Unit: The published unit is Bytes/Second. + // * + // NetworkTransmitThroughput - The outgoing (Transmit) network traffic on the + // database, including both customer database traffic and AWS traffic used for + // monitoring and replication. Statistics: The most useful statistic is Average. + // Unit: The published unit is Bytes/Second. // // This member is required. MetricName types.RelationalDatabaseMetricName @@ -94,38 +94,38 @@ type GetRelationalDatabaseMetricDataInput struct { // The start of the time interval from which to get metric data. Constraints: // + // * + // Specified in Coordinated Universal Time (UTC). // - // * Specified in Coordinated Universal Time (UTC). - // - // * Specified in the Unix - // time format. For example, if you wish to use a start time of October 1, 2018, at - // 8 PM UTC, then you input 1538424000 as the start time. + // * Specified in the Unix time + // format. For example, if you wish to use a start time of October 1, 2018, at 8 PM + // UTC, then you input 1538424000 as the start time. // // This member is required. StartTime *time.Time // The statistic for the metric. 
The following statistics are available: // - // * - // Minimum - The lowest value observed during the specified period. Use this value - // to determine low volumes of activity for your application. + // * Minimum + // - The lowest value observed during the specified period. Use this value to + // determine low volumes of activity for your application. // - // * Maximum - The - // highest value observed during the specified period. Use this value to determine - // high volumes of activity for your application. + // * Maximum - The highest + // value observed during the specified period. Use this value to determine high + // volumes of activity for your application. // - // * Sum - All values submitted - // for the matching metric added together. You can use this statistic to determine - // the total volume of a metric. + // * Sum - All values submitted for the + // matching metric added together. You can use this statistic to determine the + // total volume of a metric. // - // * Average - The value of Sum / SampleCount - // during the specified period. By comparing this statistic with the Minimum and - // Maximum values, you can determine the full scope of a metric and how close the - // average use is to the Minimum and Maximum values. This comparison helps you to - // know when to increase or decrease your resources. + // * Average - The value of Sum / SampleCount during the + // specified period. By comparing this statistic with the Minimum and Maximum + // values, you can determine the full scope of a metric and how close the average + // use is to the Minimum and Maximum values. This comparison helps you to know when + // to increase or decrease your resources. // - // * SampleCount - The - // count, or number, of data points used for the statistical calculation. + // * SampleCount - The count, or number, + // of data points used for the statistical calculation. // // This member is required. Statistics []types.MetricStatistic diff --git a/service/lightsail/api_op_PutAlarm.go b/service/lightsail/api_op_PutAlarm.go index 70f06ebb532..ba265151345 100644 --- a/service/lightsail/api_op_PutAlarm.go +++ b/service/lightsail/api_op_PutAlarm.go @@ -66,20 +66,20 @@ type PutAlarmInput struct { EvaluationPeriods *int32 // The name of the metric to associate with the alarm. You can configure up to two - // alarms per metric. The following metrics are available for each resource type: - // + // alarms per metric. The following metrics are available for each resource + // type: // // * Instances: BurstCapacityPercentage, BurstCapacityTime, CPUUtilization, // NetworkIn, NetworkOut, StatusCheckFailed, StatusCheckFailed_Instance, and // StatusCheckFailed_System. // - // * Load balancers: ClientTLSNegotiationErrorCount, + // * Load balancers: ClientTLSNegotiationErrorCount, // HealthyHostCount, UnhealthyHostCount, HTTPCode_LB_4XX_Count, // HTTPCode_LB_5XX_Count, HTTPCode_Instance_2XX_Count, HTTPCode_Instance_3XX_Count, // HTTPCode_Instance_4XX_Count, HTTPCode_Instance_5XX_Count, InstanceResponseTime, // RejectedConnectionCount, and RequestCount. // - // * Relational databases: + // * Relational databases: // CPUUtilization, DatabaseConnections, DiskQueueDepth, FreeStorageSpace, // NetworkReceiveThroughput, and NetworkTransmitThroughput. // @@ -124,52 +124,50 @@ type PutAlarmInput struct { // The alarm states that trigger a notification. An alarm has the following // possible states: // - // * ALARM - The metric is outside of the defined - // threshold. 
+ // * ALARM - The metric is outside of the defined threshold. // - // * INSUFFICIENT_DATA - The alarm has just started, the metric is - // not available, or not enough data is available for the metric to determine the - // alarm state. + // * + // INSUFFICIENT_DATA - The alarm has just started, the metric is not available, or + // not enough data is available for the metric to determine the alarm state. // - // * OK - The metric is within the defined threshold. + // * OK + // - The metric is within the defined threshold. // - // When you - // specify a notification trigger, the ALARM state must be specified. The - // INSUFFICIENT_DATA and OK states can be specified in addition to the ALARM - // state. + // When you specify a notification + // trigger, the ALARM state must be specified. The INSUFFICIENT_DATA and OK states + // can be specified in addition to the ALARM state. // - // * If you specify OK as an alarm trigger, a notification is sent when - // the alarm switches from an ALARM or INSUFFICIENT_DATA alarm state to an OK - // state. This can be thought of as an all clear alarm notification. + // * If you specify OK as an + // alarm trigger, a notification is sent when the alarm switches from an ALARM or + // INSUFFICIENT_DATA alarm state to an OK state. This can be thought of as an all + // clear alarm notification. // - // * If you - // specify INSUFFICIENT_DATA as the alarm trigger, a notification is sent when the - // alarm switches from an OK or ALARM alarm state to an INSUFFICIENT_DATA - // state. + // * If you specify INSUFFICIENT_DATA as the alarm + // trigger, a notification is sent when the alarm switches from an OK or ALARM + // alarm state to an INSUFFICIENT_DATA state. // - // The notification trigger defaults to ALARM if you don't specify this - // parameter. + // The notification trigger defaults to + // ALARM if you don't specify this parameter. NotificationTriggers []types.AlarmState // Sets how this alarm will handle missing data points. An alarm can treat missing // data in the following ways: // - // * breaching - Assume the missing data is not - // within the threshold. Missing data counts towards the number of times the metric - // is not within the threshold. + // * breaching - Assume the missing data is not within + // the threshold. Missing data counts towards the number of times the metric is not + // within the threshold. // - // * notBreaching - Assume the missing data is - // within the threshold. Missing data does not count towards the number of times - // the metric is not within the threshold. + // * notBreaching - Assume the missing data is within the + // threshold. Missing data does not count towards the number of times the metric is + // not within the threshold. // - // * ignore - Ignore the missing data. - // Maintains the current alarm state. + // * ignore - Ignore the missing data. Maintains the + // current alarm state. // - // * missing - Missing data is treated as - // missing. + // * missing - Missing data is treated as missing. // - // If treatMissingData is not specified, the default behavior of missing - // is used. + // If + // treatMissingData is not specified, the default behavior of missing is used. TreatMissingData types.TreatMissingData } diff --git a/service/lightsail/api_op_TestAlarm.go b/service/lightsail/api_op_TestAlarm.go index 2b26ee0169f..aaee1e2bd60 100644 --- a/service/lightsail/api_op_TestAlarm.go +++ b/service/lightsail/api_op_TestAlarm.go @@ -44,14 +44,14 @@ type TestAlarmInput struct { // The alarm state to test. 
An alarm has the following possible states that can be // tested: // - // * ALARM - The metric is outside of the defined threshold. + // * ALARM - The metric is outside of the defined threshold. // - // * + // * // INSUFFICIENT_DATA - The alarm has just started, the metric is not available, or // not enough data is available for the metric to determine the alarm state. // - // * - // OK - The metric is within the defined threshold. + // * OK + // - The metric is within the defined threshold. // // This member is required. State types.AlarmState diff --git a/service/lightsail/api_op_UpdateRelationalDatabase.go b/service/lightsail/api_op_UpdateRelationalDatabase.go index 004583ace6e..f039cc6681b 100644 --- a/service/lightsail/api_op_UpdateRelationalDatabase.go +++ b/service/lightsail/api_op_UpdateRelationalDatabase.go @@ -66,16 +66,16 @@ type UpdateRelationalDatabaseInput struct { // The daily time range during which automated backups are created for your // database if automated backups are enabled. Constraints: // - // * Must be in the + // * Must be in the // hh24:mi-hh24:mi format. Example: 16:00-16:30 // - // * Specified in Coordinated + // * Specified in Coordinated // Universal Time (UTC). // - // * Must not conflict with the preferred maintenance + // * Must not conflict with the preferred maintenance // window. // - // * Must be at least 30 minutes. + // * Must be at least 30 minutes. PreferredBackupWindow *string // The weekly time range during which system maintenance can occur on your @@ -83,18 +83,17 @@ type UpdateRelationalDatabaseInput struct { // block of time for each AWS Region, occurring on a random day of the week. // Constraints: // - // * Must be in the ddd:hh24:mi-ddd:hh24:mi format. + // * Must be in the ddd:hh24:mi-ddd:hh24:mi format. // - // * Valid - // days: Mon, Tue, Wed, Thu, Fri, Sat, Sun. + // * Valid days: + // Mon, Tue, Wed, Thu, Fri, Sat, Sun. // - // * Must be at least 30 minutes. + // * Must be at least 30 minutes. // + // * Specified + // in Coordinated Universal Time (UTC). // - // * Specified in Coordinated Universal Time (UTC). - // - // * Example: - // Tue:17:00-Tue:17:30 + // * Example: Tue:17:00-Tue:17:30 PreferredMaintenanceWindow *string // Specifies the accessibility options for your database. A value of true specifies diff --git a/service/lightsail/types/enums.go b/service/lightsail/types/enums.go index ece68ee90e6..f873f5d01cf 100644 --- a/service/lightsail/types/enums.go +++ b/service/lightsail/types/enums.go @@ -40,9 +40,9 @@ type AlarmState string // Enum values for AlarmState const ( - AlarmStateOk AlarmState = "OK" - AlarmStateAlarm AlarmState = "ALARM" - AlarmStateInsufficient_data AlarmState = "INSUFFICIENT_DATA" + AlarmStateOk AlarmState = "OK" + AlarmStateAlarm AlarmState = "ALARM" + AlarmStateInsufficientData AlarmState = "INSUFFICIENT_DATA" ) // Values returns all known values for AlarmState. 
Note that this can be expanded @@ -60,10 +60,10 @@ type AutoSnapshotStatus string // Enum values for AutoSnapshotStatus const ( - AutoSnapshotStatusSuccess AutoSnapshotStatus = "Success" - AutoSnapshotStatusFailed AutoSnapshotStatus = "Failed" - AutoSnapshotStatusIn_progress AutoSnapshotStatus = "InProgress" - AutoSnapshotStatusNot_found AutoSnapshotStatus = "NotFound" + AutoSnapshotStatusSuccess AutoSnapshotStatus = "Success" + AutoSnapshotStatusFailed AutoSnapshotStatus = "Failed" + AutoSnapshotStatusInProgress AutoSnapshotStatus = "InProgress" + AutoSnapshotStatusNotFound AutoSnapshotStatus = "NotFound" ) // Values returns all known values for AutoSnapshotStatus. Note that this can be @@ -475,14 +475,14 @@ type InstanceMetricName string // Enum values for InstanceMetricName const ( - InstanceMetricNameCpuutilization InstanceMetricName = "CPUUtilization" - InstanceMetricNameNetworkin InstanceMetricName = "NetworkIn" - InstanceMetricNameNetworkout InstanceMetricName = "NetworkOut" - InstanceMetricNameStatuscheckfailed InstanceMetricName = "StatusCheckFailed" - InstanceMetricNameStatuscheckfailed_instance InstanceMetricName = "StatusCheckFailed_Instance" - InstanceMetricNameStatuscheckfailed_system InstanceMetricName = "StatusCheckFailed_System" - InstanceMetricNameBurstcapacitytime InstanceMetricName = "BurstCapacityTime" - InstanceMetricNameBurstcapacitypercentage InstanceMetricName = "BurstCapacityPercentage" + InstanceMetricNameCpuutilization InstanceMetricName = "CPUUtilization" + InstanceMetricNameNetworkin InstanceMetricName = "NetworkIn" + InstanceMetricNameNetworkout InstanceMetricName = "NetworkOut" + InstanceMetricNameStatuscheckfailed InstanceMetricName = "StatusCheckFailed" + InstanceMetricNameStatuscheckfailedInstance InstanceMetricName = "StatusCheckFailed_Instance" + InstanceMetricNameStatuscheckfailedSystem InstanceMetricName = "StatusCheckFailed_System" + InstanceMetricNameBurstcapacitytime InstanceMetricName = "BurstCapacityTime" + InstanceMetricNameBurstcapacitypercentage InstanceMetricName = "BurstCapacityPercentage" ) // Values returns all known values for InstanceMetricName. Note that this can be @@ -543,9 +543,9 @@ type LoadBalancerAttributeName string // Enum values for LoadBalancerAttributeName const ( - LoadBalancerAttributeNameHealthcheckpath LoadBalancerAttributeName = "HealthCheckPath" - LoadBalancerAttributeNameSessionstickinessenabled LoadBalancerAttributeName = "SessionStickinessEnabled" - LoadBalancerAttributeNameSessionstickiness_lb_cookiedurationseconds LoadBalancerAttributeName = "SessionStickiness_LB_CookieDurationSeconds" + LoadBalancerAttributeNameHealthcheckpath LoadBalancerAttributeName = "HealthCheckPath" + LoadBalancerAttributeNameSessionstickinessenabled LoadBalancerAttributeName = "SessionStickinessEnabled" + LoadBalancerAttributeNameSessionstickinessLbCookiedurationseconds LoadBalancerAttributeName = "SessionStickiness_LB_CookieDurationSeconds" ) // Values returns all known values for LoadBalancerAttributeName. 
Note that this @@ -566,12 +566,12 @@ const ( LoadBalancerMetricNameClienttlsnegotiationerrorcount LoadBalancerMetricName = "ClientTLSNegotiationErrorCount" LoadBalancerMetricNameHealthyhostcount LoadBalancerMetricName = "HealthyHostCount" LoadBalancerMetricNameUnhealthyhostcount LoadBalancerMetricName = "UnhealthyHostCount" - LoadBalancerMetricNameHttpcode_lb_4xx_count LoadBalancerMetricName = "HTTPCode_LB_4XX_Count" - LoadBalancerMetricNameHttpcode_lb_5xx_count LoadBalancerMetricName = "HTTPCode_LB_5XX_Count" - LoadBalancerMetricNameHttpcode_instance_2xx_count LoadBalancerMetricName = "HTTPCode_Instance_2XX_Count" - LoadBalancerMetricNameHttpcode_instance_3xx_count LoadBalancerMetricName = "HTTPCode_Instance_3XX_Count" - LoadBalancerMetricNameHttpcode_instance_4xx_count LoadBalancerMetricName = "HTTPCode_Instance_4XX_Count" - LoadBalancerMetricNameHttpcode_instance_5xx_count LoadBalancerMetricName = "HTTPCode_Instance_5XX_Count" + LoadBalancerMetricNameHttpcodeLb4xxCount LoadBalancerMetricName = "HTTPCode_LB_4XX_Count" + LoadBalancerMetricNameHttpcodeLb5xxCount LoadBalancerMetricName = "HTTPCode_LB_5XX_Count" + LoadBalancerMetricNameHttpcodeInstance2xxCount LoadBalancerMetricName = "HTTPCode_Instance_2XX_Count" + LoadBalancerMetricNameHttpcodeInstance3xxCount LoadBalancerMetricName = "HTTPCode_Instance_3XX_Count" + LoadBalancerMetricNameHttpcodeInstance4xxCount LoadBalancerMetricName = "HTTPCode_Instance_4XX_Count" + LoadBalancerMetricNameHttpcodeInstance5xxCount LoadBalancerMetricName = "HTTPCode_Instance_5XX_Count" LoadBalancerMetricNameInstanceresponsetime LoadBalancerMetricName = "InstanceResponseTime" LoadBalancerMetricNameRejectedconnectioncount LoadBalancerMetricName = "RejectedConnectionCount" LoadBalancerMetricNameRequestcount LoadBalancerMetricName = "RequestCount" @@ -601,8 +601,8 @@ type LoadBalancerProtocol string // Enum values for LoadBalancerProtocol const ( - LoadBalancerProtocolHttp_https LoadBalancerProtocol = "HTTP_HTTPS" - LoadBalancerProtocolHttp LoadBalancerProtocol = "HTTP" + LoadBalancerProtocolHttpHttps LoadBalancerProtocol = "HTTP_HTTPS" + LoadBalancerProtocolHttp LoadBalancerProtocol = "HTTP" ) // Values returns all known values for LoadBalancerProtocol. 
Note that this can be @@ -782,17 +782,17 @@ const ( MetricNameNetworkin MetricName = "NetworkIn" MetricNameNetworkout MetricName = "NetworkOut" MetricNameStatuscheckfailed MetricName = "StatusCheckFailed" - MetricNameStatuscheckfailed_instance MetricName = "StatusCheckFailed_Instance" - MetricNameStatuscheckfailed_system MetricName = "StatusCheckFailed_System" + MetricNameStatuscheckfailedInstance MetricName = "StatusCheckFailed_Instance" + MetricNameStatuscheckfailedSystem MetricName = "StatusCheckFailed_System" MetricNameClienttlsnegotiationerrorcount MetricName = "ClientTLSNegotiationErrorCount" MetricNameHealthyhostcount MetricName = "HealthyHostCount" MetricNameUnhealthyhostcount MetricName = "UnhealthyHostCount" - MetricNameHttpcode_lb_4xx_count MetricName = "HTTPCode_LB_4XX_Count" - MetricNameHttpcode_lb_5xx_count MetricName = "HTTPCode_LB_5XX_Count" - MetricNameHttpcode_instance_2xx_count MetricName = "HTTPCode_Instance_2XX_Count" - MetricNameHttpcode_instance_3xx_count MetricName = "HTTPCode_Instance_3XX_Count" - MetricNameHttpcode_instance_4xx_count MetricName = "HTTPCode_Instance_4XX_Count" - MetricNameHttpcode_instance_5xx_count MetricName = "HTTPCode_Instance_5XX_Count" + MetricNameHttpcodeLb4xxCount MetricName = "HTTPCode_LB_4XX_Count" + MetricNameHttpcodeLb5xxCount MetricName = "HTTPCode_LB_5XX_Count" + MetricNameHttpcodeInstance2xxCount MetricName = "HTTPCode_Instance_2XX_Count" + MetricNameHttpcodeInstance3xxCount MetricName = "HTTPCode_Instance_3XX_Count" + MetricNameHttpcodeInstance4xxCount MetricName = "HTTPCode_Instance_4XX_Count" + MetricNameHttpcodeInstance5xxCount MetricName = "HTTPCode_Instance_5XX_Count" MetricNameInstanceresponsetime MetricName = "InstanceResponseTime" MetricNameRejectedconnectioncount MetricName = "RejectedConnectionCount" MetricNameRequestcount MetricName = "RequestCount" @@ -1218,20 +1218,20 @@ type RegionName string // Enum values for RegionName const ( - RegionNameUs_east_1 RegionName = "us-east-1" - RegionNameUs_east_2 RegionName = "us-east-2" - RegionNameUs_west_1 RegionName = "us-west-1" - RegionNameUs_west_2 RegionName = "us-west-2" - RegionNameEu_west_1 RegionName = "eu-west-1" - RegionNameEu_west_2 RegionName = "eu-west-2" - RegionNameEu_west_3 RegionName = "eu-west-3" - RegionNameEu_central_1 RegionName = "eu-central-1" - RegionNameCa_central_1 RegionName = "ca-central-1" - RegionNameAp_south_1 RegionName = "ap-south-1" - RegionNameAp_southeast_1 RegionName = "ap-southeast-1" - RegionNameAp_southeast_2 RegionName = "ap-southeast-2" - RegionNameAp_northeast_1 RegionName = "ap-northeast-1" - RegionNameAp_northeast_2 RegionName = "ap-northeast-2" + RegionNameUsEast1 RegionName = "us-east-1" + RegionNameUsEast2 RegionName = "us-east-2" + RegionNameUsWest1 RegionName = "us-west-1" + RegionNameUsWest2 RegionName = "us-west-2" + RegionNameEuWest1 RegionName = "eu-west-1" + RegionNameEuWest2 RegionName = "eu-west-2" + RegionNameEuWest3 RegionName = "eu-west-3" + RegionNameEuCentral1 RegionName = "eu-central-1" + RegionNameCaCentral1 RegionName = "ca-central-1" + RegionNameApSouth1 RegionName = "ap-south-1" + RegionNameApSoutheast1 RegionName = "ap-southeast-1" + RegionNameApSoutheast2 RegionName = "ap-southeast-2" + RegionNameApNortheast1 RegionName = "ap-northeast-1" + RegionNameApNortheast2 RegionName = "ap-northeast-2" ) // Values returns all known values for RegionName. 
Note that this can be expanded diff --git a/service/lightsail/types/types.go b/service/lightsail/types/types.go index 49ad6ac5283..44dcfef3dce 100644 --- a/service/lightsail/types/types.go +++ b/service/lightsail/types/types.go @@ -94,39 +94,39 @@ type Alarm struct { // The current state of the alarm. An alarm has the following possible states: // + // * + // ALARM - The metric is outside of the defined threshold. // - // * ALARM - The metric is outside of the defined threshold. + // * INSUFFICIENT_DATA - + // The alarm has just started, the metric is not available, or not enough data is + // available for the metric to determine the alarm state. // - // * - // INSUFFICIENT_DATA - The alarm has just started, the metric is not available, or - // not enough data is available for the metric to determine the alarm state. - // - // * - // OK - The metric is within the defined threshold. + // * OK - The metric is + // within the defined threshold. State AlarmState // The statistic for the metric associated with the alarm. The following statistics // are available: // - // * Minimum - The lowest value observed during the specified + // * Minimum - The lowest value observed during the specified // period. Use this value to determine low volumes of activity for your // application. // - // * Maximum - The highest value observed during the specified + // * Maximum - The highest value observed during the specified // period. Use this value to determine high volumes of activity for your // application. // - // * Sum - All values submitted for the matching metric added + // * Sum - All values submitted for the matching metric added // together. You can use this statistic to determine the total volume of a // metric. // - // * Average - The value of Sum / SampleCount during the specified - // period. By comparing this statistic with the Minimum and Maximum values, you can + // * Average - The value of Sum / SampleCount during the specified period. + // By comparing this statistic with the Minimum and Maximum values, you can // determine the full scope of a metric and how close the average use is to the // Minimum and Maximum values. This comparison helps you to know when to increase // or decrease your resources. // - // * SampleCount - The count, or number, of data + // * SampleCount - The count, or number, of data // points used for the statistical calculation. Statistic MetricStatistic @@ -141,19 +141,18 @@ type Alarm struct { // Specifies how the alarm handles missing data points. An alarm can treat missing // data in the following ways: // - // * breaching - Assume the missing data is not - // within the threshold. Missing data counts towards the number of times the metric - // is not within the threshold. + // * breaching - Assume the missing data is not within + // the threshold. Missing data counts towards the number of times the metric is not + // within the threshold. // - // * notBreaching - Assume the missing data is - // within the threshold. Missing data does not count towards the number of times - // the metric is not within the threshold. + // * notBreaching - Assume the missing data is within the + // threshold. Missing data does not count towards the number of times the metric is + // not within the threshold. // - // * ignore - Ignore the missing data. - // Maintains the current alarm state. + // * ignore - Ignore the missing data. Maintains the + // current alarm state. // - // * missing - Missing data is treated as - // missing. + // * missing - Missing data is treated as missing. 
TreatMissingData TreatMissingData // The unit of the metric associated with the alarm. @@ -176,45 +175,44 @@ type AttachedDisk struct { // for a resource, it is typically effective immediately except under the following // conditions: // -// * If an automatic snapshot has been created for the current -// day, and you change the snapshot time to a later time of day, then the new -// snapshot time will be effective the following day. This ensures that two -// snapshots are not created for the current day. -// -// * If an automatic snapshot -// has not yet been created for the current day, and you change the snapshot time -// to an earlier time of day, then the new snapshot time will be effective the -// following day and a snapshot is automatically created at the previously set time -// for the current day. This ensures that a snapshot is created for the current -// day. -// -// * If an automatic snapshot has not yet been created for the current -// day, and you change the snapshot time to a time that is within 30 minutes from -// your current time, then the new snapshot time will be effective the following -// day and a snapshot is automatically created at the previously set time for the -// current day. This ensures that a snapshot is created for the current day, -// because 30 minutes is required between your current time and the new snapshot -// time that you specify. +// * If an automatic snapshot has been created for the current day, +// and you change the snapshot time to a later time of day, then the new snapshot +// time will be effective the following day. This ensures that two snapshots are +// not created for the current day. // -// * If an automatic snapshot is scheduled to be -// created within 30 minutes from your current time and you change the snapshot -// time, then the new snapshot time will be effective the following day and a +// * If an automatic snapshot has not yet been +// created for the current day, and you change the snapshot time to an earlier time +// of day, then the new snapshot time will be effective the following day and a // snapshot is automatically created at the previously set time for the current -// day. This ensures that a snapshot is created for the current day, because 30 -// minutes is required between your current time and the new snapshot time that you +// day. This ensures that a snapshot is created for the current day. +// +// * If an +// automatic snapshot has not yet been created for the current day, and you change +// the snapshot time to a time that is within 30 minutes from your current time, +// then the new snapshot time will be effective the following day and a snapshot is +// automatically created at the previously set time for the current day. This +// ensures that a snapshot is created for the current day, because 30 minutes is +// required between your current time and the new snapshot time that you // specify. +// +// * If an automatic snapshot is scheduled to be created within 30 +// minutes from your current time and you change the snapshot time, then the new +// snapshot time will be effective the following day and a snapshot is +// automatically created at the previously set time for the current day. This +// ensures that a snapshot is created for the current day, because 30 minutes is +// required between your current time and the new snapshot time that you specify. type AutoSnapshotAddOnRequest struct { // The daily time when an automatic snapshot will be created. 
Constraints: // - // * - // Must be in HH:00 format, and in an hourly increment. + // * Must + // be in HH:00 format, and in an hourly increment. // - // * Specified in - // Coordinated Universal Time (UTC). + // * Specified in Coordinated + // Universal Time (UTC). // - // * The snapshot will be automatically - // created between the time specified and up to 45 minutes after. + // * The snapshot will be automatically created between the + // time specified and up to 45 minutes after. SnapshotTimeOfDay *string } @@ -346,14 +344,14 @@ type CacheBehavior struct { // The cache behavior of the distribution. The following cache behaviors can be // specified: // - // * cache - This option is best for static sites. When specified, - // your distribution caches and serves your entire website as static content. This + // * cache - This option is best for static sites. When specified, your + // distribution caches and serves your entire website as static content. This // behavior is ideal for websites with static content that doesn't change depending // on who views it, or for websites that don't use cookies, headers, or query // strings to personalize content. // - // * dont-cache - This option is best for - // sites that serve a mix of static and dynamic content. When specified, your + // * dont-cache - This option is best for sites + // that serve a mix of static and dynamic content. When specified, your // distribution caches and serve only the content that is specified in the // distribution's CacheBehaviorPerPath parameter. This behavior is ideal for // websites or web applications that use cookies, headers, and query strings to @@ -375,33 +373,33 @@ type CacheBehaviorPerPath struct { // The cache behavior for the specified path. You can specify one of the following // per-path cache behaviors: // - // * cache - This behavior caches the specified - // path. + // * cache - This behavior caches the specified path. // - // * dont-cache - This behavior doesn't cache the specified path. + // * + // dont-cache - This behavior doesn't cache the specified path. Behavior BehaviorEnum // The path to a directory or file to cached, or not cache. Use an asterisk symbol // to specify wildcard directories (path/to/assets/*), and file types (*.html, // *jpg, *js). Directories and file paths are case-sensitive. Examples: // - // * - // Specify the following to cache all files in the document root of an Apache web - // server running on a Lightsail instance. var/www/html/ + // * Specify + // the following to cache all files in the document root of an Apache web server + // running on a Lightsail instance. var/www/html/ // - // * Specify the - // following file to cache only the index page in the document root of an Apache - // web server. var/www/html/index.html + // * Specify the following file to + // cache only the index page in the document root of an Apache web server. + // var/www/html/index.html // - // * Specify the following to cache only - // the .html files in the document root of an Apache web server. - // var/www/html/.html + // * Specify the following to cache only the .html files + // in the document root of an Apache web server. var/www/html/.html // - // * Specify the following to cache only the .jpg, .png, - // and .gif files in the images sub-directory of the document root of an Apache web - // server. 
var/www/html/images/.jpgvar/www/html/images/.pngvar/www/html/images/.gif - // Specify the following to cache all files in the images sub-directory of the - // document root of an Apache web server. var/www/html/images/ + // * Specify the + // following to cache only the .jpg, .png, and .gif files in the images + // sub-directory of the document root of an Apache web server. + // var/www/html/images/.jpgvar/www/html/images/.pngvar/www/html/images/.gif Specify + // the following to cache all files in the images sub-directory of the document + // root of an Apache web server. var/www/html/images/ Path *string } @@ -414,30 +412,30 @@ type CacheSettings struct { // The HTTP methods that are processed and forwarded to the distribution's origin. // You can specify the following options: // - // * GET,HEAD - The distribution - // forwards the GET and HEAD methods. + // * GET,HEAD - The distribution forwards + // the GET and HEAD methods. // - // * GET,HEAD,OPTIONS - The distribution - // forwards the GET, HEAD, and OPTIONS methods. + // * GET,HEAD,OPTIONS - The distribution forwards the + // GET, HEAD, and OPTIONS methods. // - // * - // GET,HEAD,OPTIONS,PUT,PATCH,POST,DELETE - The distribution forwards the GET, - // HEAD, OPTIONS, PUT, PATCH, POST, and DELETE methods. + // * GET,HEAD,OPTIONS,PUT,PATCH,POST,DELETE - The + // distribution forwards the GET, HEAD, OPTIONS, PUT, PATCH, POST, and DELETE + // methods. // - // If you specify the third - // option, you might need to restrict access to your distribution's origin so users - // can't perform operations that you don't want them to. For example, you might not - // want users to have permission to delete objects from your origin. + // If you specify the third option, you might need to restrict access to + // your distribution's origin so users can't perform operations that you don't want + // them to. For example, you might not want users to have permission to delete + // objects from your origin. AllowedHTTPMethods *string // The HTTP method responses that are cached by your distribution. You can specify // the following options: // - // * GET,HEAD - The distribution caches responses to - // the GET and HEAD methods. + // * GET,HEAD - The distribution caches responses to the + // GET and HEAD methods. // - // * GET,HEAD,OPTIONS - The distribution caches - // responses to the GET, HEAD, and OPTIONS methods. + // * GET,HEAD,OPTIONS - The distribution caches responses to + // the GET, HEAD, and OPTIONS methods. CachedHTTPMethods *string // The default amount of time that objects stay in the distribution's cache before @@ -524,10 +522,10 @@ type Certificate struct { // The validation failure reason, if any, of the certificate. The following failure // reasons are possible: // - // * NO_AVAILABLE_CONTACTS - This failure applies to - // email validation, which is not available for Lightsail certificates. + // * NO_AVAILABLE_CONTACTS - This failure applies to email + // validation, which is not available for Lightsail certificates. // - // * + // * // ADDITIONAL_VERIFICATION_REQUIRED - Lightsail requires additional information to // process this certificate request. This can happen as a fraud-protection measure, // such as when the domain ranks within the Alexa top 1000 websites. To provide the @@ -536,22 +534,22 @@ type Certificate struct { // request a certificate for Amazon-owned domain names such as those ending in // amazonaws.com, cloudfront.net, or elasticbeanstalk.com. 
// - // * - // DOMAIN_NOT_ALLOWED - One or more of the domain names in the certificate request - // was reported as an unsafe domain by VirusTotal - // (https://www.virustotal.com/gui/home/url). To correct the problem, search for - // your domain name on the VirusTotal (https://www.virustotal.com/gui/home/url) - // website. If your domain is reported as suspicious, see Google Help for Hacked - // Websites (https://www.google.com/webmasters/hacked/?hl=en) to learn what you can - // do. If you believe that the result is a false positive, notify the organization - // that is reporting the domain. VirusTotal is an aggregate of several antivirus - // and URL scanners and cannot remove your domain from a block list itself. After - // you correct the problem and the VirusTotal registry has been updated, request a - // new certificate. If you see this error and your domain is not included in the + // * DOMAIN_NOT_ALLOWED - + // One or more of the domain names in the certificate request was reported as an + // unsafe domain by VirusTotal (https://www.virustotal.com/gui/home/url). To + // correct the problem, search for your domain name on the VirusTotal + // (https://www.virustotal.com/gui/home/url) website. If your domain is reported as + // suspicious, see Google Help for Hacked Websites + // (https://www.google.com/webmasters/hacked/?hl=en) to learn what you can do. If + // you believe that the result is a false positive, notify the organization that is + // reporting the domain. VirusTotal is an aggregate of several antivirus and URL + // scanners and cannot remove your domain from a block list itself. After you + // correct the problem and the VirusTotal registry has been updated, request a new + // certificate. If you see this error and your domain is not included in the // VirusTotal list, visit the AWS Support Center // (https://console.aws.amazon.com/support/home) and create a case. // - // * + // * // INVALID_PUBLIC_DOMAIN - One or more of the domain names in the certificate // request is not valid. Typically, this is because a domain name in the request is // not a valid top-level domain. Try to request a certificate again, correcting any @@ -560,10 +558,10 @@ type Certificate struct { // cannot request a certificate for example.invalidpublicdomain because // invalidpublicdomain is not a valid top-level domain. // - // * OTHER - Typically, - // this failure occurs when there is a typographical error in one or more of the - // domain names in the certificate request. Try to request a certificate again, - // correcting any spelling errors or typos that were in the failed request. + // * OTHER - Typically, this + // failure occurs when there is a typographical error in one or more of the domain + // names in the certificate request. Try to request a certificate again, correcting + // any spelling errors or typos that were in the failed request. RequestFailureReason *string // The reason the certificate was revoked. This value is present only when the @@ -695,14 +693,14 @@ type ContactMethod struct { // The current status of the contact method. A contact method has the following // possible status: // - // * PendingVerification - The contact method has not yet - // been verified, and the verification has not yet expired. + // * PendingVerification - The contact method has not yet been + // verified, and the verification has not yet expired. // - // * Valid - The - // contact method has been verified. + // * Valid - The contact + // method has been verified. 
// - // * InValid - An attempt was made to verify - // the contact method, but the verification has expired. + // * InValid - An attempt was made to verify the contact + // method, but the verification has expired. Status ContactMethodStatus // The support code. Include this code in your email to support when you have @@ -972,20 +970,20 @@ type DomainEntry struct { // exchanger (MX), name server (NS), start of authority (SOA), service locator // (SRV), or text (TXT). The following domain entry types can be used: // - // * A - // + // * A // - // * CNAME + // * + // CNAME // - // * MX + // * MX // - // * NS + // * NS // - // * SOA + // * SOA // - // * SRV + // * SRV // - // * TXT + // * TXT Type *string } @@ -1075,14 +1073,14 @@ type HeaderObject struct { // The headers that you want your distribution to forward to your origin and base // caching on. You can configure your distribution to do one of the following: // + // * + // all - Forward all headers to your origin. // - // * all - Forward all headers to your origin. + // * none - Forward only the default + // headers. // - // * none - Forward only the - // default headers. - // - // * allow-list - Forward only the headers you specify using - // the headersAllowList parameter. + // * allow-list - Forward only the headers you specify using the + // headersAllowList parameter. Option ForwardValues } @@ -1096,22 +1094,22 @@ type HostKeyAttributes struct { // The SHA-1 fingerprint of the returned SSH host key or RDP certificate. // - // * + // * // Example of an SHA-1 SSH fingerprint: SHA1:1CHH6FaAaXjtFOsR/t83vf91SR0 // - // * - // Example of an SHA-1 RDP fingerprint: + // * Example + // of an SHA-1 RDP fingerprint: // af:34:51:fe:09:f0:e0:da:b8:4e:56:ca:60:c2:10:ff:38:06:db:45 FingerprintSHA1 *string // The SHA-256 fingerprint of the returned SSH host key or RDP certificate. // - // * + // * // Example of an SHA-256 SSH fingerprint: // SHA256:KTsMnRBh1IhD17HpdfsbzeGA4jOijm5tyXsMjKVbB8o // - // * Example of an SHA-256 - // RDP fingerprint: + // * Example of an SHA-256 RDP + // fingerprint: // 03:9b:36:9f:4b:de:4e:61:70:fc:7c:c9:78:e7:d2:1a:1c:25:a8:0c:91:f6:7c:e4:d6:a0:85:c8:b4:53:99:68 FingerprintSHA256 *string @@ -1285,21 +1283,21 @@ type InstanceEntry struct { // The port configuration to use for the new Amazon EC2 instance. The following // configuration options are available: // - // * DEFAULT - Use the default firewall + // * DEFAULT - Use the default firewall // settings from the Lightsail instance blueprint. // - // * INSTANCE - Use the - // configured firewall settings from the source Lightsail instance. + // * INSTANCE - Use the configured + // firewall settings from the source Lightsail instance. // - // * NONE - - // Use the default Amazon EC2 security group. + // * NONE - Use the default + // Amazon EC2 security group. // - // * CLOSED - All ports closed. + // * CLOSED - All ports closed. // - // If - // you configured lightsail-connect as a cidrListAliases on your instance, or if - // you chose to allow the Lightsail browser-based SSH or RDP clients to connect to - // your instance, that configuration is not carried over to your new Amazon EC2 + // If you configured + // lightsail-connect as a cidrListAliases on your instance, or if you chose to + // allow the Lightsail browser-based SSH or RDP clients to connect to your + // instance, that configuration is not carried over to your new Amazon EC2 // instance. // // This member is required. 
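The Lightsail hunks above make two kinds of change: the generated enum constants lose their embedded underscores (for example MetricNameHttpcodeLb5xxCount and RegionNameUsEast1) while keeping the same string values, and the doc comments on types such as Alarm are re-wrapped. A minimal sketch of caller code against the renamed constants, assuming only what the hunks show plus an Alarm.MetricName field taken from the service model for illustration:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/lightsail/types"
)

func main() {
	// Only the Go identifiers changed; the string values they carry are
	// identical to the pre-rename constants.
	fmt.Println(types.RegionNameUsEast1)            // us-east-1
	fmt.Println(types.MetricNameHttpcodeLb5xxCount) // HTTPCode_LB_5XX_Count

	// Struct fields such as State, Statistic, and TreatMissingData on
	// types.Alarm are untouched by this patch; only their comments were
	// re-flowed. MetricName is assumed here for illustration.
	alarm := types.Alarm{
		MetricName: types.MetricNameStatuscheckfailedInstance,
	}
	fmt.Println(string(alarm.MetricName)) // StatusCheckFailed_Instance
}

Because the underlying values do not change, the rename is source-breaking only; nothing differs on the wire.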
@@ -1344,55 +1342,54 @@ type InstanceHealthSummary struct { // then an instanceHealthReason value is not provided. If instanceHealth is // initial, the instanceHealthReason value can be one of the following: // - // * + // * // Lb.RegistrationInProgress - The target instance is in the process of being // registered with the load balancer. // - // * Lb.InitialHealthChecking - The - // Lightsail load balancer is still sending the target instance the minimum number - // of health checks required to determine its health status. + // * Lb.InitialHealthChecking - The Lightsail + // load balancer is still sending the target instance the minimum number of health + // checks required to determine its health status. // - // If instanceHealth is - // unhealthy, the instanceHealthReason value can be one of the following: + // If instanceHealth is unhealthy, + // the instanceHealthReason value can be one of the following: // - // * + // * // Instance.ResponseCodeMismatch - The health checks did not return an expected // HTTP code. // - // * Instance.Timeout - The health check requests timed out. + // * Instance.Timeout - The health check requests timed out. // - // * + // * // Instance.FailedHealthChecks - The health checks failed because the connection to // the target instance timed out, the target instance response was malformed, or // the target instance failed the health check for an unknown reason. // - // * + // * // Lb.InternalError - The health checks failed due to an internal error. // // If // instanceHealth is unused, the instanceHealthReason value can be one of the // following: // - // * Instance.NotRegistered - The target instance is not registered + // * Instance.NotRegistered - The target instance is not registered // with the target group. // - // * Instance.NotInUse - The target group is not used - // by any load balancer, or the target instance is in an Availability Zone that is - // not enabled for its load balancer. + // * Instance.NotInUse - The target group is not used by + // any load balancer, or the target instance is in an Availability Zone that is not + // enabled for its load balancer. // - // * Instance.IpUnusable - The target IP - // address is reserved for use by a Lightsail load balancer. + // * Instance.IpUnusable - The target IP address is + // reserved for use by a Lightsail load balancer. // - // * - // Instance.InvalidState - The target is in the stopped or terminated state. + // * Instance.InvalidState - The + // target is in the stopped or terminated state. // - // If - // instanceHealth is draining, the instanceHealthReason value can be one of the - // following: + // If instanceHealth is draining, + // the instanceHealthReason value can be one of the following: // - // * Instance.DeregistrationInProgress - The target instance is in - // the process of being deregistered and the deregistration delay period has not - // expired. + // * + // Instance.DeregistrationInProgress - The target instance is in the process of + // being deregistered and the deregistration delay period has not expired. InstanceHealthReason InstanceHealthReason // The name of the Lightsail instance for which you are requesting health check @@ -1444,38 +1441,38 @@ type InstancePortInfo struct { // The first port in a range of open ports on an instance. Allowed ports: // - // * - // TCP and UDP - 0 to 65535 + // * TCP + // and UDP - 0 to 65535 // - // * ICMP - The ICMP type. For example, specify 8 as - // the fromPort (ICMP type), and -1 as the toPort (ICMP code), to enable ICMP Ping. 
- // For more information, see Control Messages + // * ICMP - The ICMP type. For example, specify 8 as the + // fromPort (ICMP type), and -1 as the toPort (ICMP code), to enable ICMP Ping. For + // more information, see Control Messages // (https://en.wikipedia.org/wiki/Internet_Control_Message_Protocol#Control_messages) // on Wikipedia. FromPort *int32 // The IP protocol name. The name can be one of the following: // - // * tcp - + // * tcp - // Transmission Control Protocol (TCP) provides reliable, ordered, and // error-checked delivery of streamed data between applications running on hosts // communicating by an IP network. If you have an application that doesn't require // reliable data stream service, use UDP instead. // - // * all - All transport layer + // * all - All transport layer // protocol types. For more general information, see Transport layer // (https://en.wikipedia.org/wiki/Transport_layer) on Wikipedia. // - // * udp - With - // User Datagram Protocol (UDP), computer applications can send messages (or - // datagrams) to other hosts on an Internet Protocol (IP) network. Prior - // communications are not required to set up transmission channels or data paths. - // Applications that don't require reliable data stream service can use UDP, which - // provides a connectionless datagram service that emphasizes reduced latency over + // * udp - With User + // Datagram Protocol (UDP), computer applications can send messages (or datagrams) + // to other hosts on an Internet Protocol (IP) network. Prior communications are + // not required to set up transmission channels or data paths. Applications that + // don't require reliable data stream service can use UDP, which provides a + // connectionless datagram service that emphasizes reduced latency over // reliability. If you do require reliable data stream service, use TCP instead. // - // - // * icmp - Internet Control Message Protocol (ICMP) is used to send error messages + // * + // icmp - Internet Control Message Protocol (ICMP) is used to send error messages // and operational information indicating success or failure when communicating // with an instance. For example, an error is indicated when an instance could not // be reached. When you specify icmp as the protocol, you must specify the ICMP @@ -1484,12 +1481,12 @@ type InstancePortInfo struct { // The last port in a range of open ports on an instance. Allowed ports: // - // * TCP - // and UDP - 0 to 65535 + // * TCP and + // UDP - 0 to 65535 // - // * ICMP - The ICMP code. For example, specify 8 as the - // fromPort (ICMP type), and -1 as the toPort (ICMP code), to enable ICMP Ping. For - // more information, see Control Messages + // * ICMP - The ICMP code. For example, specify 8 as the fromPort + // (ICMP type), and -1 as the toPort (ICMP code), to enable ICMP Ping. For more + // information, see Control Messages // (https://en.wikipedia.org/wiki/Internet_Control_Message_Protocol#Control_messages) // on Wikipedia. ToPort *int32 @@ -1515,38 +1512,38 @@ type InstancePortState struct { // The first port in a range of open ports on an instance. Allowed ports: // - // * - // TCP and UDP - 0 to 65535 + // * TCP + // and UDP - 0 to 65535 // - // * ICMP - The ICMP type. For example, specify 8 as - // the fromPort (ICMP type), and -1 as the toPort (ICMP code), to enable ICMP Ping. - // For more information, see Control Messages + // * ICMP - The ICMP type. For example, specify 8 as the + // fromPort (ICMP type), and -1 as the toPort (ICMP code), to enable ICMP Ping. 
For + // more information, see Control Messages // (https://en.wikipedia.org/wiki/Internet_Control_Message_Protocol#Control_messages) // on Wikipedia. FromPort *int32 // The IP protocol name. The name can be one of the following: // - // * tcp - + // * tcp - // Transmission Control Protocol (TCP) provides reliable, ordered, and // error-checked delivery of streamed data between applications running on hosts // communicating by an IP network. If you have an application that doesn't require // reliable data stream service, use UDP instead. // - // * all - All transport layer + // * all - All transport layer // protocol types. For more general information, see Transport layer // (https://en.wikipedia.org/wiki/Transport_layer) on Wikipedia. // - // * udp - With - // User Datagram Protocol (UDP), computer applications can send messages (or - // datagrams) to other hosts on an Internet Protocol (IP) network. Prior - // communications are not required to set up transmission channels or data paths. - // Applications that don't require reliable data stream service can use UDP, which - // provides a connectionless datagram service that emphasizes reduced latency over + // * udp - With User + // Datagram Protocol (UDP), computer applications can send messages (or datagrams) + // to other hosts on an Internet Protocol (IP) network. Prior communications are + // not required to set up transmission channels or data paths. Applications that + // don't require reliable data stream service can use UDP, which provides a + // connectionless datagram service that emphasizes reduced latency over // reliability. If you do require reliable data stream service, use TCP instead. // - // - // * icmp - Internet Control Message Protocol (ICMP) is used to send error messages + // * + // icmp - Internet Control Message Protocol (ICMP) is used to send error messages // and operational information indicating success or failure when communicating // with an instance. For example, an error is indicated when an instance could not // be reached. When you specify icmp as the protocol, you must specify the ICMP @@ -1559,12 +1556,12 @@ type InstancePortState struct { // The last port in a range of open ports on an instance. Allowed ports: // - // * TCP - // and UDP - 0 to 65535 + // * TCP and + // UDP - 0 to 65535 // - // * ICMP - The ICMP code. For example, specify 8 as the - // fromPort (ICMP type), and -1 as the toPort (ICMP code), to enable ICMP Ping. For - // more information, see Control Messages + // * ICMP - The ICMP code. For example, specify 8 as the fromPort + // (ICMP type), and -1 as the toPort (ICMP code), to enable ICMP Ping. For more + // information, see Control Messages // (https://en.wikipedia.org/wiki/Internet_Control_Message_Protocol#Control_messages) // on Wikipedia. ToPort *int32 @@ -1848,10 +1845,10 @@ type LoadBalancerTlsCertificate struct { // The validation failure reason, if any, of the certificate. The following failure // reasons are possible: // - // * NO_AVAILABLE_CONTACTS - This failure applies to - // email validation, which is not available for Lightsail certificates. + // * NO_AVAILABLE_CONTACTS - This failure applies to email + // validation, which is not available for Lightsail certificates. // - // * + // * // ADDITIONAL_VERIFICATION_REQUIRED - Lightsail requires additional information to // process this certificate request. This can happen as a fraud-protection measure, // such as when the domain ranks within the Alexa top 1000 websites. 
To provide the @@ -1860,22 +1857,22 @@ type LoadBalancerTlsCertificate struct { // request a certificate for Amazon-owned domain names such as those ending in // amazonaws.com, cloudfront.net, or elasticbeanstalk.com. // - // * - // DOMAIN_NOT_ALLOWED - One or more of the domain names in the certificate request - // was reported as an unsafe domain by VirusTotal - // (https://www.virustotal.com/gui/home/url). To correct the problem, search for - // your domain name on the VirusTotal (https://www.virustotal.com/gui/home/url) - // website. If your domain is reported as suspicious, see Google Help for Hacked - // Websites (https://www.google.com/webmasters/hacked/?hl=en) to learn what you can - // do. If you believe that the result is a false positive, notify the organization - // that is reporting the domain. VirusTotal is an aggregate of several antivirus - // and URL scanners and cannot remove your domain from a block list itself. After - // you correct the problem and the VirusTotal registry has been updated, request a - // new certificate. If you see this error and your domain is not included in the + // * DOMAIN_NOT_ALLOWED - + // One or more of the domain names in the certificate request was reported as an + // unsafe domain by VirusTotal (https://www.virustotal.com/gui/home/url). To + // correct the problem, search for your domain name on the VirusTotal + // (https://www.virustotal.com/gui/home/url) website. If your domain is reported as + // suspicious, see Google Help for Hacked Websites + // (https://www.google.com/webmasters/hacked/?hl=en) to learn what you can do. If + // you believe that the result is a false positive, notify the organization that is + // reporting the domain. VirusTotal is an aggregate of several antivirus and URL + // scanners and cannot remove your domain from a block list itself. After you + // correct the problem and the VirusTotal registry has been updated, request a new + // certificate. If you see this error and your domain is not included in the // VirusTotal list, visit the AWS Support Center // (https://console.aws.amazon.com/support/home) and create a case. // - // * + // * // INVALID_PUBLIC_DOMAIN - One or more of the domain names in the certificate // request is not valid. Typically, this is because a domain name in the request is // not a valid top-level domain. Try to request a certificate again, correcting any @@ -1884,10 +1881,10 @@ type LoadBalancerTlsCertificate struct { // cannot request a certificate for example.invalidpublicdomain because // invalidpublicdomain is not a valid top-level domain. // - // * OTHER - Typically, - // this failure occurs when there is a typographical error in one or more of the - // domain names in the certificate request. Try to request a certificate again, - // correcting any spelling errors or typos that were in the failed request. + // * OTHER - Typically, this + // failure occurs when there is a typographical error in one or more of the domain + // names in the certificate request. Try to request a certificate again, correcting + // any spelling errors or typos that were in the failed request. FailureReason LoadBalancerTlsCertificateFailureReason // When true, the SSL/TLS certificate is attached to the Lightsail load balancer. @@ -1923,32 +1920,31 @@ type LoadBalancerTlsCertificate struct { // The resource type (e.g., LoadBalancerTlsCertificate). 
// - // * Instance - A - // Lightsail instance (a virtual private server) - // - // * StaticIp - A static IP - // address - // - // * KeyPair - The key pair used to connect to a Lightsail instance - // + // * Instance - A Lightsail + // instance (a virtual private server) // - // * InstanceSnapshot - A Lightsail instance snapshot + // * StaticIp - A static IP address // - // * Domain - A DNS zone + // * KeyPair + // - The key pair used to connect to a Lightsail instance // + // * InstanceSnapshot - A + // Lightsail instance snapshot // - // * PeeredVpc - A peered VPC + // * Domain - A DNS zone // - // * LoadBalancer - A Lightsail load balancer + // * PeeredVpc - A peered + // VPC // + // * LoadBalancer - A Lightsail load balancer // - // * LoadBalancerTlsCertificate - An SSL/TLS certificate associated with a - // Lightsail load balancer + // * LoadBalancerTlsCertificate - + // An SSL/TLS certificate associated with a Lightsail load balancer // - // * Disk - A Lightsail block storage disk + // * Disk - A + // Lightsail block storage disk // - // * - // DiskSnapshot - A block storage disk snapshot + // * DiskSnapshot - A block storage disk snapshot ResourceType ResourceType // The reason the certificate was revoked. This value is present only when the @@ -2021,24 +2017,23 @@ type LoadBalancerTlsCertificateDomainValidationRecord struct { // certificate. The renewal status of the certificate. The following renewal status // are possible: // -// * PendingAutoRenewal - Lightsail is attempting to -// automatically validate the domain names in the certificate. No further action is -// required. +// * PendingAutoRenewal - Lightsail is attempting to automatically +// validate the domain names in the certificate. No further action is required. // -// * PendingValidation - Lightsail couldn't automatically validate -// one or more domain names in the certificate. You must take action to validate -// these domain names or the certificate won't be renewed. If you used DNS -// validation, check to make sure your certificate's domain validation records -// exist in your domain's DNS, and that your certificate remains in use. +// * +// PendingValidation - Lightsail couldn't automatically validate one or more domain +// names in the certificate. You must take action to validate these domain names or +// the certificate won't be renewed. If you used DNS validation, check to make sure +// your certificate's domain validation records exist in your domain's DNS, and +// that your certificate remains in use. // -// * -// Success - All domain names in the certificate are validated, and Lightsail -// renewed the certificate. No further action is required. +// * Success - All domain names in the +// certificate are validated, and Lightsail renewed the certificate. No further +// action is required. // -// * Failed - One or -// more domain names were not validated before the certificate expired, and -// Lightsail did not renew the certificate. You can request a new certificate using -// the CreateCertificate action. +// * Failed - One or more domain names were not validated +// before the certificate expired, and Lightsail did not renew the certificate. You +// can request a new certificate using the CreateCertificate action. type LoadBalancerTlsCertificateRenewalSummary struct { // Contains information about the validation of each domain name in the @@ -2050,23 +2045,23 @@ type LoadBalancerTlsCertificateRenewalSummary struct { // The renewal status of the certificate. 
The following renewal status are // possible: // - // * PendingAutoRenewal - Lightsail is attempting to automatically + // * PendingAutoRenewal - Lightsail is attempting to automatically // validate the domain names of the certificate. No further action is required. // + // * + // PendingValidation - Lightsail couldn't automatically validate one or more domain + // names of the certificate. You must take action to validate these domain names or + // the certificate won't be renewed. Check to make sure your certificate's domain + // validation records exist in your domain's DNS, and that your certificate remains + // in use. // - // * PendingValidation - Lightsail couldn't automatically validate one or more - // domain names of the certificate. You must take action to validate these domain - // names or the certificate won't be renewed. Check to make sure your certificate's - // domain validation records exist in your domain's DNS, and that your certificate - // remains in use. + // * Success - All domain names in the certificate are validated, and + // Lightsail renewed the certificate. No further action is required. // - // * Success - All domain names in the certificate are - // validated, and Lightsail renewed the certificate. No further action is - // required. - // - // * Failed - One or more domain names were not validated before the - // certificate expired, and Lightsail did not renew the certificate. You can - // request a new certificate using the CreateCertificate action. + // * Failed - + // One or more domain names were not validated before the certificate expired, and + // Lightsail did not renew the certificate. You can request a new certificate using + // the CreateCertificate action. RenewalStatus LoadBalancerTlsCertificateRenewalStatus } @@ -2265,10 +2260,10 @@ type PortInfo struct { // connect to an instance through the ports, and the protocol. Lightsail supports // IPv4 addresses. Examples: // - // * To allow the IP address 192.0.2.44, specify + // * To allow the IP address 192.0.2.44, specify // 192.0.2.44 or 192.0.2.44/32. // - // * To allow the IP addresses 192.0.2.0 to + // * To allow the IP addresses 192.0.2.0 to // 192.0.2.255, specify 192.0.2.0/24. // // For more information about CIDR block @@ -2279,38 +2274,38 @@ type PortInfo struct { // The first port in a range of open ports on an instance. Allowed ports: // - // * - // TCP and UDP - 0 to 65535 + // * TCP + // and UDP - 0 to 65535 // - // * ICMP - The ICMP type. For example, specify 8 as - // the fromPort (ICMP type), and -1 as the toPort (ICMP code), to enable ICMP Ping. - // For more information, see Control Messages + // * ICMP - The ICMP type. For example, specify 8 as the + // fromPort (ICMP type), and -1 as the toPort (ICMP code), to enable ICMP Ping. For + // more information, see Control Messages // (https://en.wikipedia.org/wiki/Internet_Control_Message_Protocol#Control_messages) // on Wikipedia. FromPort *int32 // The IP protocol name. The name can be one of the following: // - // * tcp - + // * tcp - // Transmission Control Protocol (TCP) provides reliable, ordered, and // error-checked delivery of streamed data between applications running on hosts // communicating by an IP network. If you have an application that doesn't require // reliable data stream service, use UDP instead. // - // * all - All transport layer + // * all - All transport layer // protocol types. For more general information, see Transport layer // (https://en.wikipedia.org/wiki/Transport_layer) on Wikipedia. 
// - // * udp - With - // User Datagram Protocol (UDP), computer applications can send messages (or - // datagrams) to other hosts on an Internet Protocol (IP) network. Prior - // communications are not required to set up transmission channels or data paths. - // Applications that don't require reliable data stream service can use UDP, which - // provides a connectionless datagram service that emphasizes reduced latency over + // * udp - With User + // Datagram Protocol (UDP), computer applications can send messages (or datagrams) + // to other hosts on an Internet Protocol (IP) network. Prior communications are + // not required to set up transmission channels or data paths. Applications that + // don't require reliable data stream service can use UDP, which provides a + // connectionless datagram service that emphasizes reduced latency over // reliability. If you do require reliable data stream service, use TCP instead. // - // - // * icmp - Internet Control Message Protocol (ICMP) is used to send error messages + // * + // icmp - Internet Control Message Protocol (ICMP) is used to send error messages // and operational information indicating success or failure when communicating // with an instance. For example, an error is indicated when an instance could not // be reached. When you specify icmp as the protocol, you must specify the ICMP @@ -2319,12 +2314,12 @@ type PortInfo struct { // The last port in a range of open ports on an instance. Allowed ports: // - // * TCP - // and UDP - 0 to 65535 + // * TCP and + // UDP - 0 to 65535 // - // * ICMP - The ICMP code. For example, specify 8 as the - // fromPort (ICMP type), and -1 as the toPort (ICMP code), to enable ICMP Ping. For - // more information, see Control Messages + // * ICMP - The ICMP code. For example, specify 8 as the fromPort + // (ICMP type), and -1 as the toPort (ICMP code), to enable ICMP Ping. For more + // information, see Control Messages // (https://en.wikipedia.org/wiki/Internet_Control_Message_Protocol#Control_messages) // on Wikipedia. ToPort *int32 @@ -2654,23 +2649,23 @@ type RenewalSummary struct { // The renewal status of the certificate. The following renewal status are // possible: // - // * PendingAutoRenewal - Lightsail is attempting to automatically + // * PendingAutoRenewal - Lightsail is attempting to automatically // validate the domain names of the certificate. No further action is required. // + // * + // PendingValidation - Lightsail couldn't automatically validate one or more domain + // names of the certificate. You must take action to validate these domain names or + // the certificate won't be renewed. Check to make sure your certificate's domain + // validation records exist in your domain's DNS, and that your certificate remains + // in use. // - // * PendingValidation - Lightsail couldn't automatically validate one or more - // domain names of the certificate. You must take action to validate these domain - // names or the certificate won't be renewed. Check to make sure your certificate's - // domain validation records exist in your domain's DNS, and that your certificate - // remains in use. - // - // * Success - All domain names in the certificate are - // validated, and Lightsail renewed the certificate. No further action is - // required. + // * Success - All domain names in the certificate are validated, and + // Lightsail renewed the certificate. No further action is required. 
// - // * Failed - One or more domain names were not validated before the - // certificate expired, and Lightsail did not renew the certificate. You can - // request a new certificate using the CreateCertificate action. + // * Failed - + // One or more domain names were not validated before the certificate expired, and + // Lightsail did not renew the certificate. You can request a new certificate using + // the CreateCertificate action. RenewalStatus RenewalStatus // The reason for the renewal status of the certificate. diff --git a/service/machinelearning/api_op_CreateDataSourceFromRDS.go b/service/machinelearning/api_op_CreateDataSourceFromRDS.go index e4fb10f36da..d9b2af99126 100644 --- a/service/machinelearning/api_op_CreateDataSourceFromRDS.go +++ b/service/machinelearning/api_op_CreateDataSourceFromRDS.go @@ -47,53 +47,53 @@ type CreateDataSourceFromRDSInput struct { // The data specification of an Amazon RDS DataSource: // - // * DatabaseInformation - // - + // * DatabaseInformation - // - // * DatabaseName - The name of the Amazon RDS database. + // * + // DatabaseName - The name of the Amazon RDS database. // - // * - // InstanceIdentifier - A unique identifier for the Amazon RDS database - // instance. + // * InstanceIdentifier - A + // unique identifier for the Amazon RDS database instance. // - // * DatabaseCredentials - AWS Identity and Access Management (IAM) - // credentials that are used to connect to the Amazon RDS database. + // * DatabaseCredentials - + // AWS Identity and Access Management (IAM) credentials that are used to connect to + // the Amazon RDS database. // - // * - // ResourceRole - A role (DataPipelineDefaultResourceRole) assumed by an EC2 - // instance to carry out the copy task from Amazon RDS to Amazon Simple Storage - // Service (Amazon S3). For more information, see Role templates + // * ResourceRole - A role + // (DataPipelineDefaultResourceRole) assumed by an EC2 instance to carry out the + // copy task from Amazon RDS to Amazon Simple Storage Service (Amazon S3). For more + // information, see Role templates // (https://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html) // for data pipelines. // - // * ServiceRole - A role (DataPipelineDefaultRole) - // assumed by the AWS Data Pipeline service to monitor the progress of the copy - // task from Amazon RDS to Amazon S3. For more information, see Role templates + // * ServiceRole - A role (DataPipelineDefaultRole) assumed by + // the AWS Data Pipeline service to monitor the progress of the copy task from + // Amazon RDS to Amazon S3. For more information, see Role templates // (https://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html) // for data pipelines. // - // * SecurityInfo - The security information to use to - // access an RDS DB instance. You need to set up appropriate ingress rules for the + // * SecurityInfo - The security information to use to access + // an RDS DB instance. You need to set up appropriate ingress rules for the // security entity IDs provided to allow access to the Amazon RDS instance. Specify // a [SubnetId, SecurityGroupIds] pair for a VPC-based RDS DB instance. // - // * + // * // SelectSqlQuery - A query that is used to retrieve the observation data for the // Datasource. // - // * S3StagingLocation - The Amazon S3 location for staging Amazon - // RDS data. The data retrieved from Amazon RDS using SelectSqlQuery is stored in - // this location. 
+ // * S3StagingLocation - The Amazon S3 location for staging Amazon RDS + // data. The data retrieved from Amazon RDS using SelectSqlQuery is stored in this + // location. // - // * DataSchemaUri - The Amazon S3 location of the - // DataSchema. + // * DataSchemaUri - The Amazon S3 location of the DataSchema. // - // * DataSchema - A JSON string representing the schema. This is - // not required if DataSchemaUri is specified. + // * + // DataSchema - A JSON string representing the schema. This is not required if + // DataSchemaUri is specified. // - // * DataRearrangement - A JSON - // string that represents the splitting and rearrangement requirements for the - // Datasource. Sample - "{\"splitting\":{\"percentBegin\":10,\"percentEnd\":60}}" + // * DataRearrangement - A JSON string that represents + // the splitting and rearrangement requirements for the Datasource. Sample - + // "{\"splitting\":{\"percentBegin\":10,\"percentEnd\":60}}" // // This member is required. RDSData *types.RDSDataSpec diff --git a/service/machinelearning/api_op_CreateDataSourceFromRedshift.go b/service/machinelearning/api_op_CreateDataSourceFromRedshift.go index 212e7edbbb3..c814e91f71a 100644 --- a/service/machinelearning/api_op_CreateDataSourceFromRedshift.go +++ b/service/machinelearning/api_op_CreateDataSourceFromRedshift.go @@ -61,35 +61,35 @@ type CreateDataSourceFromRedshiftInput struct { // The data specification of an Amazon Redshift DataSource: // - // * - // DatabaseInformation - + // * DatabaseInformation + // - // - // * DatabaseName - The name of the Amazon Redshift - // database. + // * DatabaseName - The name of the Amazon Redshift database. // - // * ClusterIdentifier - The unique ID for the Amazon Redshift - // cluster. + // * + // ClusterIdentifier - The unique ID for the Amazon Redshift cluster. // - // * DatabaseCredentials - The AWS Identity and Access Management - // (IAM) credentials that are used to connect to the Amazon Redshift database. + // * + // DatabaseCredentials - The AWS Identity and Access Management (IAM) credentials + // that are used to connect to the Amazon Redshift database. // + // * SelectSqlQuery - + // The query that is used to retrieve the observation data for the Datasource. // - // * SelectSqlQuery - The query that is used to retrieve the observation data for - // the Datasource. + // * + // S3StagingLocation - The Amazon Simple Storage Service (Amazon S3) location for + // staging Amazon Redshift data. The data retrieved from Amazon Redshift using the + // SelectSqlQuery query is stored in this location. // - // * S3StagingLocation - The Amazon Simple Storage Service - // (Amazon S3) location for staging Amazon Redshift data. The data retrieved from - // Amazon Redshift using the SelectSqlQuery query is stored in this location. + // * DataSchemaUri - The Amazon + // S3 location of the DataSchema. // + // * DataSchema - A JSON string representing the + // schema. This is not required if DataSchemaUri is specified. // - // * DataSchemaUri - The Amazon S3 location of the DataSchema. - // - // * DataSchema - - // A JSON string representing the schema. This is not required if DataSchemaUri is - // specified. - // - // * DataRearrangement - A JSON string that represents the - // splitting and rearrangement requirements for the DataSource. Sample - + // * DataRearrangement + // - A JSON string that represents the splitting and rearrangement requirements for + // the DataSource. 
Sample - // "{\"splitting\":{\"percentBegin\":10,\"percentEnd\":60}}" // // This member is required. @@ -98,11 +98,11 @@ type CreateDataSourceFromRedshiftInput struct { // A fully specified role Amazon Resource Name (ARN). Amazon ML assumes the role on // behalf of the user to create the following: // - // * A security group to allow - // Amazon ML to execute the SelectSqlQuery query on an Amazon Redshift cluster - // + // * A security group to allow Amazon + // ML to execute the SelectSqlQuery query on an Amazon Redshift cluster // - // * An Amazon S3 bucket policy to grant Amazon ML read/write permissions on the + // * An + // Amazon S3 bucket policy to grant Amazon ML read/write permissions on the // S3StagingLocation // // This member is required. diff --git a/service/machinelearning/api_op_CreateDataSourceFromS3.go b/service/machinelearning/api_op_CreateDataSourceFromS3.go index 3c20497561c..a4c31938b22 100644 --- a/service/machinelearning/api_op_CreateDataSourceFromS3.go +++ b/service/machinelearning/api_op_CreateDataSourceFromS3.go @@ -58,18 +58,18 @@ type CreateDataSourceFromS3Input struct { // The data specification of a DataSource: // - // * DataLocationS3 - The Amazon S3 + // * DataLocationS3 - The Amazon S3 // location of the observation data. // - // * DataSchemaLocationS3 - The Amazon S3 + // * DataSchemaLocationS3 - The Amazon S3 // location of the DataSchema. // - // * DataSchema - A JSON string representing the + // * DataSchema - A JSON string representing the // schema. This is not required if DataSchemaUri is specified. // - // * - // DataRearrangement - A JSON string that represents the splitting and - // rearrangement requirements for the Datasource. Sample - + // * DataRearrangement + // - A JSON string that represents the splitting and rearrangement requirements for + // the Datasource. Sample - // "{\"splitting\":{\"percentBegin\":10,\"percentEnd\":60}}" // // This member is required. diff --git a/service/machinelearning/api_op_CreateMLModel.go b/service/machinelearning/api_op_CreateMLModel.go index 292ac2287d3..1d63c782c0a 100644 --- a/service/machinelearning/api_op_CreateMLModel.go +++ b/service/machinelearning/api_op_CreateMLModel.go @@ -47,17 +47,17 @@ type CreateMLModelInput struct { // The category of supervised learning that this MLModel will address. Choose from // the following types: // - // * Choose REGRESSION if the MLModel will be used to - // predict a numeric value. + // * Choose REGRESSION if the MLModel will be used to predict + // a numeric value. // - // * Choose BINARY if the MLModel result has two - // possible values. + // * Choose BINARY if the MLModel result has two possible + // values. // - // * Choose MULTICLASS if the MLModel result has a limited - // number of values. + // * Choose MULTICLASS if the MLModel result has a limited number of + // values. // - // For more information, see the Amazon Machine Learning - // Developer Guide (https://docs.aws.amazon.com/machine-learning/latest/dg). + // For more information, see the Amazon Machine Learning Developer Guide + // (https://docs.aws.amazon.com/machine-learning/latest/dg). // // This member is required. MLModelType types.MLModelType @@ -74,30 +74,30 @@ type CreateMLModelInput struct { // map of key-value pairs. The following is the current set of training // parameters: // - // * sgd.maxMLModelSizeInBytes - The maximum allowed size of the + // * sgd.maxMLModelSizeInBytes - The maximum allowed size of the // model. 
Depending on the input data, the size of the model might affect its // performance. The value is an integer that ranges from 100000 to 2147483648. The // default value is 33554432. // - // * sgd.maxPasses - The number of times that the + // * sgd.maxPasses - The number of times that the // training process traverses the observations to build the MLModel. The value is // an integer that ranges from 1 to 100. The default value is 10. // - // * + // * // sgd.shuffleType - Whether Amazon ML shuffles the training data. Shuffling the // data improves a model's ability to find the optimal solution for a variety of // data types. The valid values are auto and none. The default value is none. We // strongly recommend that you shuffle your data. // - // * sgd.l1RegularizationAmount - // - The coefficient regularization L1 norm. It controls overfitting the data by + // * sgd.l1RegularizationAmount - + // The coefficient regularization L1 norm. It controls overfitting the data by // penalizing large coefficients. This tends to drive coefficients to zero, // resulting in a sparse feature set. If you use this parameter, start by // specifying a small value, such as 1.0E-08. The value is a double that ranges // from 0 to MAX_DOUBLE. The default is to not use L1 normalization. This parameter // can't be used when L2 is specified. Use this parameter sparingly. // - // * + // * // sgd.l2RegularizationAmount - The coefficient regularization L2 norm. It controls // overfitting the data by penalizing large coefficients. This tends to drive // coefficients to small, nonzero values. If you use this parameter, start by diff --git a/service/machinelearning/api_op_DescribeBatchPredictions.go b/service/machinelearning/api_op_DescribeBatchPredictions.go index db478911a40..7ce308455da 100644 --- a/service/machinelearning/api_op_DescribeBatchPredictions.go +++ b/service/machinelearning/api_op_DescribeBatchPredictions.go @@ -36,29 +36,28 @@ type DescribeBatchPredictionsInput struct { // Use one of the following variables to filter a list of BatchPrediction: // - // * + // * // CreatedAt - Sets the search criteria to the BatchPrediction creation date. // + // * + // Status - Sets the search criteria to the BatchPrediction status. // - // * Status - Sets the search criteria to the BatchPrediction status. + // * Name - Sets + // the search criteria to the contents of the BatchPredictionName. // - // * Name - - // Sets the search criteria to the contents of the BatchPredictionName. + // * IAMUser - + // Sets the search criteria to the user account that invoked the BatchPrediction + // creation. // - // * - // IAMUser - Sets the search criteria to the user account that invoked the - // BatchPrediction creation. + // * MLModelId - Sets the search criteria to the MLModel used in the + // BatchPrediction. // - // * MLModelId - Sets the search criteria to the - // MLModel used in the BatchPrediction. + // * DataSourceId - Sets the search criteria to the DataSource + // used in the BatchPrediction. // - // * DataSourceId - Sets the search - // criteria to the DataSource used in the BatchPrediction. - // - // * DataURI - Sets - // the search criteria to the data file(s) used in the BatchPrediction. The URL can - // identify either a file or an Amazon Simple Storage Solution (Amazon S3) bucket - // or directory. + // * DataURI - Sets the search criteria to the data + // file(s) used in the BatchPrediction. The URL can identify either a file or an + // Amazon Simple Storage Solution (Amazon S3) bucket or directory. 
FilterVariable types.BatchPredictionFilterVariable // The greater than or equal to operator. The BatchPrediction results will have @@ -95,21 +94,21 @@ type DescribeBatchPredictionsInput struct { // Name2014-09-09-HolidayGiftMailer. To search for this BatchPrediction, select // Name for the FilterVariable and any of the following strings for the Prefix: // + // * + // 2014-09 // - // * 2014-09 - // - // * 2014-09-09 + // * 2014-09-09 // - // * 2014-09-09-Holiday + // * 2014-09-09-Holiday Prefix *string // A two-value parameter that determines the sequence of the resulting list of // MLModels. // - // * asc - Arranges the list in ascending order (A-Z, 0-9). + // * asc - Arranges the list in ascending order (A-Z, 0-9). // - // * - // dsc - Arranges the list in descending order (Z-A, 9-0). + // * dsc - + // Arranges the list in descending order (Z-A, 9-0). // // Results are sorted by // FilterVariable. diff --git a/service/machinelearning/api_op_DescribeDataSources.go b/service/machinelearning/api_op_DescribeDataSources.go index 3dfb0771751..f5c2f9cf780 100644 --- a/service/machinelearning/api_op_DescribeDataSources.go +++ b/service/machinelearning/api_op_DescribeDataSources.go @@ -35,22 +35,22 @@ type DescribeDataSourcesInput struct { // Use one of the following variables to filter a list of DataSource: // - // * - // CreatedAt - Sets the search criteria to DataSource creation dates. + // * CreatedAt + // - Sets the search criteria to DataSource creation dates. // - // * Status - // - Sets the search criteria to DataSource statuses. + // * Status - Sets the + // search criteria to DataSource statuses. // - // * Name - Sets the search - // criteria to the contents of DataSourceName. + // * Name - Sets the search criteria to + // the contents of DataSourceName. // - // * DataUri - Sets the search - // criteria to the URI of data files used to create the DataSource. The URI can - // identify either a file or an Amazon Simple Storage Service (Amazon S3) bucket or - // directory. + // * DataUri - Sets the search criteria to the URI + // of data files used to create the DataSource. The URI can identify either a file + // or an Amazon Simple Storage Service (Amazon S3) bucket or directory. // - // * IAMUser - Sets the search criteria to the user account that - // invoked the DataSource creation. + // * IAMUser + // - Sets the search criteria to the user account that invoked the DataSource + // creation. FilterVariable types.DataSourceFilterVariable // The greater than or equal to operator. The DataSource results will have @@ -86,21 +86,20 @@ type DescribeDataSourcesInput struct { // for this DataSource, select Name for the FilterVariable and any of the following // strings for the Prefix: // - // * 2014-09 + // * 2014-09 // - // * 2014-09-09 + // * 2014-09-09 // - // * - // 2014-09-09-Holiday + // * 2014-09-09-Holiday Prefix *string // A two-value parameter that determines the sequence of the resulting list of // DataSource. // - // * asc - Arranges the list in ascending order (A-Z, 0-9). + // * asc - Arranges the list in ascending order (A-Z, 0-9). // - // * - // dsc - Arranges the list in descending order (Z-A, 9-0). + // * dsc - + // Arranges the list in descending order (Z-A, 9-0). // // Results are sorted by // FilterVariable. 
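The DescribeBatchPredictions and DescribeDataSources hunks above only re-flow the generated doc comments; the FilterVariable, Prefix, and sort-order semantics are unchanged. A minimal sketch of a DescribeDataSources call that exercises them, assuming the enum constant names (types.DataSourceFilterVariableName, types.SortOrderAsc) follow from the documented wire values "Name" and "asc", and that the client is built the way current aws-sdk-go-v2 releases do (the preview this patch targets may differ slightly):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/machinelearning"
	"github.com/aws/aws-sdk-go-v2/service/machinelearning/types"
)

func main() {
	ctx := context.Background()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := machinelearning.NewFromConfig(cfg)

	// Match DataSource names that start with a date prefix, sorted
	// ascending, as described in the comments above.
	out, err := client.DescribeDataSources(ctx, &machinelearning.DescribeDataSourcesInput{
		FilterVariable: types.DataSourceFilterVariableName, // assumed constant for "Name"
		Prefix:         aws.String("2014-09-09"),
		SortOrder:      types.SortOrderAsc, // assumed constant for "asc"
		Limit:          aws.Int32(25),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, ds := range out.Results {
		fmt.Println(aws.ToString(ds.Name), ds.Status)
	}
}

The same FilterVariable/Prefix/SortOrder pattern applies to DescribeBatchPredictions, DescribeEvaluations, and DescribeMLModels, which differ only in their filter-variable enums.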
diff --git a/service/machinelearning/api_op_DescribeEvaluations.go b/service/machinelearning/api_op_DescribeEvaluations.go index a428cb02204..196d67eab65 100644 --- a/service/machinelearning/api_op_DescribeEvaluations.go +++ b/service/machinelearning/api_op_DescribeEvaluations.go @@ -36,28 +36,28 @@ type DescribeEvaluationsInput struct { // Use one of the following variable to filter a list of Evaluation objects: // - // * + // * // CreatedAt - Sets the search criteria to the Evaluation creation date. // - // * - // Status - Sets the search criteria to the Evaluation status. + // * Status + // - Sets the search criteria to the Evaluation status. // - // * Name - Sets - // the search criteria to the contents of EvaluationName. + // * Name - Sets the search + // criteria to the contents of EvaluationName. // - // * IAMUser - Sets the - // search criteria to the user account that invoked an Evaluation. + // * IAMUser - Sets the search + // criteria to the user account that invoked an Evaluation. // - // * MLModelId - // - Sets the search criteria to the MLModel that was evaluated. + // * MLModelId - Sets the + // search criteria to the MLModel that was evaluated. // - // * - // DataSourceId - Sets the search criteria to the DataSource used in Evaluation. + // * DataSourceId - Sets the + // search criteria to the DataSource used in Evaluation. // - // - // * DataUri - Sets the search criteria to the data file(s) used in Evaluation. The - // URL can identify either a file or an Amazon Simple Storage Solution (Amazon S3) - // bucket or directory. + // * DataUri - Sets the + // search criteria to the data file(s) used in Evaluation. The URL can identify + // either a file or an Amazon Simple Storage Solution (Amazon S3) bucket or + // directory. FilterVariable types.EvaluationFilterVariable // The greater than or equal to operator. The Evaluation results will have @@ -93,21 +93,20 @@ type DescribeEvaluationsInput struct { // search for this Evaluation, select Name for the FilterVariable and any of the // following strings for the Prefix: // - // * 2014-09 + // * 2014-09 // - // * 2014-09-09 + // * 2014-09-09 // - // * - // 2014-09-09-Holiday + // * 2014-09-09-Holiday Prefix *string // A two-value parameter that determines the sequence of the resulting list of // Evaluation. // - // * asc - Arranges the list in ascending order (A-Z, 0-9). + // * asc - Arranges the list in ascending order (A-Z, 0-9). // - // * - // dsc - Arranges the list in descending order (Z-A, 9-0). + // * dsc - + // Arranges the list in descending order (Z-A, 9-0). // // Results are sorted by // FilterVariable. diff --git a/service/machinelearning/api_op_DescribeMLModels.go b/service/machinelearning/api_op_DescribeMLModels.go index d5717269e77..056701a3efe 100644 --- a/service/machinelearning/api_op_DescribeMLModels.go +++ b/service/machinelearning/api_op_DescribeMLModels.go @@ -35,35 +35,34 @@ type DescribeMLModelsInput struct { // Use one of the following variables to filter a list of MLModel: // - // * CreatedAt - // - Sets the search criteria to MLModel creation date. + // * CreatedAt - + // Sets the search criteria to MLModel creation date. // - // * Status - Sets the - // search criteria to MLModel status. + // * Status - Sets the search + // criteria to MLModel status. // - // * Name - Sets the search criteria to the - // contents of MLModelName. + // * Name - Sets the search criteria to the contents + // of MLModelName. 
// - // * IAMUser - Sets the search criteria to the user - // account that invoked the MLModel creation. + // * IAMUser - Sets the search criteria to the user account that + // invoked the MLModel creation. // - // * TrainingDataSourceId - Sets - // the search criteria to the DataSource used to train one or more MLModel. + // * TrainingDataSourceId - Sets the search criteria + // to the DataSource used to train one or more MLModel. // - // * - // RealtimeEndpointStatus - Sets the search criteria to the MLModel real-time - // endpoint status. + // * RealtimeEndpointStatus - + // Sets the search criteria to the MLModel real-time endpoint status. // - // * MLModelType - Sets the search criteria to MLModel type: - // binary, regression, or multi-class. + // * + // MLModelType - Sets the search criteria to MLModel type: binary, regression, or + // multi-class. // - // * Algorithm - Sets the search criteria - // to the algorithm that the MLModel uses. + // * Algorithm - Sets the search criteria to the algorithm that the + // MLModel uses. // - // * TrainingDataURI - Sets the search - // criteria to the data file(s) used in training a MLModel. The URL can identify - // either a file or an Amazon Simple Storage Service (Amazon S3) bucket or - // directory. + // * TrainingDataURI - Sets the search criteria to the data file(s) + // used in training a MLModel. The URL can identify either a file or an Amazon + // Simple Storage Service (Amazon S3) bucket or directory. FilterVariable types.MLModelFilterVariable // The greater than or equal to operator. The MLModel results will have @@ -99,21 +98,20 @@ type DescribeMLModelsInput struct { // for this MLModel, select Name for the FilterVariable and any of the following // strings for the Prefix: // - // * 2014-09 + // * 2014-09 // - // * 2014-09-09 + // * 2014-09-09 // - // * - // 2014-09-09-Holiday + // * 2014-09-09-Holiday Prefix *string // A two-value parameter that determines the sequence of the resulting list of // MLModel. // - // * asc - Arranges the list in ascending order (A-Z, 0-9). + // * asc - Arranges the list in ascending order (A-Z, 0-9). // - // * - // dsc - Arranges the list in descending order (Z-A, 9-0). + // * dsc - + // Arranges the list in descending order (Z-A, 9-0). // // Results are sorted by // FilterVariable. diff --git a/service/machinelearning/api_op_GetBatchPrediction.go b/service/machinelearning/api_op_GetBatchPrediction.go index f053ee88dd2..b51d523eca6 100644 --- a/service/machinelearning/api_op_GetBatchPrediction.go +++ b/service/machinelearning/api_op_GetBatchPrediction.go @@ -104,21 +104,21 @@ type GetBatchPredictionOutput struct { // The status of the BatchPrediction, which can be one of the following values: // - // - // * PENDING - Amazon Machine Learning (Amazon ML) submitted a request to generate + // * + // PENDING - Amazon Machine Learning (Amazon ML) submitted a request to generate // batch predictions. // - // * INPROGRESS - The batch predictions are in progress. - // + // * INPROGRESS - The batch predictions are in progress. // - // * FAILED - The request to perform a batch prediction did not run to completion. - // It is not usable. + // * + // FAILED - The request to perform a batch prediction did not run to completion. It + // is not usable. // - // * COMPLETED - The batch prediction process completed + // * COMPLETED - The batch prediction process completed // successfully. // - // * DELETED - The BatchPrediction is marked as deleted. It is - // not usable. 
+ // * DELETED - The BatchPrediction is marked as deleted. It is not + // usable. Status types.EntityStatus // The number of total records that Amazon Machine Learning saw while processing diff --git a/service/machinelearning/api_op_GetDataSource.go b/service/machinelearning/api_op_GetDataSource.go index 3923c03217e..5c225f15023 100644 --- a/service/machinelearning/api_op_GetDataSource.go +++ b/service/machinelearning/api_op_GetDataSource.go @@ -123,19 +123,19 @@ type GetDataSourceOutput struct { // The current status of the DataSource. This element can have one of the following // values: // - // * PENDING - Amazon ML submitted a request to create a DataSource. + // * PENDING - Amazon ML submitted a request to create a DataSource. // + // * + // INPROGRESS - The creation process is underway. // - // * INPROGRESS - The creation process is underway. + // * FAILED - The request to create + // a DataSource did not run to completion. It is not usable. // - // * FAILED - The request to - // create a DataSource did not run to completion. It is not usable. + // * COMPLETED - The + // creation process completed successfully. // - // * - // COMPLETED - The creation process completed successfully. - // - // * DELETED - The - // DataSource is marked as deleted. It is not usable. + // * DELETED - The DataSource is marked + // as deleted. It is not usable. Status types.EntityStatus // Metadata pertaining to the operation's result. diff --git a/service/machinelearning/api_op_GetEvaluation.go b/service/machinelearning/api_op_GetEvaluation.go index 0fc84e3f1f3..b34740a2839 100644 --- a/service/machinelearning/api_op_GetEvaluation.go +++ b/service/machinelearning/api_op_GetEvaluation.go @@ -88,15 +88,15 @@ type GetEvaluationOutput struct { // the DataSource. One of the following metric is returned based on the type of the // MLModel: // - // * BinaryAUC: A binary MLModel uses the Area Under the Curve (AUC) + // * BinaryAUC: A binary MLModel uses the Area Under the Curve (AUC) // technique to measure performance. // - // * RegressionRMSE: A regression MLModel - // uses the Root Mean Square Error (RMSE) technique to measure performance. RMSE + // * RegressionRMSE: A regression MLModel uses + // the Root Mean Square Error (RMSE) technique to measure performance. RMSE // measures the difference between predicted and actual values for a single // variable. // - // * MulticlassAvgFScore: A multiclass MLModel uses the F1 score + // * MulticlassAvgFScore: A multiclass MLModel uses the F1 score // technique to measure performance. // // For more information about performance @@ -111,19 +111,19 @@ type GetEvaluationOutput struct { // The status of the evaluation. This element can have one of the following // values: // - // * PENDING - Amazon Machine Language (Amazon ML) submitted a request - // to evaluate an MLModel. + // * PENDING - Amazon Machine Language (Amazon ML) submitted a request to + // evaluate an MLModel. // - // * INPROGRESS - The evaluation is underway. + // * INPROGRESS - The evaluation is underway. // - // * - // FAILED - The request to evaluate an MLModel did not run to completion. It is not - // usable. + // * FAILED - The + // request to evaluate an MLModel did not run to completion. It is not usable. // - // * COMPLETED - The evaluation process completed successfully. + // * + // COMPLETED - The evaluation process completed successfully. // - // * - // DELETED - The Evaluation is marked as deleted. It is not usable. + // * DELETED - The + // Evaluation is marked as deleted. It is not usable. 
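A minimal sketch of acting on the status lifecycle documented above: fetch an Evaluation and branch on its EntityStatus. The EvaluationId member on the input is an assumption based on the service API; the switch compares the documented status strings directly rather than naming specific EntityStatus constants, which are not part of this excerpt.

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/machinelearning"
)

// evaluationSucceeded reports whether an Evaluation completed successfully,
// following the PENDING/INPROGRESS/FAILED/COMPLETED/DELETED semantics above.
func evaluationSucceeded(ctx context.Context, client *machinelearning.Client, evaluationID string) (bool, error) {
	out, err := client.GetEvaluation(ctx, &machinelearning.GetEvaluationInput{
		EvaluationId: aws.String(evaluationID), // assumed identifier member
	})
	if err != nil {
		return false, err
	}
	switch string(out.Status) { // Status is a string-backed types.EntityStatus
	case "COMPLETED":
		return true, nil
	case "FAILED", "DELETED":
		return false, fmt.Errorf("evaluation %s is not usable (status %s)", evaluationID, out.Status)
	default: // PENDING or INPROGRESS
		return false, nil
	}
}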
Status types.EntityStatus // Metadata pertaining to the operation's result. diff --git a/service/machinelearning/api_op_GetMLModel.go b/service/machinelearning/api_op_GetMLModel.go index 2f3e4312ef4..329434e681d 100644 --- a/service/machinelearning/api_op_GetMLModel.go +++ b/service/machinelearning/api_op_GetMLModel.go @@ -83,16 +83,15 @@ type GetMLModelOutput struct { // Identifies the MLModel category. The following are the available types: // - // * + // * // REGRESSION -- Produces a numeric result. For example, "What price should a house // be listed at?" // - // * BINARY -- Produces one of two possible results. For - // example, "Is this an e-commerce website?" + // * BINARY -- Produces one of two possible results. For example, + // "Is this an e-commerce website?" // - // * MULTICLASS -- Produces one of - // several possible results. For example, "Is this a HIGH, LOW or MEDIUM risk - // trade?" + // * MULTICLASS -- Produces one of several + // possible results. For example, "Is this a HIGH, LOW or MEDIUM risk trade?" MLModelType types.MLModelType // A description of the most recent details about accessing the MLModel. @@ -132,19 +131,19 @@ type GetMLModelOutput struct { // The current status of the MLModel. This element can have one of the following // values: // - // * PENDING - Amazon Machine Learning (Amazon ML) submitted a request - // to describe a MLModel. + // * PENDING - Amazon Machine Learning (Amazon ML) submitted a request to + // describe a MLModel. // - // * INPROGRESS - The request is processing. + // * INPROGRESS - The request is processing. // - // * - // FAILED - The request did not run to completion. The ML model isn't usable. + // * FAILED - The + // request did not run to completion. The ML model isn't usable. // + // * COMPLETED - The + // request completed successfully. // - // * COMPLETED - The request completed successfully. - // - // * DELETED - The MLModel - // is marked as deleted. It isn't usable. + // * DELETED - The MLModel is marked as deleted. + // It isn't usable. Status types.EntityStatus // The ID of the training DataSource. @@ -154,30 +153,30 @@ type GetMLModelOutput struct { // map of key-value pairs. The following is the current set of training // parameters: // - // * sgd.maxMLModelSizeInBytes - The maximum allowed size of the + // * sgd.maxMLModelSizeInBytes - The maximum allowed size of the // model. Depending on the input data, the size of the model might affect its // performance. The value is an integer that ranges from 100000 to 2147483648. The // default value is 33554432. // - // * sgd.maxPasses - The number of times that the + // * sgd.maxPasses - The number of times that the // training process traverses the observations to build the MLModel. The value is // an integer that ranges from 1 to 100. The default value is 10. // - // * + // * // sgd.shuffleType - Whether Amazon ML shuffles the training data. Shuffling data // improves a model's ability to find the optimal solution for a variety of data // types. The valid values are auto and none. The default value is none. We // strongly recommend that you shuffle your data. // - // * sgd.l1RegularizationAmount - // - The coefficient regularization L1 norm. It controls overfitting the data by + // * sgd.l1RegularizationAmount - + // The coefficient regularization L1 norm. It controls overfitting the data by // penalizing large coefficients. This tends to drive coefficients to zero, // resulting in a sparse feature set. 
If you use this parameter, start by // specifying a small value, such as 1.0E-08. The value is a double that ranges // from 0 to MAX_DOUBLE. The default is to not use L1 normalization. This parameter // can't be used when L2 is specified. Use this parameter sparingly. // - // * + // * // sgd.l2RegularizationAmount - The coefficient regularization L2 norm. It controls // overfitting the data by penalizing large coefficients. This tends to drive // coefficients to small, nonzero values. If you use this parameter, start by diff --git a/service/machinelearning/api_op_Predict.go b/service/machinelearning/api_op_Predict.go index f885f6274a8..a01e5d9c036 100644 --- a/service/machinelearning/api_op_Predict.go +++ b/service/machinelearning/api_op_Predict.go @@ -54,18 +54,18 @@ type PredictOutput struct { // The output from a Predict operation: // - // * Details - Contains the following + // * Details - Contains the following // attributes: DetailsAttributes.PREDICTIVE_MODEL_TYPE - REGRESSION | BINARY | // MULTICLASSDetailsAttributes.ALGORITHM - SGD // - // * PredictedLabel - Present for + // * PredictedLabel - Present for // either a BINARY or MULTICLASSMLModel request. // - // * PredictedScores - Contains - // the raw classification score corresponding to each label. + // * PredictedScores - Contains the + // raw classification score corresponding to each label. // - // * PredictedValue - // - Present for a REGRESSIONMLModel request. + // * PredictedValue - + // Present for a REGRESSIONMLModel request. Prediction *types.Prediction // Metadata pertaining to the operation's result. diff --git a/service/machinelearning/types/enums.go b/service/machinelearning/types/enums.go index b2783faa3a7..00582a1dcfb 100644 --- a/service/machinelearning/types/enums.go +++ b/service/machinelearning/types/enums.go @@ -22,14 +22,14 @@ type BatchPredictionFilterVariable string // Enum values for BatchPredictionFilterVariable const ( - BatchPredictionFilterVariableCreated_at BatchPredictionFilterVariable = "CreatedAt" - BatchPredictionFilterVariableLast_updated_at BatchPredictionFilterVariable = "LastUpdatedAt" - BatchPredictionFilterVariableStatus BatchPredictionFilterVariable = "Status" - BatchPredictionFilterVariableName BatchPredictionFilterVariable = "Name" - BatchPredictionFilterVariableIam_user BatchPredictionFilterVariable = "IAMUser" - BatchPredictionFilterVariableMl_model_id BatchPredictionFilterVariable = "MLModelId" - BatchPredictionFilterVariableDatasource_id BatchPredictionFilterVariable = "DataSourceId" - BatchPredictionFilterVariableData_uri BatchPredictionFilterVariable = "DataURI" + BatchPredictionFilterVariableCreatedAt BatchPredictionFilterVariable = "CreatedAt" + BatchPredictionFilterVariableLastUpdatedAt BatchPredictionFilterVariable = "LastUpdatedAt" + BatchPredictionFilterVariableStatus BatchPredictionFilterVariable = "Status" + BatchPredictionFilterVariableName BatchPredictionFilterVariable = "Name" + BatchPredictionFilterVariableIamUser BatchPredictionFilterVariable = "IAMUser" + BatchPredictionFilterVariableMlModelId BatchPredictionFilterVariable = "MLModelId" + BatchPredictionFilterVariableDatasourceId BatchPredictionFilterVariable = "DataSourceId" + BatchPredictionFilterVariableDataUri BatchPredictionFilterVariable = "DataURI" ) // Values returns all known values for BatchPredictionFilterVariable. 
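The sgd.* training parameters documented above are passed as plain string key/value pairs. The sketch below assembles such a map using only keys and defaults quoted in those docs; which request member carries the map (for example on CreateMLModel) is outside this excerpt, so this is purely illustrative.

package example

// defaultSGDTrainingParameters returns a sketch of the sgd.* training
// parameters described above. Keys and values come from the documentation in
// this patch; the SDK member that accepts them is not shown here.
func defaultSGDTrainingParameters() map[string]string {
	return map[string]string{
		"sgd.maxMLModelSizeInBytes":  "33554432", // documented default model size in bytes
		"sgd.maxPasses":              "10",       // integer from 1 to 100, default 10
		"sgd.shuffleType":            "auto",     // "auto" or "none"; shuffling is recommended
		"sgd.l1RegularizationAmount": "1.0E-08",  // small starting value suggested by the docs
		// sgd.l2RegularizationAmount is omitted because L1 and L2
		// regularization can't be specified together.
	}
}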
Note that @@ -53,12 +53,12 @@ type DataSourceFilterVariable string // Enum values for DataSourceFilterVariable const ( - DataSourceFilterVariableCreated_at DataSourceFilterVariable = "CreatedAt" - DataSourceFilterVariableLast_updated_at DataSourceFilterVariable = "LastUpdatedAt" - DataSourceFilterVariableStatus DataSourceFilterVariable = "Status" - DataSourceFilterVariableName DataSourceFilterVariable = "Name" - DataSourceFilterVariableData_uri DataSourceFilterVariable = "DataLocationS3" - DataSourceFilterVariableIam_user DataSourceFilterVariable = "IAMUser" + DataSourceFilterVariableCreatedAt DataSourceFilterVariable = "CreatedAt" + DataSourceFilterVariableLastUpdatedAt DataSourceFilterVariable = "LastUpdatedAt" + DataSourceFilterVariableStatus DataSourceFilterVariable = "Status" + DataSourceFilterVariableName DataSourceFilterVariable = "Name" + DataSourceFilterVariableDataUri DataSourceFilterVariable = "DataLocationS3" + DataSourceFilterVariableIamUser DataSourceFilterVariable = "IAMUser" ) // Values returns all known values for DataSourceFilterVariable. Note that this can @@ -79,8 +79,8 @@ type DetailsAttributes string // Enum values for DetailsAttributes const ( - DetailsAttributesPredictive_model_type DetailsAttributes = "PredictiveModelType" - DetailsAttributesAlgorithm DetailsAttributes = "Algorithm" + DetailsAttributesPredictiveModelType DetailsAttributes = "PredictiveModelType" + DetailsAttributesAlgorithm DetailsAttributes = "Algorithm" ) // Values returns all known values for DetailsAttributes. Note that this can be @@ -121,14 +121,14 @@ type EvaluationFilterVariable string // Enum values for EvaluationFilterVariable const ( - EvaluationFilterVariableCreated_at EvaluationFilterVariable = "CreatedAt" - EvaluationFilterVariableLast_updated_at EvaluationFilterVariable = "LastUpdatedAt" - EvaluationFilterVariableStatus EvaluationFilterVariable = "Status" - EvaluationFilterVariableName EvaluationFilterVariable = "Name" - EvaluationFilterVariableIam_user EvaluationFilterVariable = "IAMUser" - EvaluationFilterVariableMl_model_id EvaluationFilterVariable = "MLModelId" - EvaluationFilterVariableDatasource_id EvaluationFilterVariable = "DataSourceId" - EvaluationFilterVariableData_uri EvaluationFilterVariable = "DataURI" + EvaluationFilterVariableCreatedAt EvaluationFilterVariable = "CreatedAt" + EvaluationFilterVariableLastUpdatedAt EvaluationFilterVariable = "LastUpdatedAt" + EvaluationFilterVariableStatus EvaluationFilterVariable = "Status" + EvaluationFilterVariableName EvaluationFilterVariable = "Name" + EvaluationFilterVariableIamUser EvaluationFilterVariable = "IAMUser" + EvaluationFilterVariableMlModelId EvaluationFilterVariable = "MLModelId" + EvaluationFilterVariableDatasourceId EvaluationFilterVariable = "DataSourceId" + EvaluationFilterVariableDataUri EvaluationFilterVariable = "DataURI" ) // Values returns all known values for EvaluationFilterVariable. 
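These renames change only the Go identifiers; the string values sent on the wire are unchanged, so callers migrating from the old underscore-style names (for example EvaluationFilterVariableData_uri) only need the new spelling. A quick illustration using values shown above:

package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/machinelearning/types"
)

// The renamed constants still carry their original wire values.
func printRenamedFilterVariables() {
	fmt.Println(types.BatchPredictionFilterVariableIamUser) // "IAMUser"
	fmt.Println(types.DataSourceFilterVariableDataUri)      // "DataLocationS3"
	fmt.Println(types.EvaluationFilterVariableDataUri)      // "DataURI"
}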
Note that this can @@ -151,16 +151,16 @@ type MLModelFilterVariable string // Enum values for MLModelFilterVariable const ( - MLModelFilterVariableCreated_at MLModelFilterVariable = "CreatedAt" - MLModelFilterVariableLast_updated_at MLModelFilterVariable = "LastUpdatedAt" - MLModelFilterVariableStatus MLModelFilterVariable = "Status" - MLModelFilterVariableName MLModelFilterVariable = "Name" - MLModelFilterVariableIam_user MLModelFilterVariable = "IAMUser" - MLModelFilterVariableTraining_datasource_id MLModelFilterVariable = "TrainingDataSourceId" - MLModelFilterVariableReal_time_endpoint_status MLModelFilterVariable = "RealtimeEndpointStatus" - MLModelFilterVariableMl_model_type MLModelFilterVariable = "MLModelType" - MLModelFilterVariableAlgorithm MLModelFilterVariable = "Algorithm" - MLModelFilterVariableTraining_data_uri MLModelFilterVariable = "TrainingDataURI" + MLModelFilterVariableCreatedAt MLModelFilterVariable = "CreatedAt" + MLModelFilterVariableLastUpdatedAt MLModelFilterVariable = "LastUpdatedAt" + MLModelFilterVariableStatus MLModelFilterVariable = "Status" + MLModelFilterVariableName MLModelFilterVariable = "Name" + MLModelFilterVariableIamUser MLModelFilterVariable = "IAMUser" + MLModelFilterVariableTrainingDatasourceId MLModelFilterVariable = "TrainingDataSourceId" + MLModelFilterVariableRealTimeEndpointStatus MLModelFilterVariable = "RealtimeEndpointStatus" + MLModelFilterVariableMlModelType MLModelFilterVariable = "MLModelType" + MLModelFilterVariableAlgorithm MLModelFilterVariable = "Algorithm" + MLModelFilterVariableTrainingDataUri MLModelFilterVariable = "TrainingDataURI" ) // Values returns all known values for MLModelFilterVariable. Note that this can be @@ -245,10 +245,10 @@ type TaggableResourceType string // Enum values for TaggableResourceType const ( - TaggableResourceTypeBatch_prediction TaggableResourceType = "BatchPrediction" - TaggableResourceTypeDatasource TaggableResourceType = "DataSource" - TaggableResourceTypeEvaluation TaggableResourceType = "Evaluation" - TaggableResourceTypeMl_model TaggableResourceType = "MLModel" + TaggableResourceTypeBatchPrediction TaggableResourceType = "BatchPrediction" + TaggableResourceTypeDatasource TaggableResourceType = "DataSource" + TaggableResourceTypeEvaluation TaggableResourceType = "Evaluation" + TaggableResourceTypeMlModel TaggableResourceType = "MLModel" ) // Values returns all known values for TaggableResourceType. Note that this can be diff --git a/service/machinelearning/types/types.go b/service/machinelearning/types/types.go index 18fc0ad798d..4145dc28f8b 100644 --- a/service/machinelearning/types/types.go +++ b/service/machinelearning/types/types.go @@ -66,20 +66,20 @@ type BatchPrediction struct { // The status of the BatchPrediction. This element can have one of the following // values: // - // * PENDING - Amazon Machine Learning (Amazon ML) submitted a request - // to generate predictions for a batch of observations. + // * PENDING - Amazon Machine Learning (Amazon ML) submitted a request to + // generate predictions for a batch of observations. // - // * INPROGRESS - The - // process is underway. + // * INPROGRESS - The process is + // underway. // - // * FAILED - The request to perform a batch prediction - // did not run to completion. It is not usable. + // * FAILED - The request to perform a batch prediction did not run to + // completion. It is not usable. // - // * COMPLETED - The batch - // prediction process completed successfully. 
+ // * COMPLETED - The batch prediction process + // completed successfully. // - // * DELETED - The BatchPrediction - // is marked as deleted. It is not usable. + // * DELETED - The BatchPrediction is marked as deleted. + // It is not usable. Status EntityStatus // Long integer type that is a 64-bit signed number. @@ -154,19 +154,19 @@ type DataSource struct { // The current status of the DataSource. This element can have one of the following // values: // - // * PENDING - Amazon Machine Learning (Amazon ML) submitted a request - // to create a DataSource. + // * PENDING - Amazon Machine Learning (Amazon ML) submitted a request to + // create a DataSource. // - // * INPROGRESS - The creation process is underway. + // * INPROGRESS - The creation process is underway. // + // * FAILED + // - The request to create a DataSource did not run to completion. It is not + // usable. // - // * FAILED - The request to create a DataSource did not run to completion. It is - // not usable. - // - // * COMPLETED - The creation process completed successfully. - // + // * COMPLETED - The creation process completed successfully. // - // * DELETED - The DataSource is marked as deleted. It is not usable. + // * DELETED - + // The DataSource is marked as deleted. It is not usable. Status EntityStatus } @@ -215,15 +215,15 @@ type Evaluation struct { // the DataSource. One of the following metrics is returned, based on the type of // the MLModel: // - // * BinaryAUC: A binary MLModel uses the Area Under the Curve - // (AUC) technique to measure performance. + // * BinaryAUC: A binary MLModel uses the Area Under the Curve (AUC) + // technique to measure performance. // - // * RegressionRMSE: A regression - // MLModel uses the Root Mean Square Error (RMSE) technique to measure performance. - // RMSE measures the difference between predicted and actual values for a single + // * RegressionRMSE: A regression MLModel uses + // the Root Mean Square Error (RMSE) technique to measure performance. RMSE + // measures the difference between predicted and actual values for a single // variable. // - // * MulticlassAvgFScore: A multiclass MLModel uses the F1 score + // * MulticlassAvgFScore: A multiclass MLModel uses the F1 score // technique to measure performance. // // For more information about performance @@ -237,19 +237,19 @@ type Evaluation struct { // The status of the evaluation. This element can have one of the following // values: // - // * PENDING - Amazon Machine Learning (Amazon ML) submitted a request - // to evaluate an MLModel. + // * PENDING - Amazon Machine Learning (Amazon ML) submitted a request to + // evaluate an MLModel. // - // * INPROGRESS - The evaluation is underway. + // * INPROGRESS - The evaluation is underway. // - // * - // FAILED - The request to evaluate an MLModel did not run to completion. It is not - // usable. + // * FAILED - The + // request to evaluate an MLModel did not run to completion. It is not usable. // - // * COMPLETED - The evaluation process completed successfully. + // * + // COMPLETED - The evaluation process completed successfully. // - // * - // DELETED - The Evaluation is marked as deleted. It is not usable. + // * DELETED - The + // Evaluation is marked as deleted. It is not usable. Status EntityStatus } @@ -257,11 +257,11 @@ type Evaluation struct { // detailed metadata and the current status of the MLModel. type MLModel struct { - // The algorithm used to train the MLModel. The following algorithm is supported: - // + // The algorithm used to train the MLModel. 
The following algorithm is + // supported: // - // * SGD -- Stochastic gradient descent. The goal of SGD is to minimize the - // gradient of the loss function. + // * SGD -- Stochastic gradient descent. The goal of SGD is to minimize + // the gradient of the loss function. Algorithm Algorithm // Long integer type that is a 64-bit signed number. @@ -294,16 +294,15 @@ type MLModel struct { // Identifies the MLModel category. The following are the available types: // - // * + // * // REGRESSION - Produces a numeric result. For example, "What price should a house // be listed at?" // - // * BINARY - Produces one of two possible results. For - // example, "Is this a child-friendly web site?". + // * BINARY - Produces one of two possible results. For example, + // "Is this a child-friendly web site?". // - // * MULTICLASS - Produces one - // of several possible results. For example, "Is this a HIGH-, LOW-, or MEDIUM-risk - // trade?". + // * MULTICLASS - Produces one of several + // possible results. For example, "Is this a HIGH-, LOW-, or MEDIUM-risk trade?". MLModelType MLModelType // A description of the most recent details about accessing the MLModel. @@ -328,19 +327,19 @@ type MLModel struct { // The current status of an MLModel. This element can have one of the following // values: // - // * PENDING - Amazon Machine Learning (Amazon ML) submitted a request - // to create an MLModel. - // - // * INPROGRESS - The creation process is underway. + // * PENDING - Amazon Machine Learning (Amazon ML) submitted a request to + // create an MLModel. // + // * INPROGRESS - The creation process is underway. // - // * FAILED - The request to create an MLModel didn't run to completion. The model - // isn't usable. - // - // * COMPLETED - The creation process completed successfully. + // * FAILED - + // The request to create an MLModel didn't run to completion. The model isn't + // usable. // + // * COMPLETED - The creation process completed successfully. // - // * DELETED - The MLModel is marked as deleted. It isn't usable. + // * DELETED - + // The MLModel is marked as deleted. It isn't usable. Status EntityStatus // The ID of the training DataSource. The CreateMLModel operation uses the @@ -351,22 +350,22 @@ type MLModel struct { // map of key-value pairs. The following is the current set of training // parameters: // - // * sgd.maxMLModelSizeInBytes - The maximum allowed size of the + // * sgd.maxMLModelSizeInBytes - The maximum allowed size of the // model. Depending on the input data, the size of the model might affect its // performance. The value is an integer that ranges from 100000 to 2147483648. The // default value is 33554432. // - // * sgd.maxPasses - The number of times that the + // * sgd.maxPasses - The number of times that the // training process traverses the observations to build the MLModel. The value is // an integer that ranges from 1 to 100. The default value is 10. // - // * + // * // sgd.shuffleType - Whether Amazon ML shuffles the training data. Shuffling the // data improves a model's ability to find the optimal solution for a variety of // data types. The valid values are auto and none. The default value is none. // - // - // * sgd.l1RegularizationAmount - The coefficient regularization L1 norm, which + // * + // sgd.l1RegularizationAmount - The coefficient regularization L1 norm, which // controls overfitting the data by penalizing large coefficients. This parameter // tends to drive coefficients to zero, resulting in sparse feature set. 
If you use // this parameter, start by specifying a small value, such as 1.0E-08. The value is @@ -374,7 +373,7 @@ type MLModel struct { // normalization. This parameter can't be used when L2 is specified. Use this // parameter sparingly. // - // * sgd.l2RegularizationAmount - The coefficient + // * sgd.l2RegularizationAmount - The coefficient // regularization L2 norm, which controls overfitting the data by penalizing large // coefficients. This tends to drive coefficients to small, nonzero values. If you // use this parameter, start by specifying a small value, such as 1.0E-08. The @@ -387,15 +386,15 @@ type MLModel struct { // Measurements of how well the MLModel performed on known observations. One of the // following metrics is returned, based on the type of the MLModel: // -// * -// BinaryAUC: The binary MLModel uses the Area Under the Curve (AUC) technique to -// measure performance. +// * BinaryAUC: +// The binary MLModel uses the Area Under the Curve (AUC) technique to measure +// performance. // -// * RegressionRMSE: The regression MLModel uses the Root -// Mean Square Error (RMSE) technique to measure performance. RMSE measures the -// difference between predicted and actual values for a single variable. +// * RegressionRMSE: The regression MLModel uses the Root Mean Square +// Error (RMSE) technique to measure performance. RMSE measures the difference +// between predicted and actual values for a single variable. // -// * +// * // MulticlassAvgFScore: The multiclass MLModel uses the F1 score technique to // measure performance. // @@ -410,18 +409,18 @@ type PerformanceMetrics struct { // The output from a Predict operation: // -// * Details - Contains the following +// * Details - Contains the following // attributes: DetailsAttributes.PREDICTIVE_MODEL_TYPE - REGRESSION | BINARY | // MULTICLASSDetailsAttributes.ALGORITHM - SGD // -// * PredictedLabel - Present for +// * PredictedLabel - Present for // either a BINARY or MULTICLASSMLModel request. // -// * PredictedScores - Contains -// the raw classification score corresponding to each label. +// * PredictedScores - Contains the +// raw classification score corresponding to each label. // -// * PredictedValue -// - Present for a REGRESSIONMLModel request. +// * PredictedValue - +// Present for a REGRESSIONMLModel request. type Prediction struct { // Provides any additional details regarding the prediction. @@ -533,17 +532,17 @@ type RDSDataSpec struct { // of the input data is used to create the Datasource. There are multiple // parameters that control what data is used to create a datasource: // - // * + // * // percentBegin Use percentBegin to indicate the beginning of the range of the data // used to create the Datasource. If you do not include percentBegin and // percentEnd, Amazon ML includes all of the data when creating the datasource. // - // - // * percentEnd Use percentEnd to indicate the end of the range of the data used to + // * + // percentEnd Use percentEnd to indicate the end of the range of the data used to // create the Datasource. If you do not include percentBegin and percentEnd, Amazon // ML includes all of the data when creating the datasource. // - // * complement The + // * complement The // complement parameter instructs Amazon ML to use the data that is not included in // the range of percentBegin to percentEnd to create a datasource. 
The complement // parameter is useful if you need to create complementary datasources for training @@ -555,7 +554,7 @@ type RDSDataSpec struct { // {"splitting":{"percentBegin":0, "percentEnd":25}} Datasource for training: // {"splitting":{"percentBegin":0, "percentEnd":25, "complement":"true"}} // - // * + // * // strategy To change how Amazon ML splits the data for a datasource, use the // strategy parameter. The default value for the strategy parameter is sequential, // meaning that Amazon ML takes all of the data records between the percentBegin @@ -654,13 +653,13 @@ type RealtimeEndpointInfo struct { // The current status of the real-time endpoint for the MLModel. This element can // have one of the following values: // - // * NONE - Endpoint does not exist or was + // * NONE - Endpoint does not exist or was // previously deleted. // - // * READY - Endpoint is ready to be used for real-time + // * READY - Endpoint is ready to be used for real-time // predictions. // - // * UPDATING - Updating/creating the endpoint. + // * UPDATING - Updating/creating the endpoint. EndpointStatus RealtimeEndpointStatus // The URI that specifies where to send real-time prediction requests for the @@ -743,17 +742,17 @@ type RedshiftDataSpec struct { // of the input data is used to create the Datasource. There are multiple // parameters that control what data is used to create a datasource: // - // * + // * // percentBegin Use percentBegin to indicate the beginning of the range of the data // used to create the Datasource. If you do not include percentBegin and // percentEnd, Amazon ML includes all of the data when creating the datasource. // - // - // * percentEnd Use percentEnd to indicate the end of the range of the data used to + // * + // percentEnd Use percentEnd to indicate the end of the range of the data used to // create the Datasource. If you do not include percentBegin and percentEnd, Amazon // ML includes all of the data when creating the datasource. // - // * complement The + // * complement The // complement parameter instructs Amazon ML to use the data that is not included in // the range of percentBegin to percentEnd to create a datasource. The complement // parameter is useful if you need to create complementary datasources for training @@ -765,7 +764,7 @@ type RedshiftDataSpec struct { // {"splitting":{"percentBegin":0, "percentEnd":25}} Datasource for training: // {"splitting":{"percentBegin":0, "percentEnd":25, "complement":"true"}} // - // * + // * // strategy To change how Amazon ML splits the data for a datasource, use the // strategy parameter. The default value for the strategy parameter is sequential, // meaning that Amazon ML takes all of the data records between the percentBegin @@ -853,17 +852,17 @@ type S3DataSpec struct { // of the input data is used to create the Datasource. There are multiple // parameters that control what data is used to create a datasource: // - // * + // * // percentBegin Use percentBegin to indicate the beginning of the range of the data // used to create the Datasource. If you do not include percentBegin and // percentEnd, Amazon ML includes all of the data when creating the datasource. // - // - // * percentEnd Use percentEnd to indicate the end of the range of the data used to + // * + // percentEnd Use percentEnd to indicate the end of the range of the data used to // create the Datasource. If you do not include percentBegin and percentEnd, Amazon // ML includes all of the data when creating the datasource. 
// - // * complement The + // * complement The // complement parameter instructs Amazon ML to use the data that is not included in // the range of percentBegin to percentEnd to create a datasource. The complement // parameter is useful if you need to create complementary datasources for training @@ -875,7 +874,7 @@ type S3DataSpec struct { // {"splitting":{"percentBegin":0, "percentEnd":25}} Datasource for training: // {"splitting":{"percentBegin":0, "percentEnd":25, "complement":"true"}} // - // * + // * // strategy To change how Amazon ML splits the data for a datasource, use the // strategy parameter. The default value for the strategy parameter is sequential, // meaning that Amazon ML takes all of the data records between the percentBegin diff --git a/service/macie2/api_op_CreateClassificationJob.go b/service/macie2/api_op_CreateClassificationJob.go index 5d0f793d62b..357d198eebe 100644 --- a/service/macie2/api_op_CreateClassificationJob.go +++ b/service/macie2/api_op_CreateClassificationJob.go @@ -38,13 +38,13 @@ type CreateClassificationJobInput struct { // The schedule for running the job. Valid values are: // - // * ONE_TIME - Run the - // job only once. If you specify this value, don't specify a value for the + // * ONE_TIME - Run the job + // only once. If you specify this value, don't specify a value for the // scheduleFrequency property. // - // * SCHEDULED - Run the job on a daily, weekly, - // or monthly basis. If you specify this value, use the scheduleFrequency property - // to define the recurrence pattern for the job. + // * SCHEDULED - Run the job on a daily, weekly, or + // monthly basis. If you specify this value, use the scheduleFrequency property to + // define the recurrence pattern for the job. // // This member is required. JobType types.JobType diff --git a/service/macie2/api_op_DescribeClassificationJob.go b/service/macie2/api_op_DescribeClassificationJob.go index d6f51a469d5..fe57eff82eb 100644 --- a/service/macie2/api_op_DescribeClassificationJob.go +++ b/service/macie2/api_op_DescribeClassificationJob.go @@ -64,36 +64,35 @@ type DescribeClassificationJobOutput struct { // The current status of the job. Possible values are: // - // * CANCELLED - You - // cancelled the job, or you paused the job and didn't resume it within 30 days of - // pausing it. + // * CANCELLED - You cancelled + // the job, or you paused the job and didn't resume it within 30 days of pausing + // it. // - // * COMPLETE - For a one-time job, Amazon Macie finished - // processing all the data specified for the job. This value doesn't apply to - // recurring jobs. + // * COMPLETE - For a one-time job, Amazon Macie finished processing all the + // data specified for the job. This value doesn't apply to recurring jobs. // - // * IDLE - For a recurring job, the previous scheduled run is - // complete and the next scheduled run is pending. This value doesn't apply to - // one-time jobs. + // * IDLE + // - For a recurring job, the previous scheduled run is complete and the next + // scheduled run is pending. This value doesn't apply to one-time jobs. // - // * PAUSED - Amazon Macie started running the job but - // completion of the job would exceed one or more quotas for your account. + // * PAUSED - + // Amazon Macie started running the job but completion of the job would exceed one + // or more quotas for your account. // - // * - // RUNNING - For a one-time job, the job is in progress. For a recurring job, a - // scheduled run is in progress. 
+ // * RUNNING - For a one-time job, the job is in + // progress. For a recurring job, a scheduled run is in progress. // - // * USER_PAUSED - You paused the job. If you - // don't resume the job within 30 days of pausing it, the job will expire and be - // cancelled. + // * USER_PAUSED - + // You paused the job. If you don't resume the job within 30 days of pausing it, + // the job will expire and be cancelled. JobStatus types.JobStatus // The schedule for running the job. Possible values are: // - // * ONE_TIME - The job + // * ONE_TIME - The job // runs only once. // - // * SCHEDULED - The job runs on a daily, weekly, or monthly + // * SCHEDULED - The job runs on a daily, weekly, or monthly // basis. The scheduleFrequency property indicates the recurrence pattern for the // job. JobType types.JobType diff --git a/service/macie2/api_op_GetFindingStatistics.go b/service/macie2/api_op_GetFindingStatistics.go index fc5ba041273..6819274a411 100644 --- a/service/macie2/api_op_GetFindingStatistics.go +++ b/service/macie2/api_op_GetFindingStatistics.go @@ -31,19 +31,18 @@ type GetFindingStatisticsInput struct { // The finding property to use to group the query results. Valid values are: // - // * + // * // classificationDetails.jobId - The unique identifier for the classification job // that produced the finding. // - // * resourcesAffected.s3Bucket.name - The name of - // the S3 bucket that the finding applies to. + // * resourcesAffected.s3Bucket.name - The name of the + // S3 bucket that the finding applies to. // - // * severity.description - The - // severity level of the finding, such as High or Medium. + // * severity.description - The severity + // level of the finding, such as High or Medium. // - // * type - The type of - // finding, such as Policy:IAMUser/S3BucketPublic and - // SensitiveData:S3Object/Personal. + // * type - The type of finding, + // such as Policy:IAMUser/S3BucketPublic and SensitiveData:S3Object/Personal. // // This member is required. GroupBy types.GroupBy diff --git a/service/macie2/api_op_UpdateClassificationJob.go b/service/macie2/api_op_UpdateClassificationJob.go index ab9ecfdcea2..ba430e5c96e 100644 --- a/service/macie2/api_op_UpdateClassificationJob.go +++ b/service/macie2/api_op_UpdateClassificationJob.go @@ -36,16 +36,16 @@ type UpdateClassificationJobInput struct { // The new status for the job. Valid values are: // - // * CANCELLED - Stops the job + // * CANCELLED - Stops the job // permanently and cancels it. You can't resume a job after you cancel it. This // value is valid only if the job's current status is IDLE, PAUSED, RUNNING, or // USER_PAUSED. // - // * RUNNING - Resumes the job. This value is valid only if the - // job's current status is USER_PAUSED. If you specify this value, Amazon Macie + // * RUNNING - Resumes the job. This value is valid only if the job's + // current status is USER_PAUSED. If you specify this value, Amazon Macie // immediately resumes the job. // - // * USER_PAUSED - Pauses the job. This value is + // * USER_PAUSED - Pauses the job. This value is // valid only if the job's current status is IDLE or RUNNING. If you specify this // value and the job is currently running, Macie immediately stops running the job. // To resume a job after you pause it, change the job's status to RUNNING. 
If you diff --git a/service/macie2/types/enums.go b/service/macie2/types/enums.go index 6d01c8220a1..7be69238e36 100644 --- a/service/macie2/types/enums.go +++ b/service/macie2/types/enums.go @@ -6,8 +6,8 @@ type AdminStatus string // Enum values for AdminStatus const ( - AdminStatusEnabled AdminStatus = "ENABLED" - AdminStatusDisabling_in_progress AdminStatus = "DISABLING_IN_PROGRESS" + AdminStatusEnabled AdminStatus = "ENABLED" + AdminStatusDisablingInProgress AdminStatus = "DISABLING_IN_PROGRESS" ) // Values returns all known values for AdminStatus. Note that this can be expanded @@ -68,9 +68,9 @@ type EffectivePermission string // Enum values for EffectivePermission const ( - EffectivePermissionPublic EffectivePermission = "PUBLIC" - EffectivePermissionNot_public EffectivePermission = "NOT_PUBLIC" - EffectivePermissionUnknown EffectivePermission = "UNKNOWN" + EffectivePermissionPublic EffectivePermission = "PUBLIC" + EffectivePermissionNotPublic EffectivePermission = "NOT_PUBLIC" + EffectivePermissionUnknown EffectivePermission = "UNKNOWN" ) // Values returns all known values for EffectivePermission. Note that this can be @@ -90,7 +90,7 @@ type EncryptionType string const ( EncryptionTypeNone EncryptionType = "NONE" EncryptionTypeAes256 EncryptionType = "AES256" - EncryptionTypeAws_kms EncryptionType = "aws:kms" + EncryptionTypeAwsKms EncryptionType = "aws:kms" EncryptionTypeUnknown EncryptionType = "UNKNOWN" ) @@ -128,7 +128,7 @@ type FindingActionType string // Enum values for FindingActionType const ( - FindingActionTypeAws_api_call FindingActionType = "AWS_API_CALL" + FindingActionTypeAwsApiCall FindingActionType = "AWS_API_CALL" ) // Values returns all known values for FindingActionType. Note that this can be @@ -162,9 +162,9 @@ type FindingPublishingFrequency string // Enum values for FindingPublishingFrequency const ( - FindingPublishingFrequencyFifteen_minutes FindingPublishingFrequency = "FIFTEEN_MINUTES" - FindingPublishingFrequencyOne_hour FindingPublishingFrequency = "ONE_HOUR" - FindingPublishingFrequencySix_hours FindingPublishingFrequency = "SIX_HOURS" + FindingPublishingFrequencyFifteenMinutes FindingPublishingFrequency = "FIFTEEN_MINUTES" + FindingPublishingFrequencyOneHour FindingPublishingFrequency = "ONE_HOUR" + FindingPublishingFrequencySixHours FindingPublishingFrequency = "SIX_HOURS" ) // Values returns all known values for FindingPublishingFrequency. 
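To illustrate the UpdateClassificationJob status transitions documented above, the sketch below pauses a running classification job. The JobId member is an assumption based on the service API; JobStatusUserPaused is the renamed constant introduced further down in this patch.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/macie2"
	"github.com/aws/aws-sdk-go-v2/service/macie2/types"
)

// pauseClassificationJob sets a job to USER_PAUSED. Per the documentation
// above, this is valid only while the job is IDLE or RUNNING, and the job must
// be resumed within 30 days or it expires and is cancelled.
func pauseClassificationJob(ctx context.Context, client *macie2.Client, jobID string) error {
	_, err := client.UpdateClassificationJob(ctx, &macie2.UpdateClassificationJobInput{
		JobId:     aws.String(jobID), // assumed identifier member for the job
		JobStatus: types.JobStatusUserPaused,
	})
	return err
}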
Note that this @@ -219,16 +219,16 @@ type FindingType string // Enum values for FindingType const ( - FindingTypeSensitivedata_s3object_multiple FindingType = "SensitiveData:S3Object/Multiple" - FindingTypeSensitivedata_s3object_financial FindingType = "SensitiveData:S3Object/Financial" - FindingTypeSensitivedata_s3object_personal FindingType = "SensitiveData:S3Object/Personal" - FindingTypeSensitivedata_s3object_credentials FindingType = "SensitiveData:S3Object/Credentials" - FindingTypeSensitivedata_s3object_customidentifier FindingType = "SensitiveData:S3Object/CustomIdentifier" - FindingTypePolicy_iamuser_s3bucketpublic FindingType = "Policy:IAMUser/S3BucketPublic" - FindingTypePolicy_iamuser_s3bucketsharedexternally FindingType = "Policy:IAMUser/S3BucketSharedExternally" - FindingTypePolicy_iamuser_s3bucketreplicatedexternally FindingType = "Policy:IAMUser/S3BucketReplicatedExternally" - FindingTypePolicy_iamuser_s3bucketencryptiondisabled FindingType = "Policy:IAMUser/S3BucketEncryptionDisabled" - FindingTypePolicy_iamuser_s3blockpublicaccessdisabled FindingType = "Policy:IAMUser/S3BlockPublicAccessDisabled" + FindingTypeSensitivedataS3objectMultiple FindingType = "SensitiveData:S3Object/Multiple" + FindingTypeSensitivedataS3objectFinancial FindingType = "SensitiveData:S3Object/Financial" + FindingTypeSensitivedataS3objectPersonal FindingType = "SensitiveData:S3Object/Personal" + FindingTypeSensitivedataS3objectCredentials FindingType = "SensitiveData:S3Object/Credentials" + FindingTypeSensitivedataS3objectCustomidentifier FindingType = "SensitiveData:S3Object/CustomIdentifier" + FindingTypePolicyIamuserS3bucketpublic FindingType = "Policy:IAMUser/S3BucketPublic" + FindingTypePolicyIamuserS3bucketsharedexternally FindingType = "Policy:IAMUser/S3BucketSharedExternally" + FindingTypePolicyIamuserS3bucketreplicatedexternally FindingType = "Policy:IAMUser/S3BucketReplicatedExternally" + FindingTypePolicyIamuserS3bucketencryptiondisabled FindingType = "Policy:IAMUser/S3BucketEncryptionDisabled" + FindingTypePolicyIamuserS3blockpublicaccessdisabled FindingType = "Policy:IAMUser/S3BlockPublicAccessDisabled" ) // Values returns all known values for FindingType. Note that this can be expanded @@ -253,10 +253,10 @@ type GroupBy string // Enum values for GroupBy const ( - GroupByResourcesaffected_s3bucket_name GroupBy = "resourcesAffected.s3Bucket.name" - GroupByType GroupBy = "type" - GroupByClassificationdetails_jobid GroupBy = "classificationDetails.jobId" - GroupBySeverity_description GroupBy = "severity.description" + GroupByResourcesaffectedS3bucketName GroupBy = "resourcesAffected.s3Bucket.name" + GroupByType GroupBy = "type" + GroupByClassificationdetailsJobid GroupBy = "classificationDetails.jobId" + GroupBySeverityDescription GroupBy = "severity.description" ) // Values returns all known values for GroupBy. Note that this can be expanded in @@ -303,12 +303,12 @@ type JobStatus string // Enum values for JobStatus const ( - JobStatusRunning JobStatus = "RUNNING" - JobStatusPaused JobStatus = "PAUSED" - JobStatusCancelled JobStatus = "CANCELLED" - JobStatusComplete JobStatus = "COMPLETE" - JobStatusIdle JobStatus = "IDLE" - JobStatusUser_paused JobStatus = "USER_PAUSED" + JobStatusRunning JobStatus = "RUNNING" + JobStatusPaused JobStatus = "PAUSED" + JobStatusCancelled JobStatus = "CANCELLED" + JobStatusComplete JobStatus = "COMPLETE" + JobStatusIdle JobStatus = "IDLE" + JobStatusUserPaused JobStatus = "USER_PAUSED" ) // Values returns all known values for JobStatus. 
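The renamed JobStatus constants above can be used directly when interpreting the classification-job lifecycle described earlier in this patch; a small sketch:

package example

import "github.com/aws/aws-sdk-go-v2/service/macie2/types"

// jobNeedsAttention reports whether a classification job has stopped making
// progress on its own: USER_PAUSED jobs expire if not resumed within 30 days,
// PAUSED jobs are blocked on account quotas, and CANCELLED jobs never resume.
func jobNeedsAttention(status types.JobStatus) bool {
	switch status {
	case types.JobStatusUserPaused, types.JobStatusPaused, types.JobStatusCancelled:
		return true
	case types.JobStatusRunning, types.JobStatusIdle, types.JobStatusComplete:
		return false
	default:
		// The enum may be expanded in the future, as the generated comments note.
		return false
	}
}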
Note that this can be expanded in @@ -329,7 +329,7 @@ type JobType string // Enum values for JobType const ( - JobTypeOne_time JobType = "ONE_TIME" + JobTypeOneTime JobType = "ONE_TIME" JobTypeScheduled JobType = "SCHEDULED" ) @@ -461,11 +461,11 @@ type ScopeFilterKey string // Enum values for ScopeFilterKey const ( - ScopeFilterKeyBucket_creation_date ScopeFilterKey = "BUCKET_CREATION_DATE" - ScopeFilterKeyObject_extension ScopeFilterKey = "OBJECT_EXTENSION" - ScopeFilterKeyObject_last_modified_date ScopeFilterKey = "OBJECT_LAST_MODIFIED_DATE" - ScopeFilterKeyObject_size ScopeFilterKey = "OBJECT_SIZE" - ScopeFilterKeyTag ScopeFilterKey = "TAG" + ScopeFilterKeyBucketCreationDate ScopeFilterKey = "BUCKET_CREATION_DATE" + ScopeFilterKeyObjectExtension ScopeFilterKey = "OBJECT_EXTENSION" + ScopeFilterKeyObjectLastModifiedDate ScopeFilterKey = "OBJECT_LAST_MODIFIED_DATE" + ScopeFilterKeyObjectSize ScopeFilterKey = "OBJECT_SIZE" + ScopeFilterKeyTag ScopeFilterKey = "TAG" ) // Values returns all known values for ScopeFilterKey. Note that this can be @@ -485,10 +485,10 @@ type SensitiveDataItemCategory string // Enum values for SensitiveDataItemCategory const ( - SensitiveDataItemCategoryFinancial_information SensitiveDataItemCategory = "FINANCIAL_INFORMATION" - SensitiveDataItemCategoryPersonal_information SensitiveDataItemCategory = "PERSONAL_INFORMATION" - SensitiveDataItemCategoryCredentials SensitiveDataItemCategory = "CREDENTIALS" - SensitiveDataItemCategoryCustom_identifier SensitiveDataItemCategory = "CUSTOM_IDENTIFIER" + SensitiveDataItemCategoryFinancialInformation SensitiveDataItemCategory = "FINANCIAL_INFORMATION" + SensitiveDataItemCategoryPersonalInformation SensitiveDataItemCategory = "PERSONAL_INFORMATION" + SensitiveDataItemCategoryCredentials SensitiveDataItemCategory = "CREDENTIALS" + SensitiveDataItemCategoryCustomIdentifier SensitiveDataItemCategory = "CUSTOM_IDENTIFIER" ) // Values returns all known values for SensitiveDataItemCategory. Note that this @@ -527,10 +527,10 @@ type SharedAccess string // Enum values for SharedAccess const ( - SharedAccessExternal SharedAccess = "EXTERNAL" - SharedAccessInternal SharedAccess = "INTERNAL" - SharedAccessNot_shared SharedAccess = "NOT_SHARED" - SharedAccessUnknown SharedAccess = "UNKNOWN" + SharedAccessExternal SharedAccess = "EXTERNAL" + SharedAccessInternal SharedAccess = "INTERNAL" + SharedAccessNotShared SharedAccess = "NOT_SHARED" + SharedAccessUnknown SharedAccess = "UNKNOWN" ) // Values returns all known values for SharedAccess. Note that this can be expanded @@ -549,13 +549,13 @@ type StorageClass string // Enum values for StorageClass const ( - StorageClassStandard StorageClass = "STANDARD" - StorageClassReduced_redundancy StorageClass = "REDUCED_REDUNDANCY" - StorageClassStandard_ia StorageClass = "STANDARD_IA" - StorageClassIntelligent_tiering StorageClass = "INTELLIGENT_TIERING" - StorageClassDeep_archive StorageClass = "DEEP_ARCHIVE" - StorageClassOnezone_ia StorageClass = "ONEZONE_IA" - StorageClassGlacier StorageClass = "GLACIER" + StorageClassStandard StorageClass = "STANDARD" + StorageClassReducedRedundancy StorageClass = "REDUCED_REDUNDANCY" + StorageClassStandardIa StorageClass = "STANDARD_IA" + StorageClassIntelligentTiering StorageClass = "INTELLIGENT_TIERING" + StorageClassDeepArchive StorageClass = "DEEP_ARCHIVE" + StorageClassOnezoneIa StorageClass = "ONEZONE_IA" + StorageClassGlacier StorageClass = "GLACIER" ) // Values returns all known values for StorageClass. 
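The SharedAccess constants above map to the bucket-sharing semantics documented for BucketMetadata further down in this patch; a small helper as a sketch:

package example

import "github.com/aws/aws-sdk-go-v2/service/macie2/types"

// sharedOutsideOrganization reports whether a bucket is shared with an AWS
// account that isn't part of the same Amazon Macie organization (EXTERNAL), as
// opposed to internal sharing, no sharing, or an unknown evaluation result.
func sharedOutsideOrganization(access types.SharedAccess) bool {
	return access == types.SharedAccessExternal
}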
Note that this can be expanded @@ -577,7 +577,7 @@ type TagTarget string // Enum values for TagTarget const ( - TagTargetS3_object TagTarget = "S3_OBJECT" + TagTargetS3Object TagTarget = "S3_OBJECT" ) // Values returns all known values for TagTarget. Note that this can be expanded in @@ -682,8 +682,8 @@ type UsageType string // Enum values for UsageType const ( - UsageTypeData_inventory_evaluation UsageType = "DATA_INVENTORY_EVALUATION" - UsageTypeSensitive_data_discovery UsageType = "SENSITIVE_DATA_DISCOVERY" + UsageTypeDataInventoryEvaluation UsageType = "DATA_INVENTORY_EVALUATION" + UsageTypeSensitiveDataDiscovery UsageType = "SENSITIVE_DATA_DISCOVERY" ) // Values returns all known values for UsageType. Note that this can be expanded in diff --git a/service/macie2/types/types.go b/service/macie2/types/types.go index 198955d1316..3f7a671a212 100644 --- a/service/macie2/types/types.go +++ b/service/macie2/types/types.go @@ -325,17 +325,17 @@ type BucketMetadata struct { // Specifies whether the bucket is shared with another AWS account. Possible values // are: // - // * EXTERNAL - The bucket is shared with an AWS account that isn't part - // of the same Amazon Macie organization. + // * EXTERNAL - The bucket is shared with an AWS account that isn't part of + // the same Amazon Macie organization. // - // * INTERNAL - The bucket is shared - // with an AWS account that's part of the same Amazon Macie organization. + // * INTERNAL - The bucket is shared with an + // AWS account that's part of the same Amazon Macie organization. // - // * - // NOT_SHARED - The bucket isn't shared with other AWS accounts. + // * NOT_SHARED - + // The bucket isn't shared with other AWS accounts. // - // * UNKNOWN - - // Amazon Macie wasn't able to evaluate the shared access settings for the bucket. + // * UNKNOWN - Amazon Macie + // wasn't able to evaluate the shared access settings for the bucket. SharedAccess SharedAccess // The total storage size, in bytes, of the bucket. @@ -393,13 +393,13 @@ type BucketPublicAccess struct { // Specifies whether the bucket is publicly accessible due to the combination of // permissions settings that apply to the bucket. Possible values are: // - // * + // * // NOT_PUBLIC - The bucket isn't publicly accessible. // - // * PUBLIC - The bucket is + // * PUBLIC - The bucket is // publicly accessible. // - // * UNKNOWN - Amazon Macie can't determine whether the + // * UNKNOWN - Amazon Macie can't determine whether the // bucket is publicly accessible. EffectivePermission EffectivePermission @@ -511,17 +511,17 @@ type ClassificationResultStatus struct { // The status of the finding. Possible values are: // - // * COMPLETE - Amazon Macie - // successfully completed its analysis of the object that the finding applies to. + // * COMPLETE - Amazon Macie + // successfully completed its analysis of the object that the finding applies + // to. // + // * PARTIAL - Macie was able to analyze only a subset of the data in the + // object that the finding applies to. For example, the object is a compressed or + // archive file that contains files in an unsupported format. // - // * PARTIAL - Macie was able to analyze only a subset of the data in the object - // that the finding applies to. For example, the object is a compressed or archive - // file that contains files in an unsupported format. - // - // * SKIPPED - Macie wasn't - // able to analyze the object that the finding applies to. For example, the object - // is a malformed file or a file that uses an unsupported format. 
+ // * SKIPPED - Macie + // wasn't able to analyze the object that the finding applies to. For example, the + // object is a malformed file or a file that uses an unsupported format. Code *string // A brief description of the status of the finding. Amazon Macie uses this value @@ -983,36 +983,35 @@ type JobSummary struct { // The current status of the job. Possible values are: // - // * CANCELLED - You - // cancelled the job, or you paused the job and didn't resume it within 30 days of - // pausing it. + // * CANCELLED - You cancelled + // the job, or you paused the job and didn't resume it within 30 days of pausing + // it. // - // * COMPLETE - For a one-time job, Amazon Macie finished - // processing all the data specified for the job. This value doesn't apply to - // recurring jobs. + // * COMPLETE - For a one-time job, Amazon Macie finished processing all the + // data specified for the job. This value doesn't apply to recurring jobs. // - // * IDLE - For a recurring job, the previous scheduled run is - // complete and the next scheduled run is pending. This value doesn't apply to - // one-time jobs. + // * IDLE + // - For a recurring job, the previous scheduled run is complete and the next + // scheduled run is pending. This value doesn't apply to one-time jobs. // - // * PAUSED - Amazon Macie started running the job but - // completion of the job would exceed one or more quotas for your account. + // * PAUSED - + // Amazon Macie started running the job but completion of the job would exceed one + // or more quotas for your account. // - // * - // RUNNING - For a one-time job, the job is in progress. For a recurring job, a - // scheduled run is in progress. + // * RUNNING - For a one-time job, the job is in + // progress. For a recurring job, a scheduled run is in progress. // - // * USER_PAUSED - You paused the job. If you - // don't resume the job within 30 days of pausing it, the job will expire and be - // cancelled. + // * USER_PAUSED - + // You paused the job. If you don't resume the job within 30 days of pausing it, + // the job will expire and be cancelled. JobStatus JobStatus // The schedule for running the job. Possible values are: // - // * ONE_TIME - The job + // * ONE_TIME - The job // runs only once. // - // * SCHEDULED - The job runs on a daily, weekly, or monthly + // * SCHEDULED - The job runs on a daily, weekly, or monthly // basis. JobType JobType @@ -1239,30 +1238,30 @@ type Range struct { // Possible values are: // - // * In an Occurrences.lineRanges array, the number of - // lines from the beginning of the file to the end of the sensitive data. + // * In an Occurrences.lineRanges array, the number of lines + // from the beginning of the file to the end of the sensitive data. // - // * In - // an Occurrences.offsetRanges array, the number of characters from the beginning - // of the file to the end of the sensitive data. + // * In an + // Occurrences.offsetRanges array, the number of characters from the beginning of + // the file to the end of the sensitive data. // - // * In a Page object, the - // number of lines (lineRange) or characters (offsetRange) from the beginning of - // the page to the end of the sensitive data. + // * In a Page object, the number of + // lines (lineRange) or characters (offsetRange) from the beginning of the page to + // the end of the sensitive data. End *int64 // Possible values are: // - // * In an Occurrences.lineRanges array, the number of - // lines from the beginning of the file to the beginning of the sensitive data. 
- // + // * In an Occurrences.lineRanges array, the number of lines + // from the beginning of the file to the beginning of the sensitive data. // - // * In an Occurrences.offsetRanges array, the number of characters from the - // beginning of the file to the beginning of the sensitive data. + // * In an + // Occurrences.offsetRanges array, the number of characters from the beginning of + // the file to the beginning of the sensitive data. // - // * In a Page - // object, the number of lines (lineRange) or characters (offsetRange) from the - // beginning of the page to the beginning of the sensitive data. + // * In a Page object, the number + // of lines (lineRange) or characters (offsetRange) from the beginning of the page + // to the beginning of the sensitive data. Start *int64 // The column number for the column that contains the data, if the file contains @@ -1570,15 +1569,15 @@ type SimpleScopeTerm struct { // The operator to use in the condition. Valid operators for each supported // property (key) are: // - // * OBJECT_EXTENSION - EQ (equals) or NE (not equals) + // * OBJECT_EXTENSION - EQ (equals) or NE (not equals) // + // * + // OBJECT_LAST_MODIFIED_DATE - Any operator except CONTAINS // - // * OBJECT_LAST_MODIFIED_DATE - Any operator except CONTAINS + // * OBJECT_SIZE - Any + // operator except CONTAINS // - // * OBJECT_SIZE - - // Any operator except CONTAINS - // - // * TAG - EQ (equals) or NE (not equals) + // * TAG - EQ (equals) or NE (not equals) Comparator JobComparator // The object property to use in the condition. @@ -1589,21 +1588,21 @@ type SimpleScopeTerm struct { // Macie uses an OR operator to join the values. Otherwise, this array can specify // only one value. Valid values for each supported property (key) are: // - // * + // * // OBJECT_EXTENSION - A string that represents the file name extension of an // object. For example: doc, docx, pdf // - // * OBJECT_LAST_MODIFIED_DATE - The date - // and time (in UTC and extended ISO 8601 format) when an object was created or - // last changed, whichever is latest. For example: 2020-09-28T14:31:13Z + // * OBJECT_LAST_MODIFIED_DATE - The date and + // time (in UTC and extended ISO 8601 format) when an object was created or last + // changed, whichever is latest. For example: 2020-09-28T14:31:13Z // - // * - // OBJECT_SIZE - An integer that represents the storage size (in bytes) of an - // object. + // * OBJECT_SIZE - + // An integer that represents the storage size (in bytes) of an object. // - // * TAG - A string that represents a tag key for an object. For - // advanced options, use a TagScopeTerm object, instead of a SimpleScopeTerm - // object, to define a tag-based condition for the job. + // * TAG - A + // string that represents a tag key for an object. For advanced options, use a + // TagScopeTerm object, instead of a SimpleScopeTerm object, to define a tag-based + // condition for the job. Values []*string } @@ -1731,19 +1730,18 @@ type UsageStatisticsFilter struct { // accountId, this array can specify multiple values. Otherwise, this array can // specify only one value. Valid values for each supported field are: // - // * - // accountId - The unique identifier for an AWS account. + // * accountId + // - The unique identifier for an AWS account. // - // * freeTrialStartDate - // - The date and time, in UTC and extended ISO 8601 format, when the free trial - // started for an account. 
+ // * freeTrialStartDate - The date and + // time, in UTC and extended ISO 8601 format, when the free trial started for an + // account. // - // * serviceLimit - A Boolean (true or false) value - // that indicates whether an account has reached its monthly quota. + // * serviceLimit - A Boolean (true or false) value that indicates + // whether an account has reached its monthly quota. // - // * total - - // A string that represents the current, estimated month-to-date cost for an - // account. + // * total - A string that + // represents the current, estimated month-to-date cost for an account. Values []*string } diff --git a/service/managedblockchain/types/enums.go b/service/managedblockchain/types/enums.go index 16b017b8c2f..6af1ad46f51 100644 --- a/service/managedblockchain/types/enums.go +++ b/service/managedblockchain/types/enums.go @@ -24,7 +24,7 @@ type Framework string // Enum values for Framework const ( - FrameworkHyperledger_fabric Framework = "HYPERLEDGER_FABRIC" + FrameworkHyperledgerFabric Framework = "HYPERLEDGER_FABRIC" ) // Values returns all known values for Framework. Note that this can be expanded in @@ -64,12 +64,12 @@ type MemberStatus string // Enum values for MemberStatus const ( - MemberStatusCreating MemberStatus = "CREATING" - MemberStatusAvailable MemberStatus = "AVAILABLE" - MemberStatusCreate_failed MemberStatus = "CREATE_FAILED" - MemberStatusUpdating MemberStatus = "UPDATING" - MemberStatusDeleting MemberStatus = "DELETING" - MemberStatusDeleted MemberStatus = "DELETED" + MemberStatusCreating MemberStatus = "CREATING" + MemberStatusAvailable MemberStatus = "AVAILABLE" + MemberStatusCreateFailed MemberStatus = "CREATE_FAILED" + MemberStatusUpdating MemberStatus = "UPDATING" + MemberStatusDeleting MemberStatus = "DELETING" + MemberStatusDeleted MemberStatus = "DELETED" ) // Values returns all known values for MemberStatus. Note that this can be expanded @@ -90,11 +90,11 @@ type NetworkStatus string // Enum values for NetworkStatus const ( - NetworkStatusCreating NetworkStatus = "CREATING" - NetworkStatusAvailable NetworkStatus = "AVAILABLE" - NetworkStatusCreate_failed NetworkStatus = "CREATE_FAILED" - NetworkStatusDeleting NetworkStatus = "DELETING" - NetworkStatusDeleted NetworkStatus = "DELETED" + NetworkStatusCreating NetworkStatus = "CREATING" + NetworkStatusAvailable NetworkStatus = "AVAILABLE" + NetworkStatusCreateFailed NetworkStatus = "CREATE_FAILED" + NetworkStatusDeleting NetworkStatus = "DELETING" + NetworkStatusDeleted NetworkStatus = "DELETED" ) // Values returns all known values for NetworkStatus. Note that this can be @@ -114,13 +114,13 @@ type NodeStatus string // Enum values for NodeStatus const ( - NodeStatusCreating NodeStatus = "CREATING" - NodeStatusAvailable NodeStatus = "AVAILABLE" - NodeStatusCreate_failed NodeStatus = "CREATE_FAILED" - NodeStatusUpdating NodeStatus = "UPDATING" - NodeStatusDeleting NodeStatus = "DELETING" - NodeStatusDeleted NodeStatus = "DELETED" - NodeStatusFailed NodeStatus = "FAILED" + NodeStatusCreating NodeStatus = "CREATING" + NodeStatusAvailable NodeStatus = "AVAILABLE" + NodeStatusCreateFailed NodeStatus = "CREATE_FAILED" + NodeStatusUpdating NodeStatus = "UPDATING" + NodeStatusDeleting NodeStatus = "DELETING" + NodeStatusDeleted NodeStatus = "DELETED" + NodeStatusFailed NodeStatus = "FAILED" ) // Values returns all known values for NodeStatus. 
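The ManagedBlockchain status enums follow the same pattern (CREATE_FAILED becomes MemberStatusCreateFailed, and so on). A small hypothetical helper, sketched against the assumed import path, showing how polling code reads with the new identifiers:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/managedblockchain/types"
)

// memberCreationDone reports whether member creation has finished and whether it
// succeeded: AVAILABLE is terminal success, CREATE_FAILED is terminal failure,
// and anything else (e.g. CREATING) is still in progress.
func memberCreationDone(status types.MemberStatus) (done, ok bool) {
	switch status {
	case types.MemberStatusAvailable:
		return true, true
	case types.MemberStatusCreateFailed:
		return true, false
	default:
		return false, false
	}
}

func main() {
	done, ok := memberCreationDone(types.MemberStatusCreateFailed)
	fmt.Println(done, ok) // true false
}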
Note that this can be expanded @@ -142,11 +142,11 @@ type ProposalStatus string // Enum values for ProposalStatus const ( - ProposalStatusIn_progress ProposalStatus = "IN_PROGRESS" - ProposalStatusApproved ProposalStatus = "APPROVED" - ProposalStatusRejected ProposalStatus = "REJECTED" - ProposalStatusExpired ProposalStatus = "EXPIRED" - ProposalStatusAction_failed ProposalStatus = "ACTION_FAILED" + ProposalStatusInProgress ProposalStatus = "IN_PROGRESS" + ProposalStatusApproved ProposalStatus = "APPROVED" + ProposalStatusRejected ProposalStatus = "REJECTED" + ProposalStatusExpired ProposalStatus = "EXPIRED" + ProposalStatusActionFailed ProposalStatus = "ACTION_FAILED" ) // Values returns all known values for ProposalStatus. Note that this can be @@ -184,8 +184,8 @@ type ThresholdComparator string // Enum values for ThresholdComparator const ( - ThresholdComparatorGreater_than ThresholdComparator = "GREATER_THAN" - ThresholdComparatorGreater_than_or_equal_to ThresholdComparator = "GREATER_THAN_OR_EQUAL_TO" + ThresholdComparatorGreaterThan ThresholdComparator = "GREATER_THAN" + ThresholdComparatorGreaterThanOrEqualTo ThresholdComparator = "GREATER_THAN_OR_EQUAL_TO" ) // Values returns all known values for ThresholdComparator. Note that this can be diff --git a/service/managedblockchain/types/types.go b/service/managedblockchain/types/types.go index b303bbf0670..2707dd6bff1 100644 --- a/service/managedblockchain/types/types.go +++ b/service/managedblockchain/types/types.go @@ -52,21 +52,21 @@ type Invitation struct { // The status of the invitation: // - // * PENDING - The invitee has not created a - // member to join the network, and the invitation has not yet expired. + // * PENDING - The invitee has not created a member + // to join the network, and the invitation has not yet expired. // - // * - // ACCEPTING - The invitee has begun creating a member, and creation has not yet - // completed. + // * ACCEPTING - The + // invitee has begun creating a member, and creation has not yet completed. // - // * ACCEPTED - The invitee created a member and joined the network - // using the InvitationID. + // * + // ACCEPTED - The invitee created a member and joined the network using the + // InvitationID. // - // * REJECTED - The invitee rejected the invitation. + // * REJECTED - The invitee rejected the invitation. // - // - // * EXPIRED - The invitee neither created a member nor rejected the invitation - // before the ExpirationDate. + // * EXPIRED - + // The invitee neither created a member nor rejected the invitation before the + // ExpirationDate. Status InvitationStatus } @@ -121,24 +121,24 @@ type Member struct { // The status of a member. // - // * CREATING - The AWS account is in the process of + // * CREATING - The AWS account is in the process of // creating a member. // - // * AVAILABLE - The member has been created and can + // * AVAILABLE - The member has been created and can // participate in the network. // - // * CREATE_FAILED - The AWS account attempted to + // * CREATE_FAILED - The AWS account attempted to // create a member and creation failed. // - // * DELETING - The member and all - // associated resources are in the process of being deleted. Either the AWS account - // that owns the member deleted it, or the member is being deleted as the result of - // an APPROVEDPROPOSAL to remove the member. + // * DELETING - The member and all associated + // resources are in the process of being deleted. 
Either the AWS account that owns + // the member deleted it, or the member is being deleted as the result of an + // APPROVEDPROPOSAL to remove the member. // - // * DELETED - The member can no - // longer participate on the network and all associated resources are deleted. - // Either the AWS account that owns the member deleted it, or the member is being - // deleted as the result of an APPROVEDPROPOSAL to remove the member. + // * DELETED - The member can no longer + // participate on the network and all associated resources are deleted. Either the + // AWS account that owns the member deleted it, or the member is being deleted as + // the result of an APPROVEDPROPOSAL to remove the member. Status MemberStatus } @@ -251,24 +251,24 @@ type MemberSummary struct { // The status of the member. // - // * CREATING - The AWS account is in the process of + // * CREATING - The AWS account is in the process of // creating a member. // - // * AVAILABLE - The member has been created and can + // * AVAILABLE - The member has been created and can // participate in the network. // - // * CREATE_FAILED - The AWS account attempted to + // * CREATE_FAILED - The AWS account attempted to // create a member and creation failed. // - // * DELETING - The member and all - // associated resources are in the process of being deleted. Either the AWS account - // that owns the member deleted it, or the member is being deleted as the result of - // an APPROVEDPROPOSAL to remove the member. + // * DELETING - The member and all associated + // resources are in the process of being deleted. Either the AWS account that owns + // the member deleted it, or the member is being deleted as the result of an + // APPROVEDPROPOSAL to remove the member. // - // * DELETED - The member can no - // longer participate on the network and all associated resources are deleted. - // Either the AWS account that owns the member deleted it, or the member is being - // deleted as the result of an APPROVEDPROPOSAL to remove the member. + // * DELETED - The member can no longer + // participate on the network and all associated resources are deleted. Either the + // AWS account that owns the member deleted it, or the member is being deleted as + // the result of an APPROVEDPROPOSAL to remove the member. Status MemberStatus } @@ -541,27 +541,26 @@ type Proposal struct { // The status of the proposal. Values are as follows: // - // * IN_PROGRESS - The - // proposal is active and open for member voting. + // * IN_PROGRESS - The proposal + // is active and open for member voting. // - // * APPROVED - The proposal - // was approved with sufficient YES votes among members according to the - // VotingPolicy specified for the Network. The specified proposal actions are - // carried out. + // * APPROVED - The proposal was approved + // with sufficient YES votes among members according to the VotingPolicy specified + // for the Network. The specified proposal actions are carried out. // - // * REJECTED - The proposal was rejected with insufficient YES - // votes among members according to the VotingPolicy specified for the Network. The - // specified ProposalActions are not carried out. + // * REJECTED - + // The proposal was rejected with insufficient YES votes among members according to + // the VotingPolicy specified for the Network. The specified ProposalActions are + // not carried out. // - // * EXPIRED - Members did not - // cast the number of votes required to determine the proposal outcome before the - // proposal expired. 
The specified ProposalActions are not carried out. + // * EXPIRED - Members did not cast the number of votes required + // to determine the proposal outcome before the proposal expired. The specified + // ProposalActions are not carried out. // - // * - // ACTION_FAILED - One or more of the specified ProposalActions in a proposal that - // was approved could not be completed because of an error. The ACTION_FAILED - // status occurs even if only one ProposalAction fails and other actions are - // successful. + // * ACTION_FAILED - One or more of the + // specified ProposalActions in a proposal that was approved could not be completed + // because of an error. The ACTION_FAILED status occurs even if only one + // ProposalAction fails and other actions are successful. Status ProposalStatus // The current total of YES votes cast on the proposal by members. @@ -608,25 +607,25 @@ type ProposalSummary struct { // The status of the proposal. Values are as follows: // - // * IN_PROGRESS - The - // proposal is active and open for member voting. + // * IN_PROGRESS - The proposal + // is active and open for member voting. // - // * APPROVED - The proposal - // was approved with sufficient YES votes among members according to the - // VotingPolicy specified for the Network. The specified proposal actions are - // carried out. + // * APPROVED - The proposal was approved + // with sufficient YES votes among members according to the VotingPolicy specified + // for the Network. The specified proposal actions are carried out. // - // * REJECTED - The proposal was rejected with insufficient YES - // votes among members according to the VotingPolicy specified for the Network. The - // specified ProposalActions are not carried out. + // * REJECTED - + // The proposal was rejected with insufficient YES votes among members according to + // the VotingPolicy specified for the Network. The specified ProposalActions are + // not carried out. // - // * EXPIRED - Members did not - // cast the number of votes required to determine the proposal outcome before the - // proposal expired. The specified ProposalActions are not carried out. + // * EXPIRED - Members did not cast the number of votes required + // to determine the proposal outcome before the proposal expired. The specified + // ProposalActions are not carried out. // - // * - // ACTION_FAILED - One or more of the specified ProposalActions in a proposal that - // was approved could not be completed because of an error. + // * ACTION_FAILED - One or more of the + // specified ProposalActions in a proposal that was approved could not be completed + // because of an error. Status ProposalStatus } diff --git a/service/marketplacecatalog/types/types.go b/service/marketplacecatalog/types/types.go index b255bbbf25a..ec0afafd722 100644 --- a/service/marketplacecatalog/types/types.go +++ b/service/marketplacecatalog/types/types.go @@ -141,29 +141,29 @@ type Filter struct { // ListEntities - This is a list of unique EntityIds. ListChangeSets - The // supported filter names and associated ValueLists is as follows: // - // * - // ChangeSetName - The supported ValueList is a list of non-unique ChangeSetNames. - // These are defined when you call the StartChangeSet action. + // * ChangeSetName + // - The supported ValueList is a list of non-unique ChangeSetNames. These are + // defined when you call the StartChangeSet action. // - // * Status - The - // supported ValueList is a list of statuses for all change set requests. 
+ // * Status - The supported + // ValueList is a list of statuses for all change set requests. // - // * - // EntityId - The supported ValueList is a list of unique EntityIds. + // * EntityId - The + // supported ValueList is a list of unique EntityIds. // - // * - // BeforeStartTime - The supported ValueList is a list of all change sets that - // started before the filter value. + // * BeforeStartTime - The + // supported ValueList is a list of all change sets that started before the filter + // value. // - // * AfterStartTime - The supported ValueList - // is a list of all change sets that started after the filter value. + // * AfterStartTime - The supported ValueList is a list of all change sets + // that started after the filter value. // - // * - // BeforeEndTime - The supported ValueList is a list of all change sets that ended - // before the filter value. + // * BeforeEndTime - The supported ValueList + // is a list of all change sets that ended before the filter value. // - // * AfterEndTime - The supported ValueList is a list - // of all change sets that ended after the filter value. + // * AfterEndTime + // - The supported ValueList is a list of all change sets that ended after the + // filter value. ValueList []*string } diff --git a/service/marketplacecommerceanalytics/api_op_GenerateDataSet.go b/service/marketplacecommerceanalytics/api_op_GenerateDataSet.go index 80386b6ece4..61915cfa276 100644 --- a/service/marketplacecommerceanalytics/api_op_GenerateDataSet.go +++ b/service/marketplacecommerceanalytics/api_op_GenerateDataSet.go @@ -53,82 +53,81 @@ type GenerateDataSetInput struct { // The desired data set type. // - // * - // customer_subscriber_hourly_monthly_subscriptions From 2017-09-15 to present: - // Available daily by 24:00 UTC. - // - // * customer_subscriber_annual_subscriptions + // * customer_subscriber_hourly_monthly_subscriptions // From 2017-09-15 to present: Available daily by 24:00 UTC. // - // * - // daily_business_usage_by_instance_type From 2017-09-15 to present: Available + // * + // customer_subscriber_annual_subscriptions From 2017-09-15 to present: Available // daily by 24:00 UTC. // - // * daily_business_fees From 2017-09-15 to present: - // Available daily by 24:00 UTC. + // * daily_business_usage_by_instance_type From 2017-09-15 to + // present: Available daily by 24:00 UTC. // - // * daily_business_free_trial_conversions From - // 2017-09-15 to present: Available daily by 24:00 UTC. + // * daily_business_fees From 2017-09-15 to + // present: Available daily by 24:00 UTC. // - // * + // * daily_business_free_trial_conversions + // From 2017-09-15 to present: Available daily by 24:00 UTC. + // + // * // daily_business_new_instances From 2017-09-15 to present: Available daily by // 24:00 UTC. // - // * daily_business_new_product_subscribers From 2017-09-15 to - // present: Available daily by 24:00 UTC. - // - // * - // daily_business_canceled_product_subscribers From 2017-09-15 to present: + // * daily_business_new_product_subscribers From 2017-09-15 to present: // Available daily by 24:00 UTC. // - // * monthly_revenue_billing_and_revenue_data - // From 2017-09-15 to present: Available monthly on the 15th day of the month by - // 24:00 UTC. Data includes metered transactions (e.g. hourly) from one month - // prior. + // * daily_business_canceled_product_subscribers + // From 2017-09-15 to present: Available daily by 24:00 UTC. 
// - // * monthly_revenue_annual_subscriptions From 2017-09-15 to present: - // Available monthly on the 15th day of the month by 24:00 UTC. Data includes - // up-front software charges (e.g. annual) from one month prior. + // * + // monthly_revenue_billing_and_revenue_data From 2017-09-15 to present: Available + // monthly on the 15th day of the month by 24:00 UTC. Data includes metered + // transactions (e.g. hourly) from one month prior. // - // * + // * + // monthly_revenue_annual_subscriptions From 2017-09-15 to present: Available + // monthly on the 15th day of the month by 24:00 UTC. Data includes up-front + // software charges (e.g. annual) from one month prior. + // + // * // monthly_revenue_field_demonstration_usage From 2018-03-15 to present: Available // monthly on the 15th day of the month by 24:00 UTC. // - // * + // * // monthly_revenue_flexible_payment_schedule From 2018-11-15 to present: Available // monthly on the 15th day of the month by 24:00 UTC. // - // * + // * // disbursed_amount_by_product From 2017-09-15 to present: Available every 30 days // by 24:00 UTC. // - // * disbursed_amount_by_instance_hours From 2017-09-15 to - // present: Available every 30 days by 24:00 UTC. - // - // * - // disbursed_amount_by_customer_geo From 2017-09-15 to present: Available every 30 - // days by 24:00 UTC. + // * disbursed_amount_by_instance_hours From 2017-09-15 to present: + // Available every 30 days by 24:00 UTC. // - // * disbursed_amount_by_age_of_uncollected_funds From + // * disbursed_amount_by_customer_geo From // 2017-09-15 to present: Available every 30 days by 24:00 UTC. // - // * + // * + // disbursed_amount_by_age_of_uncollected_funds From 2017-09-15 to present: + // Available every 30 days by 24:00 UTC. + // + // * // disbursed_amount_by_age_of_disbursed_funds From 2017-09-15 to present: Available // every 30 days by 24:00 UTC. // - // * disbursed_amount_by_age_of_past_due_funds - // From 2018-04-07 to present: Available every 30 days by 24:00 UTC. + // * disbursed_amount_by_age_of_past_due_funds From + // 2018-04-07 to present: Available every 30 days by 24:00 UTC. // - // * + // * // disbursed_amount_by_uncollected_funds_breakdown From 2019-10-04 to present: // Available every 30 days by 24:00 UTC. // - // * sales_compensation_billed_revenue - // From 2017-09-15 to present: Available monthly on the 15th day of the month by - // 24:00 UTC. Data includes metered transactions (e.g. hourly) from one month - // prior, and up-front software charges (e.g. annual) from one month prior. + // * sales_compensation_billed_revenue From + // 2017-09-15 to present: Available monthly on the 15th day of the month by 24:00 + // UTC. Data includes metered transactions (e.g. hourly) from one month prior, and + // up-front software charges (e.g. annual) from one month prior. // - // * + // * // us_sales_and_use_tax_records From 2017-09-15 to present: Available monthly on // the 15th day of the month by 24:00 UTC. // diff --git a/service/marketplacecommerceanalytics/api_op_StartSupportDataExport.go b/service/marketplacecommerceanalytics/api_op_StartSupportDataExport.go index 0f5c2bd6eeb..d503ed8ae0f 100644 --- a/service/marketplacecommerceanalytics/api_op_StartSupportDataExport.go +++ b/service/marketplacecommerceanalytics/api_op_StartSupportDataExport.go @@ -48,12 +48,12 @@ type StartSupportDataExportInput struct { // AWS Account Id, Given Name, Surname, Telephone Number, Email, Title, Country // Code, ZIP Code, Operation Type, and Operation Time. 
// - // * + // * // customer_support_contacts_data Customer support contact data. The data set will // contain all changes (Creates, Updates, and Deletes) to customer support contact // data from the date specified in the from_date parameter. // - // * + // * // test_customer_support_contacts_data An example data set containing static test // data in the same format as customer_support_contacts_data // diff --git a/service/marketplacecommerceanalytics/types/enums.go b/service/marketplacecommerceanalytics/types/enums.go index 0e08c1592a0..1ef88b09c15 100644 --- a/service/marketplacecommerceanalytics/types/enums.go +++ b/service/marketplacecommerceanalytics/types/enums.go @@ -6,31 +6,31 @@ type DataSetType string // Enum values for DataSetType const ( - DataSetTypeCustomer_subscriber_hourly_monthly_subscriptions DataSetType = "customer_subscriber_hourly_monthly_subscriptions" - DataSetTypeCustomer_subscriber_annual_subscriptions DataSetType = "customer_subscriber_annual_subscriptions" - DataSetTypeDaily_business_usage_by_instance_type DataSetType = "daily_business_usage_by_instance_type" - DataSetTypeDaily_business_fees DataSetType = "daily_business_fees" - DataSetTypeDaily_business_free_trial_conversions DataSetType = "daily_business_free_trial_conversions" - DataSetTypeDaily_business_new_instances DataSetType = "daily_business_new_instances" - DataSetTypeDaily_business_new_product_subscribers DataSetType = "daily_business_new_product_subscribers" - DataSetTypeDaily_business_canceled_product_subscribers DataSetType = "daily_business_canceled_product_subscribers" - DataSetTypeMonthly_revenue_billing_and_revenue_data DataSetType = "monthly_revenue_billing_and_revenue_data" - DataSetTypeMonthly_revenue_annual_subscriptions DataSetType = "monthly_revenue_annual_subscriptions" - DataSetTypeMonthly_revenue_field_demonstration_usage DataSetType = "monthly_revenue_field_demonstration_usage" - DataSetTypeMonthly_revenue_flexible_payment_schedule DataSetType = "monthly_revenue_flexible_payment_schedule" - DataSetTypeDisbursed_amount_by_product DataSetType = "disbursed_amount_by_product" - DataSetTypeDisbursed_amount_by_product_with_uncollected_funds DataSetType = "disbursed_amount_by_product_with_uncollected_funds" - DataSetTypeDisbursed_amount_by_instance_hours DataSetType = "disbursed_amount_by_instance_hours" - DataSetTypeDisbursed_amount_by_customer_geo DataSetType = "disbursed_amount_by_customer_geo" - DataSetTypeDisbursed_amount_by_age_of_uncollected_funds DataSetType = "disbursed_amount_by_age_of_uncollected_funds" - DataSetTypeDisbursed_amount_by_age_of_disbursed_funds DataSetType = "disbursed_amount_by_age_of_disbursed_funds" - DataSetTypeDisbursed_amount_by_age_of_past_due_funds DataSetType = "disbursed_amount_by_age_of_past_due_funds" - DataSetTypeDisbursed_amount_by_uncollected_funds_breakdown DataSetType = "disbursed_amount_by_uncollected_funds_breakdown" - DataSetTypeCustomer_profile_by_industry DataSetType = "customer_profile_by_industry" - DataSetTypeCustomer_profile_by_revenue DataSetType = "customer_profile_by_revenue" - DataSetTypeCustomer_profile_by_geography DataSetType = "customer_profile_by_geography" - DataSetTypeSales_compensation_billed_revenue DataSetType = "sales_compensation_billed_revenue" - DataSetTypeUs_sales_and_use_tax_records DataSetType = "us_sales_and_use_tax_records" + DataSetTypeCustomerSubscriberHourlyMonthlySubscriptions DataSetType = "customer_subscriber_hourly_monthly_subscriptions" + DataSetTypeCustomerSubscriberAnnualSubscriptions DataSetType = 
"customer_subscriber_annual_subscriptions" + DataSetTypeDailyBusinessUsageByInstanceType DataSetType = "daily_business_usage_by_instance_type" + DataSetTypeDailyBusinessFees DataSetType = "daily_business_fees" + DataSetTypeDailyBusinessFreeTrialConversions DataSetType = "daily_business_free_trial_conversions" + DataSetTypeDailyBusinessNewInstances DataSetType = "daily_business_new_instances" + DataSetTypeDailyBusinessNewProductSubscribers DataSetType = "daily_business_new_product_subscribers" + DataSetTypeDailyBusinessCanceledProductSubscribers DataSetType = "daily_business_canceled_product_subscribers" + DataSetTypeMonthlyRevenueBillingAndRevenueData DataSetType = "monthly_revenue_billing_and_revenue_data" + DataSetTypeMonthlyRevenueAnnualSubscriptions DataSetType = "monthly_revenue_annual_subscriptions" + DataSetTypeMonthlyRevenueFieldDemonstrationUsage DataSetType = "monthly_revenue_field_demonstration_usage" + DataSetTypeMonthlyRevenueFlexiblePaymentSchedule DataSetType = "monthly_revenue_flexible_payment_schedule" + DataSetTypeDisbursedAmountByProduct DataSetType = "disbursed_amount_by_product" + DataSetTypeDisbursedAmountByProductWithUncollectedFunds DataSetType = "disbursed_amount_by_product_with_uncollected_funds" + DataSetTypeDisbursedAmountByInstanceHours DataSetType = "disbursed_amount_by_instance_hours" + DataSetTypeDisbursedAmountByCustomerGeo DataSetType = "disbursed_amount_by_customer_geo" + DataSetTypeDisbursedAmountByAgeOfUncollectedFunds DataSetType = "disbursed_amount_by_age_of_uncollected_funds" + DataSetTypeDisbursedAmountByAgeOfDisbursedFunds DataSetType = "disbursed_amount_by_age_of_disbursed_funds" + DataSetTypeDisbursedAmountByAgeOfPastDueFunds DataSetType = "disbursed_amount_by_age_of_past_due_funds" + DataSetTypeDisbursedAmountByUncollectedFundsBreakdown DataSetType = "disbursed_amount_by_uncollected_funds_breakdown" + DataSetTypeCustomerProfileByIndustry DataSetType = "customer_profile_by_industry" + DataSetTypeCustomerProfileByRevenue DataSetType = "customer_profile_by_revenue" + DataSetTypeCustomerProfileByGeography DataSetType = "customer_profile_by_geography" + DataSetTypeSalesCompensationBilledRevenue DataSetType = "sales_compensation_billed_revenue" + DataSetTypeUsSalesAndUseTaxRecords DataSetType = "us_sales_and_use_tax_records" ) // Values returns all known values for DataSetType. Note that this can be expanded @@ -70,8 +70,8 @@ type SupportDataSetType string // Enum values for SupportDataSetType const ( - SupportDataSetTypeCustomer_support_contacts_data SupportDataSetType = "customer_support_contacts_data" - SupportDataSetTypeTest_customer_support_contacts_data SupportDataSetType = "test_customer_support_contacts_data" + SupportDataSetTypeCustomerSupportContactsData SupportDataSetType = "customer_support_contacts_data" + SupportDataSetTypeTestCustomerSupportContactsData SupportDataSetType = "test_customer_support_contacts_data" ) // Values returns all known values for SupportDataSetType. Note that this can be diff --git a/service/marketplaceentitlementservice/doc.go b/service/marketplaceentitlementservice/doc.go index 2fc81b4e908..881d3280e07 100644 --- a/service/marketplaceentitlementservice/doc.go +++ b/service/marketplaceentitlementservice/doc.go @@ -11,5 +11,5 @@ // some amount of data capacity in a multi-tenant database. Getting Entitlement // Records // -// * GetEntitlements- Gets the entitlements for a Marketplace product. +// * GetEntitlements- Gets the entitlements for a Marketplace product. 
package marketplaceentitlementservice diff --git a/service/marketplaceentitlementservice/types/enums.go b/service/marketplaceentitlementservice/types/enums.go index 3a614754573..4746028113c 100644 --- a/service/marketplaceentitlementservice/types/enums.go +++ b/service/marketplaceentitlementservice/types/enums.go @@ -6,8 +6,8 @@ type GetEntitlementFilterName string // Enum values for GetEntitlementFilterName const ( - GetEntitlementFilterNameCustomer_identifier GetEntitlementFilterName = "CUSTOMER_IDENTIFIER" - GetEntitlementFilterNameDimension GetEntitlementFilterName = "DIMENSION" + GetEntitlementFilterNameCustomerIdentifier GetEntitlementFilterName = "CUSTOMER_IDENTIFIER" + GetEntitlementFilterNameDimension GetEntitlementFilterName = "DIMENSION" ) // Values returns all known values for GetEntitlementFilterName. Note that this can diff --git a/service/marketplacemetering/api_op_RegisterUsage.go b/service/marketplacemetering/api_op_RegisterUsage.go index 4abdcd43517..36cf20bdf7b 100644 --- a/service/marketplacemetering/api_op_RegisterUsage.go +++ b/service/marketplacemetering/api_op_RegisterUsage.go @@ -19,8 +19,8 @@ import ( // explain the behavior of RegisterUsage. RegisterUsage performs two primary // functions: metering and entitlement. // -// * Entitlement: RegisterUsage allows -// you to verify that the customer running your paid software is subscribed to your +// * Entitlement: RegisterUsage allows you to +// verify that the customer running your paid software is subscribed to your // product on AWS Marketplace, enabling you to guard against unauthorized use. Your // container image that integrates with RegisterUsage is only required to guard // against unauthorized use at container startup, as such a @@ -30,19 +30,19 @@ import ( // CustomerNotSubscribedException, even if the customer unsubscribes while the // Amazon ECS task or Amazon EKS pod is still running. // -// * Metering: -// RegisterUsage meters software use per ECS task, per hour, or per pod for Amazon -// EKS with usage prorated to the second. A minimum of 1 minute of usage applies to -// tasks that are short lived. For example, if a customer has a 10 node Amazon ECS -// or Amazon EKS cluster and a service configured as a Daemon Set, then Amazon ECS -// or Amazon EKS will launch a task on all 10 cluster nodes and the customer will -// be charged: (10 * hourly_rate). Metering for software use is automatically -// handled by the AWS Marketplace Metering Control Plane -- your software is not -// required to perform any metering specific actions, other than call RegisterUsage -// once for metering of software use to commence. The AWS Marketplace Metering -// Control Plane will also continue to bill customers for running ECS tasks and -// Amazon EKS pods, regardless of the customers subscription state, removing the -// need for your software to perform entitlement checks at runtime. +// * Metering: RegisterUsage +// meters software use per ECS task, per hour, or per pod for Amazon EKS with usage +// prorated to the second. A minimum of 1 minute of usage applies to tasks that are +// short lived. For example, if a customer has a 10 node Amazon ECS or Amazon EKS +// cluster and a service configured as a Daemon Set, then Amazon ECS or Amazon EKS +// will launch a task on all 10 cluster nodes and the customer will be charged: (10 +// * hourly_rate). 
Metering for software use is automatically handled by the AWS +// Marketplace Metering Control Plane -- your software is not required to perform +// any metering specific actions, other than call RegisterUsage once for metering +// of software use to commence. The AWS Marketplace Metering Control Plane will +// also continue to bill customers for running ECS tasks and Amazon EKS pods, +// regardless of the customers subscription state, removing the need for your +// software to perform entitlement checks at runtime. func (c *Client) RegisterUsage(ctx context.Context, params *RegisterUsageInput, optFns ...func(*Options)) (*RegisterUsageOutput, error) { if params == nil { params = &RegisterUsageInput{} diff --git a/service/marketplacemetering/doc.go b/service/marketplacemetering/doc.go index e1c2e0d049e..af3f729db44 100644 --- a/service/marketplacemetering/doc.go +++ b/service/marketplacemetering/doc.go @@ -11,32 +11,32 @@ // (https://docs.aws.amazon.com/marketplace/latest/userguide/iam-user-policy-for-aws-marketplace-actions.html) // in the AWS Marketplace Seller Guide. Submitting Metering Records // -// * -// MeterUsage- Submits the metering record for a Marketplace product. MeterUsage is -// called from an EC2 instance or a container running on EKS or ECS. +// * MeterUsage- +// Submits the metering record for a Marketplace product. MeterUsage is called from +// an EC2 instance or a container running on EKS or ECS. // -// * -// BatchMeterUsage- Submits the metering record for a set of customers. -// BatchMeterUsage is called from a software-as-a-service (SaaS) -// application. +// * BatchMeterUsage- +// Submits the metering record for a set of customers. BatchMeterUsage is called +// from a software-as-a-service (SaaS) application. // // Accepting New Customers // -// * ResolveCustomer- Called by a SaaS -// application during the registration process. When a buyer visits your website -// during the registration process, the buyer submits a Registration Token through -// the browser. The Registration Token is resolved through this API to obtain a -// CustomerIdentifier and Product Code. -// -// Entitlement and Metering for Paid -// Container Products -// -// * Paid container software products sold through AWS -// Marketplace must integrate with the AWS Marketplace Metering Service and call -// the RegisterUsage operation for software entitlement and metering. Free and BYOL -// products for Amazon ECS or Amazon EKS aren't required to call RegisterUsage, but -// you can do so if you want to receive usage data in your seller reports. For more -// information on using the RegisterUsage operation, see Container-Based Products +// * +// ResolveCustomer- Called by a SaaS application during the registration process. +// When a buyer visits your website during the registration process, the buyer +// submits a Registration Token through the browser. The Registration Token is +// resolved through this API to obtain a CustomerIdentifier and Product +// Code. +// +// Entitlement and Metering for Paid Container Products +// +// * Paid container +// software products sold through AWS Marketplace must integrate with the AWS +// Marketplace Metering Service and call the RegisterUsage operation for software +// entitlement and metering. Free and BYOL products for Amazon ECS or Amazon EKS +// aren't required to call RegisterUsage, but you can do so if you want to receive +// usage data in your seller reports. 
For more information on using the +// RegisterUsage operation, see Container-Based Products // (https://docs.aws.amazon.com/marketplace/latest/userguide/container-based-products.html). // // BatchMeterUsage diff --git a/service/marketplacemetering/types/enums.go b/service/marketplacemetering/types/enums.go index 9cfefcfd311..592309b530d 100644 --- a/service/marketplacemetering/types/enums.go +++ b/service/marketplacemetering/types/enums.go @@ -6,9 +6,9 @@ type UsageRecordResultStatus string // Enum values for UsageRecordResultStatus const ( - UsageRecordResultStatusSuccess UsageRecordResultStatus = "Success" - UsageRecordResultStatusCustomer_not_subscribed UsageRecordResultStatus = "CustomerNotSubscribed" - UsageRecordResultStatusDuplicate_record UsageRecordResultStatus = "DuplicateRecord" + UsageRecordResultStatusSuccess UsageRecordResultStatus = "Success" + UsageRecordResultStatusCustomerNotSubscribed UsageRecordResultStatus = "CustomerNotSubscribed" + UsageRecordResultStatusDuplicateRecord UsageRecordResultStatus = "DuplicateRecord" ) // Values returns all known values for UsageRecordResultStatus. Note that this can diff --git a/service/marketplacemetering/types/types.go b/service/marketplacemetering/types/types.go index d5b701ffb71..5be12c51877 100644 --- a/service/marketplacemetering/types/types.go +++ b/service/marketplacemetering/types/types.go @@ -46,17 +46,17 @@ type UsageRecordResult struct { // The UsageRecordResult Status indicates the status of an individual UsageRecord // processed by BatchMeterUsage. // - // * Success- The UsageRecord was accepted and + // * Success- The UsageRecord was accepted and // honored by BatchMeterUsage. // - // * CustomerNotSubscribed- The CustomerIdentifier + // * CustomerNotSubscribed- The CustomerIdentifier // specified is not subscribed to your product. The UsageRecord was not honored. // Future UsageRecords for this customer will fail until the customer subscribes to // your product. // - // * DuplicateRecord- Indicates that the UsageRecord was invalid - // and not honored. A previously metered UsageRecord had the same customer, - // dimension, and time, but a different quantity. + // * DuplicateRecord- Indicates that the UsageRecord was invalid and + // not honored. A previously metered UsageRecord had the same customer, dimension, + // and time, but a different quantity. Status UsageRecordResultStatus // The UsageRecord that was part of the BatchMeterUsage request. diff --git a/service/mediaconnect/types/enums.go b/service/mediaconnect/types/enums.go index 1f2bca1ea99..f67869b381c 100644 --- a/service/mediaconnect/types/enums.go +++ b/service/mediaconnect/types/enums.go @@ -60,8 +60,8 @@ type KeyType string // Enum values for KeyType const ( - KeyTypeSpeke KeyType = "speke" - KeyTypeStatic_key KeyType = "static-key" + KeyTypeSpeke KeyType = "speke" + KeyTypeStaticKey KeyType = "static-key" ) // Values returns all known values for KeyType. Note that this can be expanded in @@ -94,11 +94,11 @@ type Protocol string // Enum values for Protocol const ( - ProtocolZixi_push Protocol = "zixi-push" - ProtocolRtp_fec Protocol = "rtp-fec" - ProtocolRtp Protocol = "rtp" - ProtocolZixi_pull Protocol = "zixi-pull" - ProtocolRist Protocol = "rist" + ProtocolZixiPush Protocol = "zixi-push" + ProtocolRtpFec Protocol = "rtp-fec" + ProtocolRtp Protocol = "rtp" + ProtocolZixiPull Protocol = "zixi-pull" + ProtocolRist Protocol = "rist" ) // Values returns all known values for Protocol. 
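The UsageRecordResult documentation above translates directly into result handling after BatchMeterUsage; a minimal sketch with the renamed status constants (import path assumed, helper name hypothetical):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/marketplacemetering/types"
)

// splitResults partitions BatchMeterUsage results by status: Success records were
// honored; CustomerNotSubscribed and DuplicateRecord records were not.
func splitResults(results []types.UsageRecordResult) (honored, rejected []types.UsageRecordResult) {
	for _, r := range results {
		switch r.Status {
		case types.UsageRecordResultStatusSuccess:
			honored = append(honored, r)
		case types.UsageRecordResultStatusCustomerNotSubscribed, // was ...Customer_not_subscribed
			types.UsageRecordResultStatusDuplicateRecord: // was ...Duplicate_record
			rejected = append(rejected, r)
		}
	}
	return honored, rejected
}

func main() {
	h, rej := splitResults(nil)
	fmt.Println(len(h), len(rej))
}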
Note that this can be expanded in @@ -140,7 +140,7 @@ type ResourceType string // Enum values for ResourceType const ( - ResourceTypeMbps_outbound_bandwidth ResourceType = "Mbps_Outbound_Bandwidth" + ResourceTypeMbpsOutboundBandwidth ResourceType = "Mbps_Outbound_Bandwidth" ) // Values returns all known values for ResourceType. Note that this can be expanded diff --git a/service/mediaconvert/types/enums.go b/service/mediaconvert/types/enums.go index 32e59d491e1..8b0fd6c205e 100644 --- a/service/mediaconvert/types/enums.go +++ b/service/mediaconvert/types/enums.go @@ -6,8 +6,8 @@ type AacAudioDescriptionBroadcasterMix string // Enum values for AacAudioDescriptionBroadcasterMix const ( - AacAudioDescriptionBroadcasterMixBroadcaster_mixed_ad AacAudioDescriptionBroadcasterMix = "BROADCASTER_MIXED_AD" - AacAudioDescriptionBroadcasterMixNormal AacAudioDescriptionBroadcasterMix = "NORMAL" + AacAudioDescriptionBroadcasterMixBroadcasterMixedAd AacAudioDescriptionBroadcasterMix = "BROADCASTER_MIXED_AD" + AacAudioDescriptionBroadcasterMixNormal AacAudioDescriptionBroadcasterMix = "NORMAL" ) // Values returns all known values for AacAudioDescriptionBroadcasterMix. Note that @@ -45,11 +45,11 @@ type AacCodingMode string // Enum values for AacCodingMode const ( - AacCodingModeAd_receiver_mix AacCodingMode = "AD_RECEIVER_MIX" - AacCodingModeCoding_mode_1_0 AacCodingMode = "CODING_MODE_1_0" - AacCodingModeCoding_mode_1_1 AacCodingMode = "CODING_MODE_1_1" - AacCodingModeCoding_mode_2_0 AacCodingMode = "CODING_MODE_2_0" - AacCodingModeCoding_mode_5_1 AacCodingMode = "CODING_MODE_5_1" + AacCodingModeAdReceiverMix AacCodingMode = "AD_RECEIVER_MIX" + AacCodingModeCodingMode10 AacCodingMode = "CODING_MODE_1_0" + AacCodingModeCodingMode11 AacCodingMode = "CODING_MODE_1_1" + AacCodingModeCodingMode20 AacCodingMode = "CODING_MODE_2_0" + AacCodingModeCodingMode51 AacCodingMode = "CODING_MODE_5_1" ) // Values returns all known values for AacCodingMode. Note that this can be @@ -87,8 +87,8 @@ type AacRawFormat string // Enum values for AacRawFormat const ( - AacRawFormatLatm_loas AacRawFormat = "LATM_LOAS" - AacRawFormatNone AacRawFormat = "NONE" + AacRawFormatLatmLoas AacRawFormat = "LATM_LOAS" + AacRawFormatNone AacRawFormat = "NONE" ) // Values returns all known values for AacRawFormat. Note that this can be expanded @@ -123,10 +123,10 @@ type AacVbrQuality string // Enum values for AacVbrQuality const ( - AacVbrQualityLow AacVbrQuality = "LOW" - AacVbrQualityMedium_low AacVbrQuality = "MEDIUM_LOW" - AacVbrQualityMedium_high AacVbrQuality = "MEDIUM_HIGH" - AacVbrQualityHigh AacVbrQuality = "HIGH" + AacVbrQualityLow AacVbrQuality = "LOW" + AacVbrQualityMediumLow AacVbrQuality = "MEDIUM_LOW" + AacVbrQualityMediumHigh AacVbrQuality = "MEDIUM_HIGH" + AacVbrQualityHigh AacVbrQuality = "HIGH" ) // Values returns all known values for AacVbrQuality. 
Note that this can be @@ -145,14 +145,14 @@ type Ac3BitstreamMode string // Enum values for Ac3BitstreamMode const ( - Ac3BitstreamModeComplete_main Ac3BitstreamMode = "COMPLETE_MAIN" - Ac3BitstreamModeCommentary Ac3BitstreamMode = "COMMENTARY" - Ac3BitstreamModeDialogue Ac3BitstreamMode = "DIALOGUE" - Ac3BitstreamModeEmergency Ac3BitstreamMode = "EMERGENCY" - Ac3BitstreamModeHearing_impaired Ac3BitstreamMode = "HEARING_IMPAIRED" - Ac3BitstreamModeMusic_and_effects Ac3BitstreamMode = "MUSIC_AND_EFFECTS" - Ac3BitstreamModeVisually_impaired Ac3BitstreamMode = "VISUALLY_IMPAIRED" - Ac3BitstreamModeVoice_over Ac3BitstreamMode = "VOICE_OVER" + Ac3BitstreamModeCompleteMain Ac3BitstreamMode = "COMPLETE_MAIN" + Ac3BitstreamModeCommentary Ac3BitstreamMode = "COMMENTARY" + Ac3BitstreamModeDialogue Ac3BitstreamMode = "DIALOGUE" + Ac3BitstreamModeEmergency Ac3BitstreamMode = "EMERGENCY" + Ac3BitstreamModeHearingImpaired Ac3BitstreamMode = "HEARING_IMPAIRED" + Ac3BitstreamModeMusicAndEffects Ac3BitstreamMode = "MUSIC_AND_EFFECTS" + Ac3BitstreamModeVisuallyImpaired Ac3BitstreamMode = "VISUALLY_IMPAIRED" + Ac3BitstreamModeVoiceOver Ac3BitstreamMode = "VOICE_OVER" ) // Values returns all known values for Ac3BitstreamMode. Note that this can be @@ -175,10 +175,10 @@ type Ac3CodingMode string // Enum values for Ac3CodingMode const ( - Ac3CodingModeCoding_mode_1_0 Ac3CodingMode = "CODING_MODE_1_0" - Ac3CodingModeCoding_mode_1_1 Ac3CodingMode = "CODING_MODE_1_1" - Ac3CodingModeCoding_mode_2_0 Ac3CodingMode = "CODING_MODE_2_0" - Ac3CodingModeCoding_mode_3_2_lfe Ac3CodingMode = "CODING_MODE_3_2_LFE" + Ac3CodingModeCodingMode10 Ac3CodingMode = "CODING_MODE_1_0" + Ac3CodingModeCodingMode11 Ac3CodingMode = "CODING_MODE_1_1" + Ac3CodingModeCodingMode20 Ac3CodingMode = "CODING_MODE_2_0" + Ac3CodingModeCodingMode32Lfe Ac3CodingMode = "CODING_MODE_3_2_LFE" ) // Values returns all known values for Ac3CodingMode. Note that this can be @@ -197,8 +197,8 @@ type Ac3DynamicRangeCompressionProfile string // Enum values for Ac3DynamicRangeCompressionProfile const ( - Ac3DynamicRangeCompressionProfileFilm_standard Ac3DynamicRangeCompressionProfile = "FILM_STANDARD" - Ac3DynamicRangeCompressionProfileNone Ac3DynamicRangeCompressionProfile = "NONE" + Ac3DynamicRangeCompressionProfileFilmStandard Ac3DynamicRangeCompressionProfile = "FILM_STANDARD" + Ac3DynamicRangeCompressionProfileNone Ac3DynamicRangeCompressionProfile = "NONE" ) // Values returns all known values for Ac3DynamicRangeCompressionProfile. Note that @@ -234,8 +234,8 @@ type Ac3MetadataControl string // Enum values for Ac3MetadataControl const ( - Ac3MetadataControlFollow_input Ac3MetadataControl = "FOLLOW_INPUT" - Ac3MetadataControlUse_configured Ac3MetadataControl = "USE_CONFIGURED" + Ac3MetadataControlFollowInput Ac3MetadataControl = "FOLLOW_INPUT" + Ac3MetadataControlUseConfigured Ac3MetadataControl = "USE_CONFIGURED" ) // Values returns all known values for Ac3MetadataControl. 
Note that this can be @@ -272,10 +272,10 @@ type AccelerationStatus string // Enum values for AccelerationStatus const ( - AccelerationStatusNot_applicable AccelerationStatus = "NOT_APPLICABLE" - AccelerationStatusIn_progress AccelerationStatus = "IN_PROGRESS" - AccelerationStatusAccelerated AccelerationStatus = "ACCELERATED" - AccelerationStatusNot_accelerated AccelerationStatus = "NOT_ACCELERATED" + AccelerationStatusNotApplicable AccelerationStatus = "NOT_APPLICABLE" + AccelerationStatusInProgress AccelerationStatus = "IN_PROGRESS" + AccelerationStatusAccelerated AccelerationStatus = "ACCELERATED" + AccelerationStatusNotAccelerated AccelerationStatus = "NOT_ACCELERATED" ) // Values returns all known values for AccelerationStatus. Note that this can be @@ -314,8 +314,8 @@ type AlphaBehavior string // Enum values for AlphaBehavior const ( - AlphaBehaviorDiscard AlphaBehavior = "DISCARD" - AlphaBehaviorRemap_to_luma AlphaBehavior = "REMAP_TO_LUMA" + AlphaBehaviorDiscard AlphaBehavior = "DISCARD" + AlphaBehaviorRemapToLuma AlphaBehavior = "REMAP_TO_LUMA" ) // Values returns all known values for AlphaBehavior. Note that this can be @@ -350,8 +350,8 @@ type AncillaryTerminateCaptions string // Enum values for AncillaryTerminateCaptions const ( - AncillaryTerminateCaptionsEnd_of_input AncillaryTerminateCaptions = "END_OF_INPUT" - AncillaryTerminateCaptionsDisabled AncillaryTerminateCaptions = "DISABLED" + AncillaryTerminateCaptionsEndOfInput AncillaryTerminateCaptions = "END_OF_INPUT" + AncillaryTerminateCaptionsDisabled AncillaryTerminateCaptions = "DISABLED" ) // Values returns all known values for AncillaryTerminateCaptions. Note that this @@ -437,7 +437,7 @@ const ( AudioCodecAiff AudioCodec = "AIFF" AudioCodecAc3 AudioCodec = "AC3" AudioCodecEac3 AudioCodec = "EAC3" - AudioCodecEac3_atmos AudioCodec = "EAC3_ATMOS" + AudioCodecEac3Atmos AudioCodec = "EAC3_ATMOS" AudioCodecVorbis AudioCodec = "VORBIS" AudioCodecOpus AudioCodec = "OPUS" AudioCodecPassthrough AudioCodec = "PASSTHROUGH" @@ -466,8 +466,8 @@ type AudioDefaultSelection string // Enum values for AudioDefaultSelection const ( - AudioDefaultSelectionDefault AudioDefaultSelection = "DEFAULT" - AudioDefaultSelectionNot_default AudioDefaultSelection = "NOT_DEFAULT" + AudioDefaultSelectionDefault AudioDefaultSelection = "DEFAULT" + AudioDefaultSelectionNotDefault AudioDefaultSelection = "NOT_DEFAULT" ) // Values returns all known values for AudioDefaultSelection. Note that this can be @@ -484,8 +484,8 @@ type AudioLanguageCodeControl string // Enum values for AudioLanguageCodeControl const ( - AudioLanguageCodeControlFollow_input AudioLanguageCodeControl = "FOLLOW_INPUT" - AudioLanguageCodeControlUse_configured AudioLanguageCodeControl = "USE_CONFIGURED" + AudioLanguageCodeControlFollowInput AudioLanguageCodeControl = "FOLLOW_INPUT" + AudioLanguageCodeControlUseConfigured AudioLanguageCodeControl = "USE_CONFIGURED" ) // Values returns all known values for AudioLanguageCodeControl. 
Note that this can @@ -502,10 +502,10 @@ type AudioNormalizationAlgorithm string // Enum values for AudioNormalizationAlgorithm const ( - AudioNormalizationAlgorithmItu_bs_1770_1 AudioNormalizationAlgorithm = "ITU_BS_1770_1" - AudioNormalizationAlgorithmItu_bs_1770_2 AudioNormalizationAlgorithm = "ITU_BS_1770_2" - AudioNormalizationAlgorithmItu_bs_1770_3 AudioNormalizationAlgorithm = "ITU_BS_1770_3" - AudioNormalizationAlgorithmItu_bs_1770_4 AudioNormalizationAlgorithm = "ITU_BS_1770_4" + AudioNormalizationAlgorithmItuBs17701 AudioNormalizationAlgorithm = "ITU_BS_1770_1" + AudioNormalizationAlgorithmItuBs17702 AudioNormalizationAlgorithm = "ITU_BS_1770_2" + AudioNormalizationAlgorithmItuBs17703 AudioNormalizationAlgorithm = "ITU_BS_1770_3" + AudioNormalizationAlgorithmItuBs17704 AudioNormalizationAlgorithm = "ITU_BS_1770_4" ) // Values returns all known values for AudioNormalizationAlgorithm. Note that this @@ -524,8 +524,8 @@ type AudioNormalizationAlgorithmControl string // Enum values for AudioNormalizationAlgorithmControl const ( - AudioNormalizationAlgorithmControlCorrect_audio AudioNormalizationAlgorithmControl = "CORRECT_AUDIO" - AudioNormalizationAlgorithmControlMeasure_only AudioNormalizationAlgorithmControl = "MEASURE_ONLY" + AudioNormalizationAlgorithmControlCorrectAudio AudioNormalizationAlgorithmControl = "CORRECT_AUDIO" + AudioNormalizationAlgorithmControlMeasureOnly AudioNormalizationAlgorithmControl = "MEASURE_ONLY" ) // Values returns all known values for AudioNormalizationAlgorithmControl. Note @@ -543,8 +543,8 @@ type AudioNormalizationLoudnessLogging string // Enum values for AudioNormalizationLoudnessLogging const ( - AudioNormalizationLoudnessLoggingLog AudioNormalizationLoudnessLogging = "LOG" - AudioNormalizationLoudnessLoggingDont_log AudioNormalizationLoudnessLogging = "DONT_LOG" + AudioNormalizationLoudnessLoggingLog AudioNormalizationLoudnessLogging = "LOG" + AudioNormalizationLoudnessLoggingDontLog AudioNormalizationLoudnessLogging = "DONT_LOG" ) // Values returns all known values for AudioNormalizationLoudnessLogging. Note that @@ -562,8 +562,8 @@ type AudioNormalizationPeakCalculation string // Enum values for AudioNormalizationPeakCalculation const ( - AudioNormalizationPeakCalculationTrue_peak AudioNormalizationPeakCalculation = "TRUE_PEAK" - AudioNormalizationPeakCalculationNone AudioNormalizationPeakCalculation = "NONE" + AudioNormalizationPeakCalculationTruePeak AudioNormalizationPeakCalculation = "TRUE_PEAK" + AudioNormalizationPeakCalculationNone AudioNormalizationPeakCalculation = "NONE" ) // Values returns all known values for AudioNormalizationPeakCalculation. Note that @@ -581,9 +581,9 @@ type AudioSelectorType string // Enum values for AudioSelectorType const ( - AudioSelectorTypePid AudioSelectorType = "PID" - AudioSelectorTypeTrack AudioSelectorType = "TRACK" - AudioSelectorTypeLanguage_code AudioSelectorType = "LANGUAGE_CODE" + AudioSelectorTypePid AudioSelectorType = "PID" + AudioSelectorTypeTrack AudioSelectorType = "TRACK" + AudioSelectorTypeLanguageCode AudioSelectorType = "LANGUAGE_CODE" ) // Values returns all known values for AudioSelectorType. 
Note that this can be @@ -601,8 +601,8 @@ type AudioTypeControl string // Enum values for AudioTypeControl const ( - AudioTypeControlFollow_input AudioTypeControl = "FOLLOW_INPUT" - AudioTypeControlUse_configured AudioTypeControl = "USE_CONFIGURED" + AudioTypeControlFollowInput AudioTypeControl = "FOLLOW_INPUT" + AudioTypeControlUseConfigured AudioTypeControl = "USE_CONFIGURED" ) // Values returns all known values for AudioTypeControl. Note that this can be @@ -645,8 +645,8 @@ type Av1FramerateControl string // Enum values for Av1FramerateControl const ( - Av1FramerateControlInitialize_from_source Av1FramerateControl = "INITIALIZE_FROM_SOURCE" - Av1FramerateControlSpecified Av1FramerateControl = "SPECIFIED" + Av1FramerateControlInitializeFromSource Av1FramerateControl = "INITIALIZE_FROM_SOURCE" + Av1FramerateControlSpecified Av1FramerateControl = "SPECIFIED" ) // Values returns all known values for Av1FramerateControl. Note that this can be @@ -663,9 +663,9 @@ type Av1FramerateConversionAlgorithm string // Enum values for Av1FramerateConversionAlgorithm const ( - Av1FramerateConversionAlgorithmDuplicate_drop Av1FramerateConversionAlgorithm = "DUPLICATE_DROP" - Av1FramerateConversionAlgorithmInterpolate Av1FramerateConversionAlgorithm = "INTERPOLATE" - Av1FramerateConversionAlgorithmFrameformer Av1FramerateConversionAlgorithm = "FRAMEFORMER" + Av1FramerateConversionAlgorithmDuplicateDrop Av1FramerateConversionAlgorithm = "DUPLICATE_DROP" + Av1FramerateConversionAlgorithmInterpolate Av1FramerateConversionAlgorithm = "INTERPOLATE" + Av1FramerateConversionAlgorithmFrameformer Av1FramerateConversionAlgorithm = "FRAMEFORMER" ) // Values returns all known values for Av1FramerateConversionAlgorithm. Note that @@ -719,9 +719,9 @@ type AvcIntraClass string // Enum values for AvcIntraClass const ( - AvcIntraClassClass_50 AvcIntraClass = "CLASS_50" - AvcIntraClassClass_100 AvcIntraClass = "CLASS_100" - AvcIntraClassClass_200 AvcIntraClass = "CLASS_200" + AvcIntraClassClass50 AvcIntraClass = "CLASS_50" + AvcIntraClassClass100 AvcIntraClass = "CLASS_100" + AvcIntraClassClass200 AvcIntraClass = "CLASS_200" ) // Values returns all known values for AvcIntraClass. Note that this can be @@ -739,8 +739,8 @@ type AvcIntraFramerateControl string // Enum values for AvcIntraFramerateControl const ( - AvcIntraFramerateControlInitialize_from_source AvcIntraFramerateControl = "INITIALIZE_FROM_SOURCE" - AvcIntraFramerateControlSpecified AvcIntraFramerateControl = "SPECIFIED" + AvcIntraFramerateControlInitializeFromSource AvcIntraFramerateControl = "INITIALIZE_FROM_SOURCE" + AvcIntraFramerateControlSpecified AvcIntraFramerateControl = "SPECIFIED" ) // Values returns all known values for AvcIntraFramerateControl. 
Note that this can @@ -757,9 +757,9 @@ type AvcIntraFramerateConversionAlgorithm string // Enum values for AvcIntraFramerateConversionAlgorithm const ( - AvcIntraFramerateConversionAlgorithmDuplicate_drop AvcIntraFramerateConversionAlgorithm = "DUPLICATE_DROP" - AvcIntraFramerateConversionAlgorithmInterpolate AvcIntraFramerateConversionAlgorithm = "INTERPOLATE" - AvcIntraFramerateConversionAlgorithmFrameformer AvcIntraFramerateConversionAlgorithm = "FRAMEFORMER" + AvcIntraFramerateConversionAlgorithmDuplicateDrop AvcIntraFramerateConversionAlgorithm = "DUPLICATE_DROP" + AvcIntraFramerateConversionAlgorithmInterpolate AvcIntraFramerateConversionAlgorithm = "INTERPOLATE" + AvcIntraFramerateConversionAlgorithmFrameformer AvcIntraFramerateConversionAlgorithm = "FRAMEFORMER" ) // Values returns all known values for AvcIntraFramerateConversionAlgorithm. Note @@ -778,11 +778,11 @@ type AvcIntraInterlaceMode string // Enum values for AvcIntraInterlaceMode const ( - AvcIntraInterlaceModeProgressive AvcIntraInterlaceMode = "PROGRESSIVE" - AvcIntraInterlaceModeTop_field AvcIntraInterlaceMode = "TOP_FIELD" - AvcIntraInterlaceModeBottom_field AvcIntraInterlaceMode = "BOTTOM_FIELD" - AvcIntraInterlaceModeFollow_top_field AvcIntraInterlaceMode = "FOLLOW_TOP_FIELD" - AvcIntraInterlaceModeFollow_bottom_field AvcIntraInterlaceMode = "FOLLOW_BOTTOM_FIELD" + AvcIntraInterlaceModeProgressive AvcIntraInterlaceMode = "PROGRESSIVE" + AvcIntraInterlaceModeTopField AvcIntraInterlaceMode = "TOP_FIELD" + AvcIntraInterlaceModeBottomField AvcIntraInterlaceMode = "BOTTOM_FIELD" + AvcIntraInterlaceModeFollowTopField AvcIntraInterlaceMode = "FOLLOW_TOP_FIELD" + AvcIntraInterlaceModeFollowBottomField AvcIntraInterlaceMode = "FOLLOW_BOTTOM_FIELD" ) // Values returns all known values for AvcIntraInterlaceMode. Note that this can be @@ -838,10 +838,10 @@ type BillingTagsSource string // Enum values for BillingTagsSource const ( - BillingTagsSourceQueue BillingTagsSource = "QUEUE" - BillingTagsSourcePreset BillingTagsSource = "PRESET" - BillingTagsSourceJob_template BillingTagsSource = "JOB_TEMPLATE" - BillingTagsSourceJob BillingTagsSource = "JOB" + BillingTagsSourceQueue BillingTagsSource = "QUEUE" + BillingTagsSourcePreset BillingTagsSource = "PRESET" + BillingTagsSourceJobTemplate BillingTagsSource = "JOB_TEMPLATE" + BillingTagsSourceJob BillingTagsSource = "JOB" ) // Values returns all known values for BillingTagsSource. 
Note that this can be @@ -971,7 +971,7 @@ type BurninSubtitleTeletextSpacing string // Enum values for BurninSubtitleTeletextSpacing const ( - BurninSubtitleTeletextSpacingFixed_grid BurninSubtitleTeletextSpacing = "FIXED_GRID" + BurninSubtitleTeletextSpacingFixedGrid BurninSubtitleTeletextSpacing = "FIXED_GRID" BurninSubtitleTeletextSpacingProportional BurninSubtitleTeletextSpacing = "PROPORTIONAL" ) @@ -990,18 +990,18 @@ type CaptionDestinationType string // Enum values for CaptionDestinationType const ( - CaptionDestinationTypeBurn_in CaptionDestinationType = "BURN_IN" - CaptionDestinationTypeDvb_sub CaptionDestinationType = "DVB_SUB" - CaptionDestinationTypeEmbedded CaptionDestinationType = "EMBEDDED" - CaptionDestinationTypeEmbedded_plus_scte20 CaptionDestinationType = "EMBEDDED_PLUS_SCTE20" - CaptionDestinationTypeImsc CaptionDestinationType = "IMSC" - CaptionDestinationTypeScte20_plus_embedded CaptionDestinationType = "SCTE20_PLUS_EMBEDDED" - CaptionDestinationTypeScc CaptionDestinationType = "SCC" - CaptionDestinationTypeSrt CaptionDestinationType = "SRT" - CaptionDestinationTypeSmi CaptionDestinationType = "SMI" - CaptionDestinationTypeTeletext CaptionDestinationType = "TELETEXT" - CaptionDestinationTypeTtml CaptionDestinationType = "TTML" - CaptionDestinationTypeWebvtt CaptionDestinationType = "WEBVTT" + CaptionDestinationTypeBurnIn CaptionDestinationType = "BURN_IN" + CaptionDestinationTypeDvbSub CaptionDestinationType = "DVB_SUB" + CaptionDestinationTypeEmbedded CaptionDestinationType = "EMBEDDED" + CaptionDestinationTypeEmbeddedPlusScte20 CaptionDestinationType = "EMBEDDED_PLUS_SCTE20" + CaptionDestinationTypeImsc CaptionDestinationType = "IMSC" + CaptionDestinationTypeScte20PlusEmbedded CaptionDestinationType = "SCTE20_PLUS_EMBEDDED" + CaptionDestinationTypeScc CaptionDestinationType = "SCC" + CaptionDestinationTypeSrt CaptionDestinationType = "SRT" + CaptionDestinationTypeSmi CaptionDestinationType = "SMI" + CaptionDestinationTypeTeletext CaptionDestinationType = "TELETEXT" + CaptionDestinationTypeTtml CaptionDestinationType = "TTML" + CaptionDestinationTypeWebvtt CaptionDestinationType = "WEBVTT" ) // Values returns all known values for CaptionDestinationType. 
Note that this can @@ -1028,18 +1028,18 @@ type CaptionSourceType string // Enum values for CaptionSourceType const ( - CaptionSourceTypeAncillary CaptionSourceType = "ANCILLARY" - CaptionSourceTypeDvb_sub CaptionSourceType = "DVB_SUB" - CaptionSourceTypeEmbedded CaptionSourceType = "EMBEDDED" - CaptionSourceTypeScte20 CaptionSourceType = "SCTE20" - CaptionSourceTypeScc CaptionSourceType = "SCC" - CaptionSourceTypeTtml CaptionSourceType = "TTML" - CaptionSourceTypeStl CaptionSourceType = "STL" - CaptionSourceTypeSrt CaptionSourceType = "SRT" - CaptionSourceTypeSmi CaptionSourceType = "SMI" - CaptionSourceTypeTeletext CaptionSourceType = "TELETEXT" - CaptionSourceTypeNull_source CaptionSourceType = "NULL_SOURCE" - CaptionSourceTypeImsc CaptionSourceType = "IMSC" + CaptionSourceTypeAncillary CaptionSourceType = "ANCILLARY" + CaptionSourceTypeDvbSub CaptionSourceType = "DVB_SUB" + CaptionSourceTypeEmbedded CaptionSourceType = "EMBEDDED" + CaptionSourceTypeScte20 CaptionSourceType = "SCTE20" + CaptionSourceTypeScc CaptionSourceType = "SCC" + CaptionSourceTypeTtml CaptionSourceType = "TTML" + CaptionSourceTypeStl CaptionSourceType = "STL" + CaptionSourceTypeSrt CaptionSourceType = "SRT" + CaptionSourceTypeSmi CaptionSourceType = "SMI" + CaptionSourceTypeTeletext CaptionSourceType = "TELETEXT" + CaptionSourceTypeNullSource CaptionSourceType = "NULL_SOURCE" + CaptionSourceTypeImsc CaptionSourceType = "IMSC" ) // Values returns all known values for CaptionSourceType. Note that this can be @@ -1084,8 +1084,8 @@ type CmafCodecSpecification string // Enum values for CmafCodecSpecification const ( - CmafCodecSpecificationRfc_6381 CmafCodecSpecification = "RFC_6381" - CmafCodecSpecificationRfc_4281 CmafCodecSpecification = "RFC_4281" + CmafCodecSpecificationRfc6381 CmafCodecSpecification = "RFC_6381" + CmafCodecSpecificationRfc4281 CmafCodecSpecification = "RFC_4281" ) // Values returns all known values for CmafCodecSpecification. Note that this can @@ -1102,8 +1102,8 @@ type CmafEncryptionType string // Enum values for CmafEncryptionType const ( - CmafEncryptionTypeSample_aes CmafEncryptionType = "SAMPLE_AES" - CmafEncryptionTypeAes_ctr CmafEncryptionType = "AES_CTR" + CmafEncryptionTypeSampleAes CmafEncryptionType = "SAMPLE_AES" + CmafEncryptionTypeAesCtr CmafEncryptionType = "AES_CTR" ) // Values returns all known values for CmafEncryptionType. Note that this can be @@ -1139,8 +1139,8 @@ type CmafKeyProviderType string // Enum values for CmafKeyProviderType const ( - CmafKeyProviderTypeSpeke CmafKeyProviderType = "SPEKE" - CmafKeyProviderTypeStatic_key CmafKeyProviderType = "STATIC_KEY" + CmafKeyProviderTypeSpeke CmafKeyProviderType = "SPEKE" + CmafKeyProviderTypeStaticKey CmafKeyProviderType = "STATIC_KEY" ) // Values returns all known values for CmafKeyProviderType. Note that this can be @@ -1175,8 +1175,8 @@ type CmafManifestDurationFormat string // Enum values for CmafManifestDurationFormat const ( - CmafManifestDurationFormatFloating_point CmafManifestDurationFormat = "FLOATING_POINT" - CmafManifestDurationFormatInteger CmafManifestDurationFormat = "INTEGER" + CmafManifestDurationFormatFloatingPoint CmafManifestDurationFormat = "FLOATING_POINT" + CmafManifestDurationFormatInteger CmafManifestDurationFormat = "INTEGER" ) // Values returns all known values for CmafManifestDurationFormat. 
Note that this @@ -1193,8 +1193,8 @@ type CmafMpdProfile string // Enum values for CmafMpdProfile const ( - CmafMpdProfileMain_profile CmafMpdProfile = "MAIN_PROFILE" - CmafMpdProfileOn_demand_profile CmafMpdProfile = "ON_DEMAND_PROFILE" + CmafMpdProfileMainProfile CmafMpdProfile = "MAIN_PROFILE" + CmafMpdProfileOnDemandProfile CmafMpdProfile = "ON_DEMAND_PROFILE" ) // Values returns all known values for CmafMpdProfile. Note that this can be @@ -1211,8 +1211,8 @@ type CmafSegmentControl string // Enum values for CmafSegmentControl const ( - CmafSegmentControlSingle_file CmafSegmentControl = "SINGLE_FILE" - CmafSegmentControlSegmented_files CmafSegmentControl = "SEGMENTED_FILES" + CmafSegmentControlSingleFile CmafSegmentControl = "SINGLE_FILE" + CmafSegmentControlSegmentedFiles CmafSegmentControl = "SEGMENTED_FILES" ) // Values returns all known values for CmafSegmentControl. Note that this can be @@ -1356,11 +1356,11 @@ type ColorSpace string // Enum values for ColorSpace const ( - ColorSpaceFollow ColorSpace = "FOLLOW" - ColorSpaceRec_601 ColorSpace = "REC_601" - ColorSpaceRec_709 ColorSpace = "REC_709" - ColorSpaceHdr10 ColorSpace = "HDR10" - ColorSpaceHlg_2020 ColorSpace = "HLG_2020" + ColorSpaceFollow ColorSpace = "FOLLOW" + ColorSpaceRec601 ColorSpace = "REC_601" + ColorSpaceRec709 ColorSpace = "REC_709" + ColorSpaceHdr10 ColorSpace = "HDR10" + ColorSpaceHlg2020 ColorSpace = "HLG_2020" ) // Values returns all known values for ColorSpace. Note that this can be expanded @@ -1380,11 +1380,11 @@ type ColorSpaceConversion string // Enum values for ColorSpaceConversion const ( - ColorSpaceConversionNone ColorSpaceConversion = "NONE" - ColorSpaceConversionForce_601 ColorSpaceConversion = "FORCE_601" - ColorSpaceConversionForce_709 ColorSpaceConversion = "FORCE_709" - ColorSpaceConversionForce_hdr10 ColorSpaceConversion = "FORCE_HDR10" - ColorSpaceConversionForce_hlg_2020 ColorSpaceConversion = "FORCE_HLG_2020" + ColorSpaceConversionNone ColorSpaceConversion = "NONE" + ColorSpaceConversionForce601 ColorSpaceConversion = "FORCE_601" + ColorSpaceConversionForce709 ColorSpaceConversion = "FORCE_709" + ColorSpaceConversionForceHdr10 ColorSpaceConversion = "FORCE_HDR10" + ColorSpaceConversionForceHlg2020 ColorSpaceConversion = "FORCE_HLG_2020" ) // Values returns all known values for ColorSpaceConversion. Note that this can be @@ -1422,7 +1422,7 @@ type Commitment string // Enum values for Commitment const ( - CommitmentOne_year Commitment = "ONE_YEAR" + CommitmentOneYear Commitment = "ONE_YEAR" ) // Values returns all known values for Commitment. Note that this can be expanded @@ -1474,8 +1474,8 @@ type DashIsoHbbtvCompliance string // Enum values for DashIsoHbbtvCompliance const ( - DashIsoHbbtvComplianceHbbtv_1_5 DashIsoHbbtvCompliance = "HBBTV_1_5" - DashIsoHbbtvComplianceNone DashIsoHbbtvCompliance = "NONE" + DashIsoHbbtvComplianceHbbtv15 DashIsoHbbtvCompliance = "HBBTV_1_5" + DashIsoHbbtvComplianceNone DashIsoHbbtvCompliance = "NONE" ) // Values returns all known values for DashIsoHbbtvCompliance. Note that this can @@ -1492,8 +1492,8 @@ type DashIsoMpdProfile string // Enum values for DashIsoMpdProfile const ( - DashIsoMpdProfileMain_profile DashIsoMpdProfile = "MAIN_PROFILE" - DashIsoMpdProfileOn_demand_profile DashIsoMpdProfile = "ON_DEMAND_PROFILE" + DashIsoMpdProfileMainProfile DashIsoMpdProfile = "MAIN_PROFILE" + DashIsoMpdProfileOnDemandProfile DashIsoMpdProfile = "ON_DEMAND_PROFILE" ) // Values returns all known values for DashIsoMpdProfile. 
Note that this can be @@ -1510,8 +1510,8 @@ type DashIsoPlaybackDeviceCompatibility string // Enum values for DashIsoPlaybackDeviceCompatibility const ( - DashIsoPlaybackDeviceCompatibilityCenc_v1 DashIsoPlaybackDeviceCompatibility = "CENC_V1" - DashIsoPlaybackDeviceCompatibilityUnencrypted_sei DashIsoPlaybackDeviceCompatibility = "UNENCRYPTED_SEI" + DashIsoPlaybackDeviceCompatibilityCencV1 DashIsoPlaybackDeviceCompatibility = "CENC_V1" + DashIsoPlaybackDeviceCompatibilityUnencryptedSei DashIsoPlaybackDeviceCompatibility = "UNENCRYPTED_SEI" ) // Values returns all known values for DashIsoPlaybackDeviceCompatibility. Note @@ -1529,8 +1529,8 @@ type DashIsoSegmentControl string // Enum values for DashIsoSegmentControl const ( - DashIsoSegmentControlSingle_file DashIsoSegmentControl = "SINGLE_FILE" - DashIsoSegmentControlSegmented_files DashIsoSegmentControl = "SEGMENTED_FILES" + DashIsoSegmentControlSingleFile DashIsoSegmentControl = "SINGLE_FILE" + DashIsoSegmentControlSegmentedFiles DashIsoSegmentControl = "SEGMENTED_FILES" ) // Values returns all known values for DashIsoSegmentControl. Note that this can be @@ -1566,9 +1566,9 @@ type DecryptionMode string // Enum values for DecryptionMode const ( - DecryptionModeAes_ctr DecryptionMode = "AES_CTR" - DecryptionModeAes_cbc DecryptionMode = "AES_CBC" - DecryptionModeAes_gcm DecryptionMode = "AES_GCM" + DecryptionModeAesCtr DecryptionMode = "AES_CTR" + DecryptionModeAesCbc DecryptionMode = "AES_CBC" + DecryptionModeAesGcm DecryptionMode = "AES_GCM" ) // Values returns all known values for DecryptionMode. Note that this can be @@ -1586,10 +1586,10 @@ type DeinterlaceAlgorithm string // Enum values for DeinterlaceAlgorithm const ( - DeinterlaceAlgorithmInterpolate DeinterlaceAlgorithm = "INTERPOLATE" - DeinterlaceAlgorithmInterpolate_ticker DeinterlaceAlgorithm = "INTERPOLATE_TICKER" - DeinterlaceAlgorithmBlend DeinterlaceAlgorithm = "BLEND" - DeinterlaceAlgorithmBlend_ticker DeinterlaceAlgorithm = "BLEND_TICKER" + DeinterlaceAlgorithmInterpolate DeinterlaceAlgorithm = "INTERPOLATE" + DeinterlaceAlgorithmInterpolateTicker DeinterlaceAlgorithm = "INTERPOLATE_TICKER" + DeinterlaceAlgorithmBlend DeinterlaceAlgorithm = "BLEND" + DeinterlaceAlgorithmBlendTicker DeinterlaceAlgorithm = "BLEND_TICKER" ) // Values returns all known values for DeinterlaceAlgorithm. Note that this can be @@ -1608,8 +1608,8 @@ type DeinterlacerControl string // Enum values for DeinterlacerControl const ( - DeinterlacerControlForce_all_frames DeinterlacerControl = "FORCE_ALL_FRAMES" - DeinterlacerControlNormal DeinterlacerControl = "NORMAL" + DeinterlacerControlForceAllFrames DeinterlacerControl = "FORCE_ALL_FRAMES" + DeinterlacerControlNormal DeinterlacerControl = "NORMAL" ) // Values returns all known values for DeinterlacerControl. Note that this can be @@ -1626,9 +1626,9 @@ type DeinterlacerMode string // Enum values for DeinterlacerMode const ( - DeinterlacerModeDeinterlace DeinterlacerMode = "DEINTERLACE" - DeinterlacerModeInverse_telecine DeinterlacerMode = "INVERSE_TELECINE" - DeinterlacerModeAdaptive DeinterlacerMode = "ADAPTIVE" + DeinterlacerModeDeinterlace DeinterlacerMode = "DEINTERLACE" + DeinterlacerModeInverseTelecine DeinterlacerMode = "INVERSE_TELECINE" + DeinterlacerModeAdaptive DeinterlacerMode = "ADAPTIVE" ) // Values returns all known values for DeinterlacerMode. 
Note that this can be @@ -1646,8 +1646,8 @@ type DescribeEndpointsMode string // Enum values for DescribeEndpointsMode const ( - DescribeEndpointsModeDefault DescribeEndpointsMode = "DEFAULT" - DescribeEndpointsModeGet_only DescribeEndpointsMode = "GET_ONLY" + DescribeEndpointsModeDefault DescribeEndpointsMode = "DEFAULT" + DescribeEndpointsModeGetOnly DescribeEndpointsMode = "GET_ONLY" ) // Values returns all known values for DescribeEndpointsMode. Note that this can be @@ -1684,7 +1684,7 @@ type DolbyVisionProfile string // Enum values for DolbyVisionProfile const ( - DolbyVisionProfileProfile_5 DolbyVisionProfile = "PROFILE_5" + DolbyVisionProfileProfile5 DolbyVisionProfile = "PROFILE_5" ) // Values returns all known values for DolbyVisionProfile. Note that this can be @@ -1828,7 +1828,7 @@ type DvbSubtitleTeletextSpacing string // Enum values for DvbSubtitleTeletextSpacing const ( - DvbSubtitleTeletextSpacingFixed_grid DvbSubtitleTeletextSpacing = "FIXED_GRID" + DvbSubtitleTeletextSpacingFixedGrid DvbSubtitleTeletextSpacing = "FIXED_GRID" DvbSubtitleTeletextSpacingProportional DvbSubtitleTeletextSpacing = "PROPORTIONAL" ) @@ -1846,8 +1846,8 @@ type DvbSubtitlingType string // Enum values for DvbSubtitlingType const ( - DvbSubtitlingTypeHearing_impaired DvbSubtitlingType = "HEARING_IMPAIRED" - DvbSubtitlingTypeStandard DvbSubtitlingType = "STANDARD" + DvbSubtitlingTypeHearingImpaired DvbSubtitlingType = "HEARING_IMPAIRED" + DvbSubtitlingTypeStandard DvbSubtitlingType = "STANDARD" ) // Values returns all known values for DvbSubtitlingType. Note that this can be @@ -1864,7 +1864,7 @@ type Eac3AtmosBitstreamMode string // Enum values for Eac3AtmosBitstreamMode const ( - Eac3AtmosBitstreamModeComplete_main Eac3AtmosBitstreamMode = "COMPLETE_MAIN" + Eac3AtmosBitstreamModeCompleteMain Eac3AtmosBitstreamMode = "COMPLETE_MAIN" ) // Values returns all known values for Eac3AtmosBitstreamMode. Note that this can @@ -1880,7 +1880,7 @@ type Eac3AtmosCodingMode string // Enum values for Eac3AtmosCodingMode const ( - Eac3AtmosCodingModeCoding_mode_9_1_6 Eac3AtmosCodingMode = "CODING_MODE_9_1_6" + Eac3AtmosCodingModeCodingMode916 Eac3AtmosCodingMode = "CODING_MODE_9_1_6" ) // Values returns all known values for Eac3AtmosCodingMode. 
Note that this can be @@ -1915,12 +1915,12 @@ type Eac3AtmosDynamicRangeCompressionLine string // Enum values for Eac3AtmosDynamicRangeCompressionLine const ( - Eac3AtmosDynamicRangeCompressionLineNone Eac3AtmosDynamicRangeCompressionLine = "NONE" - Eac3AtmosDynamicRangeCompressionLineFilm_standard Eac3AtmosDynamicRangeCompressionLine = "FILM_STANDARD" - Eac3AtmosDynamicRangeCompressionLineFilm_light Eac3AtmosDynamicRangeCompressionLine = "FILM_LIGHT" - Eac3AtmosDynamicRangeCompressionLineMusic_standard Eac3AtmosDynamicRangeCompressionLine = "MUSIC_STANDARD" - Eac3AtmosDynamicRangeCompressionLineMusic_light Eac3AtmosDynamicRangeCompressionLine = "MUSIC_LIGHT" - Eac3AtmosDynamicRangeCompressionLineSpeech Eac3AtmosDynamicRangeCompressionLine = "SPEECH" + Eac3AtmosDynamicRangeCompressionLineNone Eac3AtmosDynamicRangeCompressionLine = "NONE" + Eac3AtmosDynamicRangeCompressionLineFilmStandard Eac3AtmosDynamicRangeCompressionLine = "FILM_STANDARD" + Eac3AtmosDynamicRangeCompressionLineFilmLight Eac3AtmosDynamicRangeCompressionLine = "FILM_LIGHT" + Eac3AtmosDynamicRangeCompressionLineMusicStandard Eac3AtmosDynamicRangeCompressionLine = "MUSIC_STANDARD" + Eac3AtmosDynamicRangeCompressionLineMusicLight Eac3AtmosDynamicRangeCompressionLine = "MUSIC_LIGHT" + Eac3AtmosDynamicRangeCompressionLineSpeech Eac3AtmosDynamicRangeCompressionLine = "SPEECH" ) // Values returns all known values for Eac3AtmosDynamicRangeCompressionLine. Note @@ -1942,12 +1942,12 @@ type Eac3AtmosDynamicRangeCompressionRf string // Enum values for Eac3AtmosDynamicRangeCompressionRf const ( - Eac3AtmosDynamicRangeCompressionRfNone Eac3AtmosDynamicRangeCompressionRf = "NONE" - Eac3AtmosDynamicRangeCompressionRfFilm_standard Eac3AtmosDynamicRangeCompressionRf = "FILM_STANDARD" - Eac3AtmosDynamicRangeCompressionRfFilm_light Eac3AtmosDynamicRangeCompressionRf = "FILM_LIGHT" - Eac3AtmosDynamicRangeCompressionRfMusic_standard Eac3AtmosDynamicRangeCompressionRf = "MUSIC_STANDARD" - Eac3AtmosDynamicRangeCompressionRfMusic_light Eac3AtmosDynamicRangeCompressionRf = "MUSIC_LIGHT" - Eac3AtmosDynamicRangeCompressionRfSpeech Eac3AtmosDynamicRangeCompressionRf = "SPEECH" + Eac3AtmosDynamicRangeCompressionRfNone Eac3AtmosDynamicRangeCompressionRf = "NONE" + Eac3AtmosDynamicRangeCompressionRfFilmStandard Eac3AtmosDynamicRangeCompressionRf = "FILM_STANDARD" + Eac3AtmosDynamicRangeCompressionRfFilmLight Eac3AtmosDynamicRangeCompressionRf = "FILM_LIGHT" + Eac3AtmosDynamicRangeCompressionRfMusicStandard Eac3AtmosDynamicRangeCompressionRf = "MUSIC_STANDARD" + Eac3AtmosDynamicRangeCompressionRfMusicLight Eac3AtmosDynamicRangeCompressionRf = "MUSIC_LIGHT" + Eac3AtmosDynamicRangeCompressionRfSpeech Eac3AtmosDynamicRangeCompressionRf = "SPEECH" ) // Values returns all known values for Eac3AtmosDynamicRangeCompressionRf. 
Note @@ -1969,11 +1969,11 @@ type Eac3AtmosMeteringMode string // Enum values for Eac3AtmosMeteringMode const ( - Eac3AtmosMeteringModeLeq_a Eac3AtmosMeteringMode = "LEQ_A" - Eac3AtmosMeteringModeItu_bs_1770_1 Eac3AtmosMeteringMode = "ITU_BS_1770_1" - Eac3AtmosMeteringModeItu_bs_1770_2 Eac3AtmosMeteringMode = "ITU_BS_1770_2" - Eac3AtmosMeteringModeItu_bs_1770_3 Eac3AtmosMeteringMode = "ITU_BS_1770_3" - Eac3AtmosMeteringModeItu_bs_1770_4 Eac3AtmosMeteringMode = "ITU_BS_1770_4" + Eac3AtmosMeteringModeLeqA Eac3AtmosMeteringMode = "LEQ_A" + Eac3AtmosMeteringModeItuBs17701 Eac3AtmosMeteringMode = "ITU_BS_1770_1" + Eac3AtmosMeteringModeItuBs17702 Eac3AtmosMeteringMode = "ITU_BS_1770_2" + Eac3AtmosMeteringModeItuBs17703 Eac3AtmosMeteringMode = "ITU_BS_1770_3" + Eac3AtmosMeteringModeItuBs17704 Eac3AtmosMeteringMode = "ITU_BS_1770_4" ) // Values returns all known values for Eac3AtmosMeteringMode. Note that this can be @@ -1993,10 +1993,10 @@ type Eac3AtmosStereoDownmix string // Enum values for Eac3AtmosStereoDownmix const ( - Eac3AtmosStereoDownmixNot_indicated Eac3AtmosStereoDownmix = "NOT_INDICATED" - Eac3AtmosStereoDownmixStereo Eac3AtmosStereoDownmix = "STEREO" - Eac3AtmosStereoDownmixSurround Eac3AtmosStereoDownmix = "SURROUND" - Eac3AtmosStereoDownmixDpl2 Eac3AtmosStereoDownmix = "DPL2" + Eac3AtmosStereoDownmixNotIndicated Eac3AtmosStereoDownmix = "NOT_INDICATED" + Eac3AtmosStereoDownmixStereo Eac3AtmosStereoDownmix = "STEREO" + Eac3AtmosStereoDownmixSurround Eac3AtmosStereoDownmix = "SURROUND" + Eac3AtmosStereoDownmixDpl2 Eac3AtmosStereoDownmix = "DPL2" ) // Values returns all known values for Eac3AtmosStereoDownmix. Note that this can @@ -2015,9 +2015,9 @@ type Eac3AtmosSurroundExMode string // Enum values for Eac3AtmosSurroundExMode const ( - Eac3AtmosSurroundExModeNot_indicated Eac3AtmosSurroundExMode = "NOT_INDICATED" - Eac3AtmosSurroundExModeEnabled Eac3AtmosSurroundExMode = "ENABLED" - Eac3AtmosSurroundExModeDisabled Eac3AtmosSurroundExMode = "DISABLED" + Eac3AtmosSurroundExModeNotIndicated Eac3AtmosSurroundExMode = "NOT_INDICATED" + Eac3AtmosSurroundExModeEnabled Eac3AtmosSurroundExMode = "ENABLED" + Eac3AtmosSurroundExModeDisabled Eac3AtmosSurroundExMode = "DISABLED" ) // Values returns all known values for Eac3AtmosSurroundExMode. Note that this can @@ -2035,8 +2035,8 @@ type Eac3AttenuationControl string // Enum values for Eac3AttenuationControl const ( - Eac3AttenuationControlAttenuate_3_db Eac3AttenuationControl = "ATTENUATE_3_DB" - Eac3AttenuationControlNone Eac3AttenuationControl = "NONE" + Eac3AttenuationControlAttenuate3Db Eac3AttenuationControl = "ATTENUATE_3_DB" + Eac3AttenuationControlNone Eac3AttenuationControl = "NONE" ) // Values returns all known values for Eac3AttenuationControl. 
Note that this can @@ -2053,11 +2053,11 @@ type Eac3BitstreamMode string // Enum values for Eac3BitstreamMode const ( - Eac3BitstreamModeComplete_main Eac3BitstreamMode = "COMPLETE_MAIN" - Eac3BitstreamModeCommentary Eac3BitstreamMode = "COMMENTARY" - Eac3BitstreamModeEmergency Eac3BitstreamMode = "EMERGENCY" - Eac3BitstreamModeHearing_impaired Eac3BitstreamMode = "HEARING_IMPAIRED" - Eac3BitstreamModeVisually_impaired Eac3BitstreamMode = "VISUALLY_IMPAIRED" + Eac3BitstreamModeCompleteMain Eac3BitstreamMode = "COMPLETE_MAIN" + Eac3BitstreamModeCommentary Eac3BitstreamMode = "COMMENTARY" + Eac3BitstreamModeEmergency Eac3BitstreamMode = "EMERGENCY" + Eac3BitstreamModeHearingImpaired Eac3BitstreamMode = "HEARING_IMPAIRED" + Eac3BitstreamModeVisuallyImpaired Eac3BitstreamMode = "VISUALLY_IMPAIRED" ) // Values returns all known values for Eac3BitstreamMode. Note that this can be @@ -2077,9 +2077,9 @@ type Eac3CodingMode string // Enum values for Eac3CodingMode const ( - Eac3CodingModeCoding_mode_1_0 Eac3CodingMode = "CODING_MODE_1_0" - Eac3CodingModeCoding_mode_2_0 Eac3CodingMode = "CODING_MODE_2_0" - Eac3CodingModeCoding_mode_3_2 Eac3CodingMode = "CODING_MODE_3_2" + Eac3CodingModeCodingMode10 Eac3CodingMode = "CODING_MODE_1_0" + Eac3CodingModeCodingMode20 Eac3CodingMode = "CODING_MODE_2_0" + Eac3CodingModeCodingMode32 Eac3CodingMode = "CODING_MODE_3_2" ) // Values returns all known values for Eac3CodingMode. Note that this can be @@ -2115,12 +2115,12 @@ type Eac3DynamicRangeCompressionLine string // Enum values for Eac3DynamicRangeCompressionLine const ( - Eac3DynamicRangeCompressionLineNone Eac3DynamicRangeCompressionLine = "NONE" - Eac3DynamicRangeCompressionLineFilm_standard Eac3DynamicRangeCompressionLine = "FILM_STANDARD" - Eac3DynamicRangeCompressionLineFilm_light Eac3DynamicRangeCompressionLine = "FILM_LIGHT" - Eac3DynamicRangeCompressionLineMusic_standard Eac3DynamicRangeCompressionLine = "MUSIC_STANDARD" - Eac3DynamicRangeCompressionLineMusic_light Eac3DynamicRangeCompressionLine = "MUSIC_LIGHT" - Eac3DynamicRangeCompressionLineSpeech Eac3DynamicRangeCompressionLine = "SPEECH" + Eac3DynamicRangeCompressionLineNone Eac3DynamicRangeCompressionLine = "NONE" + Eac3DynamicRangeCompressionLineFilmStandard Eac3DynamicRangeCompressionLine = "FILM_STANDARD" + Eac3DynamicRangeCompressionLineFilmLight Eac3DynamicRangeCompressionLine = "FILM_LIGHT" + Eac3DynamicRangeCompressionLineMusicStandard Eac3DynamicRangeCompressionLine = "MUSIC_STANDARD" + Eac3DynamicRangeCompressionLineMusicLight Eac3DynamicRangeCompressionLine = "MUSIC_LIGHT" + Eac3DynamicRangeCompressionLineSpeech Eac3DynamicRangeCompressionLine = "SPEECH" ) // Values returns all known values for Eac3DynamicRangeCompressionLine. 
Note that @@ -2142,12 +2142,12 @@ type Eac3DynamicRangeCompressionRf string // Enum values for Eac3DynamicRangeCompressionRf const ( - Eac3DynamicRangeCompressionRfNone Eac3DynamicRangeCompressionRf = "NONE" - Eac3DynamicRangeCompressionRfFilm_standard Eac3DynamicRangeCompressionRf = "FILM_STANDARD" - Eac3DynamicRangeCompressionRfFilm_light Eac3DynamicRangeCompressionRf = "FILM_LIGHT" - Eac3DynamicRangeCompressionRfMusic_standard Eac3DynamicRangeCompressionRf = "MUSIC_STANDARD" - Eac3DynamicRangeCompressionRfMusic_light Eac3DynamicRangeCompressionRf = "MUSIC_LIGHT" - Eac3DynamicRangeCompressionRfSpeech Eac3DynamicRangeCompressionRf = "SPEECH" + Eac3DynamicRangeCompressionRfNone Eac3DynamicRangeCompressionRf = "NONE" + Eac3DynamicRangeCompressionRfFilmStandard Eac3DynamicRangeCompressionRf = "FILM_STANDARD" + Eac3DynamicRangeCompressionRfFilmLight Eac3DynamicRangeCompressionRf = "FILM_LIGHT" + Eac3DynamicRangeCompressionRfMusicStandard Eac3DynamicRangeCompressionRf = "MUSIC_STANDARD" + Eac3DynamicRangeCompressionRfMusicLight Eac3DynamicRangeCompressionRf = "MUSIC_LIGHT" + Eac3DynamicRangeCompressionRfSpeech Eac3DynamicRangeCompressionRf = "SPEECH" ) // Values returns all known values for Eac3DynamicRangeCompressionRf. Note that @@ -2169,8 +2169,8 @@ type Eac3LfeControl string // Enum values for Eac3LfeControl const ( - Eac3LfeControlLfe Eac3LfeControl = "LFE" - Eac3LfeControlNo_lfe Eac3LfeControl = "NO_LFE" + Eac3LfeControlLfe Eac3LfeControl = "LFE" + Eac3LfeControlNoLfe Eac3LfeControl = "NO_LFE" ) // Values returns all known values for Eac3LfeControl. Note that this can be @@ -2205,8 +2205,8 @@ type Eac3MetadataControl string // Enum values for Eac3MetadataControl const ( - Eac3MetadataControlFollow_input Eac3MetadataControl = "FOLLOW_INPUT" - Eac3MetadataControlUse_configured Eac3MetadataControl = "USE_CONFIGURED" + Eac3MetadataControlFollowInput Eac3MetadataControl = "FOLLOW_INPUT" + Eac3MetadataControlUseConfigured Eac3MetadataControl = "USE_CONFIGURED" ) // Values returns all known values for Eac3MetadataControl. Note that this can be @@ -2223,8 +2223,8 @@ type Eac3PassthroughControl string // Enum values for Eac3PassthroughControl const ( - Eac3PassthroughControlWhen_possible Eac3PassthroughControl = "WHEN_POSSIBLE" - Eac3PassthroughControlNo_passthrough Eac3PassthroughControl = "NO_PASSTHROUGH" + Eac3PassthroughControlWhenPossible Eac3PassthroughControl = "WHEN_POSSIBLE" + Eac3PassthroughControlNoPassthrough Eac3PassthroughControl = "NO_PASSTHROUGH" ) // Values returns all known values for Eac3PassthroughControl. Note that this can @@ -2241,8 +2241,8 @@ type Eac3PhaseControl string // Enum values for Eac3PhaseControl const ( - Eac3PhaseControlShift_90_degrees Eac3PhaseControl = "SHIFT_90_DEGREES" - Eac3PhaseControlNo_shift Eac3PhaseControl = "NO_SHIFT" + Eac3PhaseControlShift90Degrees Eac3PhaseControl = "SHIFT_90_DEGREES" + Eac3PhaseControlNoShift Eac3PhaseControl = "NO_SHIFT" ) // Values returns all known values for Eac3PhaseControl. 
Note that this can be @@ -2259,10 +2259,10 @@ type Eac3StereoDownmix string // Enum values for Eac3StereoDownmix const ( - Eac3StereoDownmixNot_indicated Eac3StereoDownmix = "NOT_INDICATED" - Eac3StereoDownmixLo_ro Eac3StereoDownmix = "LO_RO" - Eac3StereoDownmixLt_rt Eac3StereoDownmix = "LT_RT" - Eac3StereoDownmixDpl2 Eac3StereoDownmix = "DPL2" + Eac3StereoDownmixNotIndicated Eac3StereoDownmix = "NOT_INDICATED" + Eac3StereoDownmixLoRo Eac3StereoDownmix = "LO_RO" + Eac3StereoDownmixLtRt Eac3StereoDownmix = "LT_RT" + Eac3StereoDownmixDpl2 Eac3StereoDownmix = "DPL2" ) // Values returns all known values for Eac3StereoDownmix. Note that this can be @@ -2281,9 +2281,9 @@ type Eac3SurroundExMode string // Enum values for Eac3SurroundExMode const ( - Eac3SurroundExModeNot_indicated Eac3SurroundExMode = "NOT_INDICATED" - Eac3SurroundExModeEnabled Eac3SurroundExMode = "ENABLED" - Eac3SurroundExModeDisabled Eac3SurroundExMode = "DISABLED" + Eac3SurroundExModeNotIndicated Eac3SurroundExMode = "NOT_INDICATED" + Eac3SurroundExModeEnabled Eac3SurroundExMode = "ENABLED" + Eac3SurroundExModeDisabled Eac3SurroundExMode = "DISABLED" ) // Values returns all known values for Eac3SurroundExMode. Note that this can be @@ -2301,9 +2301,9 @@ type Eac3SurroundMode string // Enum values for Eac3SurroundMode const ( - Eac3SurroundModeNot_indicated Eac3SurroundMode = "NOT_INDICATED" - Eac3SurroundModeEnabled Eac3SurroundMode = "ENABLED" - Eac3SurroundModeDisabled Eac3SurroundMode = "DISABLED" + Eac3SurroundModeNotIndicated Eac3SurroundMode = "NOT_INDICATED" + Eac3SurroundModeEnabled Eac3SurroundMode = "ENABLED" + Eac3SurroundModeDisabled Eac3SurroundMode = "DISABLED" ) // Values returns all known values for Eac3SurroundMode. Note that this can be @@ -2339,8 +2339,8 @@ type EmbeddedTerminateCaptions string // Enum values for EmbeddedTerminateCaptions const ( - EmbeddedTerminateCaptionsEnd_of_input EmbeddedTerminateCaptions = "END_OF_INPUT" - EmbeddedTerminateCaptionsDisabled EmbeddedTerminateCaptions = "DISABLED" + EmbeddedTerminateCaptionsEndOfInput EmbeddedTerminateCaptions = "END_OF_INPUT" + EmbeddedTerminateCaptionsDisabled EmbeddedTerminateCaptions = "DISABLED" ) // Values returns all known values for EmbeddedTerminateCaptions. Note that this @@ -2357,8 +2357,8 @@ type F4vMoovPlacement string // Enum values for F4vMoovPlacement const ( - F4vMoovPlacementProgressive_download F4vMoovPlacement = "PROGRESSIVE_DOWNLOAD" - F4vMoovPlacementNormal F4vMoovPlacement = "NORMAL" + F4vMoovPlacementProgressiveDownload F4vMoovPlacement = "PROGRESSIVE_DOWNLOAD" + F4vMoovPlacementNormal F4vMoovPlacement = "NORMAL" ) // Values returns all known values for F4vMoovPlacement. 
Note that this can be @@ -2439,23 +2439,23 @@ type H264CodecLevel string // Enum values for H264CodecLevel const ( - H264CodecLevelAuto H264CodecLevel = "AUTO" - H264CodecLevelLevel_1 H264CodecLevel = "LEVEL_1" - H264CodecLevelLevel_1_1 H264CodecLevel = "LEVEL_1_1" - H264CodecLevelLevel_1_2 H264CodecLevel = "LEVEL_1_2" - H264CodecLevelLevel_1_3 H264CodecLevel = "LEVEL_1_3" - H264CodecLevelLevel_2 H264CodecLevel = "LEVEL_2" - H264CodecLevelLevel_2_1 H264CodecLevel = "LEVEL_2_1" - H264CodecLevelLevel_2_2 H264CodecLevel = "LEVEL_2_2" - H264CodecLevelLevel_3 H264CodecLevel = "LEVEL_3" - H264CodecLevelLevel_3_1 H264CodecLevel = "LEVEL_3_1" - H264CodecLevelLevel_3_2 H264CodecLevel = "LEVEL_3_2" - H264CodecLevelLevel_4 H264CodecLevel = "LEVEL_4" - H264CodecLevelLevel_4_1 H264CodecLevel = "LEVEL_4_1" - H264CodecLevelLevel_4_2 H264CodecLevel = "LEVEL_4_2" - H264CodecLevelLevel_5 H264CodecLevel = "LEVEL_5" - H264CodecLevelLevel_5_1 H264CodecLevel = "LEVEL_5_1" - H264CodecLevelLevel_5_2 H264CodecLevel = "LEVEL_5_2" + H264CodecLevelAuto H264CodecLevel = "AUTO" + H264CodecLevelLevel1 H264CodecLevel = "LEVEL_1" + H264CodecLevelLevel11 H264CodecLevel = "LEVEL_1_1" + H264CodecLevelLevel12 H264CodecLevel = "LEVEL_1_2" + H264CodecLevelLevel13 H264CodecLevel = "LEVEL_1_3" + H264CodecLevelLevel2 H264CodecLevel = "LEVEL_2" + H264CodecLevelLevel21 H264CodecLevel = "LEVEL_2_1" + H264CodecLevelLevel22 H264CodecLevel = "LEVEL_2_2" + H264CodecLevelLevel3 H264CodecLevel = "LEVEL_3" + H264CodecLevelLevel31 H264CodecLevel = "LEVEL_3_1" + H264CodecLevelLevel32 H264CodecLevel = "LEVEL_3_2" + H264CodecLevelLevel4 H264CodecLevel = "LEVEL_4" + H264CodecLevelLevel41 H264CodecLevel = "LEVEL_4_1" + H264CodecLevelLevel42 H264CodecLevel = "LEVEL_4_2" + H264CodecLevelLevel5 H264CodecLevel = "LEVEL_5" + H264CodecLevelLevel51 H264CodecLevel = "LEVEL_5_1" + H264CodecLevelLevel52 H264CodecLevel = "LEVEL_5_2" ) // Values returns all known values for H264CodecLevel. Note that this can be @@ -2487,12 +2487,12 @@ type H264CodecProfile string // Enum values for H264CodecProfile const ( - H264CodecProfileBaseline H264CodecProfile = "BASELINE" - H264CodecProfileHigh H264CodecProfile = "HIGH" - H264CodecProfileHigh_10bit H264CodecProfile = "HIGH_10BIT" - H264CodecProfileHigh_422 H264CodecProfile = "HIGH_422" - H264CodecProfileHigh_422_10bit H264CodecProfile = "HIGH_422_10BIT" - H264CodecProfileMain H264CodecProfile = "MAIN" + H264CodecProfileBaseline H264CodecProfile = "BASELINE" + H264CodecProfileHigh H264CodecProfile = "HIGH" + H264CodecProfileHigh10bit H264CodecProfile = "HIGH_10BIT" + H264CodecProfileHigh422 H264CodecProfile = "HIGH_422" + H264CodecProfileHigh42210bit H264CodecProfile = "HIGH_422_10BIT" + H264CodecProfileMain H264CodecProfile = "MAIN" ) // Values returns all known values for H264CodecProfile. Note that this can be @@ -2549,8 +2549,8 @@ type H264FieldEncoding string // Enum values for H264FieldEncoding const ( - H264FieldEncodingPaff H264FieldEncoding = "PAFF" - H264FieldEncodingForce_field H264FieldEncoding = "FORCE_FIELD" + H264FieldEncodingPaff H264FieldEncoding = "PAFF" + H264FieldEncodingForceField H264FieldEncoding = "FORCE_FIELD" ) // Values returns all known values for H264FieldEncoding. 
Note that this can be @@ -2586,8 +2586,8 @@ type H264FramerateControl string // Enum values for H264FramerateControl const ( - H264FramerateControlInitialize_from_source H264FramerateControl = "INITIALIZE_FROM_SOURCE" - H264FramerateControlSpecified H264FramerateControl = "SPECIFIED" + H264FramerateControlInitializeFromSource H264FramerateControl = "INITIALIZE_FROM_SOURCE" + H264FramerateControlSpecified H264FramerateControl = "SPECIFIED" ) // Values returns all known values for H264FramerateControl. Note that this can be @@ -2604,9 +2604,9 @@ type H264FramerateConversionAlgorithm string // Enum values for H264FramerateConversionAlgorithm const ( - H264FramerateConversionAlgorithmDuplicate_drop H264FramerateConversionAlgorithm = "DUPLICATE_DROP" - H264FramerateConversionAlgorithmInterpolate H264FramerateConversionAlgorithm = "INTERPOLATE" - H264FramerateConversionAlgorithmFrameformer H264FramerateConversionAlgorithm = "FRAMEFORMER" + H264FramerateConversionAlgorithmDuplicateDrop H264FramerateConversionAlgorithm = "DUPLICATE_DROP" + H264FramerateConversionAlgorithmInterpolate H264FramerateConversionAlgorithm = "INTERPOLATE" + H264FramerateConversionAlgorithmFrameformer H264FramerateConversionAlgorithm = "FRAMEFORMER" ) // Values returns all known values for H264FramerateConversionAlgorithm. Note that @@ -2661,11 +2661,11 @@ type H264InterlaceMode string // Enum values for H264InterlaceMode const ( - H264InterlaceModeProgressive H264InterlaceMode = "PROGRESSIVE" - H264InterlaceModeTop_field H264InterlaceMode = "TOP_FIELD" - H264InterlaceModeBottom_field H264InterlaceMode = "BOTTOM_FIELD" - H264InterlaceModeFollow_top_field H264InterlaceMode = "FOLLOW_TOP_FIELD" - H264InterlaceModeFollow_bottom_field H264InterlaceMode = "FOLLOW_BOTTOM_FIELD" + H264InterlaceModeProgressive H264InterlaceMode = "PROGRESSIVE" + H264InterlaceModeTopField H264InterlaceMode = "TOP_FIELD" + H264InterlaceModeBottomField H264InterlaceMode = "BOTTOM_FIELD" + H264InterlaceModeFollowTopField H264InterlaceMode = "FOLLOW_TOP_FIELD" + H264InterlaceModeFollowBottomField H264InterlaceMode = "FOLLOW_BOTTOM_FIELD" ) // Values returns all known values for H264InterlaceMode. Note that this can be @@ -2685,8 +2685,8 @@ type H264ParControl string // Enum values for H264ParControl const ( - H264ParControlInitialize_from_source H264ParControl = "INITIALIZE_FROM_SOURCE" - H264ParControlSpecified H264ParControl = "SPECIFIED" + H264ParControlInitializeFromSource H264ParControl = "INITIALIZE_FROM_SOURCE" + H264ParControlSpecified H264ParControl = "SPECIFIED" ) // Values returns all known values for H264ParControl. Note that this can be @@ -2703,9 +2703,9 @@ type H264QualityTuningLevel string // Enum values for H264QualityTuningLevel const ( - H264QualityTuningLevelSingle_pass H264QualityTuningLevel = "SINGLE_PASS" - H264QualityTuningLevelSingle_pass_hq H264QualityTuningLevel = "SINGLE_PASS_HQ" - H264QualityTuningLevelMulti_pass_hq H264QualityTuningLevel = "MULTI_PASS_HQ" + H264QualityTuningLevelSinglePass H264QualityTuningLevel = "SINGLE_PASS" + H264QualityTuningLevelSinglePassHq H264QualityTuningLevel = "SINGLE_PASS_HQ" + H264QualityTuningLevelMultiPassHq H264QualityTuningLevel = "MULTI_PASS_HQ" ) // Values returns all known values for H264QualityTuningLevel. 
Note that this can @@ -2761,9 +2761,9 @@ type H264SceneChangeDetect string // Enum values for H264SceneChangeDetect const ( - H264SceneChangeDetectDisabled H264SceneChangeDetect = "DISABLED" - H264SceneChangeDetectEnabled H264SceneChangeDetect = "ENABLED" - H264SceneChangeDetectTransition_detection H264SceneChangeDetect = "TRANSITION_DETECTION" + H264SceneChangeDetectDisabled H264SceneChangeDetect = "DISABLED" + H264SceneChangeDetectEnabled H264SceneChangeDetect = "ENABLED" + H264SceneChangeDetectTransitionDetection H264SceneChangeDetect = "TRANSITION_DETECTION" ) // Values returns all known values for H264SceneChangeDetect. Note that this can be @@ -2938,20 +2938,20 @@ type H265CodecLevel string // Enum values for H265CodecLevel const ( - H265CodecLevelAuto H265CodecLevel = "AUTO" - H265CodecLevelLevel_1 H265CodecLevel = "LEVEL_1" - H265CodecLevelLevel_2 H265CodecLevel = "LEVEL_2" - H265CodecLevelLevel_2_1 H265CodecLevel = "LEVEL_2_1" - H265CodecLevelLevel_3 H265CodecLevel = "LEVEL_3" - H265CodecLevelLevel_3_1 H265CodecLevel = "LEVEL_3_1" - H265CodecLevelLevel_4 H265CodecLevel = "LEVEL_4" - H265CodecLevelLevel_4_1 H265CodecLevel = "LEVEL_4_1" - H265CodecLevelLevel_5 H265CodecLevel = "LEVEL_5" - H265CodecLevelLevel_5_1 H265CodecLevel = "LEVEL_5_1" - H265CodecLevelLevel_5_2 H265CodecLevel = "LEVEL_5_2" - H265CodecLevelLevel_6 H265CodecLevel = "LEVEL_6" - H265CodecLevelLevel_6_1 H265CodecLevel = "LEVEL_6_1" - H265CodecLevelLevel_6_2 H265CodecLevel = "LEVEL_6_2" + H265CodecLevelAuto H265CodecLevel = "AUTO" + H265CodecLevelLevel1 H265CodecLevel = "LEVEL_1" + H265CodecLevelLevel2 H265CodecLevel = "LEVEL_2" + H265CodecLevelLevel21 H265CodecLevel = "LEVEL_2_1" + H265CodecLevelLevel3 H265CodecLevel = "LEVEL_3" + H265CodecLevelLevel31 H265CodecLevel = "LEVEL_3_1" + H265CodecLevelLevel4 H265CodecLevel = "LEVEL_4" + H265CodecLevelLevel41 H265CodecLevel = "LEVEL_4_1" + H265CodecLevelLevel5 H265CodecLevel = "LEVEL_5" + H265CodecLevelLevel51 H265CodecLevel = "LEVEL_5_1" + H265CodecLevelLevel52 H265CodecLevel = "LEVEL_5_2" + H265CodecLevelLevel6 H265CodecLevel = "LEVEL_6" + H265CodecLevelLevel61 H265CodecLevel = "LEVEL_6_1" + H265CodecLevelLevel62 H265CodecLevel = "LEVEL_6_2" ) // Values returns all known values for H265CodecLevel. Note that this can be @@ -2980,14 +2980,14 @@ type H265CodecProfile string // Enum values for H265CodecProfile const ( - H265CodecProfileMain_main H265CodecProfile = "MAIN_MAIN" - H265CodecProfileMain_high H265CodecProfile = "MAIN_HIGH" - H265CodecProfileMain10_main H265CodecProfile = "MAIN10_MAIN" - H265CodecProfileMain10_high H265CodecProfile = "MAIN10_HIGH" - H265CodecProfileMain_422_8bit_main H265CodecProfile = "MAIN_422_8BIT_MAIN" - H265CodecProfileMain_422_8bit_high H265CodecProfile = "MAIN_422_8BIT_HIGH" - H265CodecProfileMain_422_10bit_main H265CodecProfile = "MAIN_422_10BIT_MAIN" - H265CodecProfileMain_422_10bit_high H265CodecProfile = "MAIN_422_10BIT_HIGH" + H265CodecProfileMainMain H265CodecProfile = "MAIN_MAIN" + H265CodecProfileMainHigh H265CodecProfile = "MAIN_HIGH" + H265CodecProfileMain10Main H265CodecProfile = "MAIN10_MAIN" + H265CodecProfileMain10High H265CodecProfile = "MAIN10_HIGH" + H265CodecProfileMain4228bitMain H265CodecProfile = "MAIN_422_8BIT_MAIN" + H265CodecProfileMain4228bitHigh H265CodecProfile = "MAIN_422_8BIT_HIGH" + H265CodecProfileMain42210bitMain H265CodecProfile = "MAIN_422_10BIT_MAIN" + H265CodecProfileMain42210bitHigh H265CodecProfile = "MAIN_422_10BIT_HIGH" ) // Values returns all known values for H265CodecProfile. 
Note that this can be @@ -3047,8 +3047,8 @@ type H265FramerateControl string // Enum values for H265FramerateControl const ( - H265FramerateControlInitialize_from_source H265FramerateControl = "INITIALIZE_FROM_SOURCE" - H265FramerateControlSpecified H265FramerateControl = "SPECIFIED" + H265FramerateControlInitializeFromSource H265FramerateControl = "INITIALIZE_FROM_SOURCE" + H265FramerateControlSpecified H265FramerateControl = "SPECIFIED" ) // Values returns all known values for H265FramerateControl. Note that this can be @@ -3065,9 +3065,9 @@ type H265FramerateConversionAlgorithm string // Enum values for H265FramerateConversionAlgorithm const ( - H265FramerateConversionAlgorithmDuplicate_drop H265FramerateConversionAlgorithm = "DUPLICATE_DROP" - H265FramerateConversionAlgorithmInterpolate H265FramerateConversionAlgorithm = "INTERPOLATE" - H265FramerateConversionAlgorithmFrameformer H265FramerateConversionAlgorithm = "FRAMEFORMER" + H265FramerateConversionAlgorithmDuplicateDrop H265FramerateConversionAlgorithm = "DUPLICATE_DROP" + H265FramerateConversionAlgorithmInterpolate H265FramerateConversionAlgorithm = "INTERPOLATE" + H265FramerateConversionAlgorithmFrameformer H265FramerateConversionAlgorithm = "FRAMEFORMER" ) // Values returns all known values for H265FramerateConversionAlgorithm. Note that @@ -3122,11 +3122,11 @@ type H265InterlaceMode string // Enum values for H265InterlaceMode const ( - H265InterlaceModeProgressive H265InterlaceMode = "PROGRESSIVE" - H265InterlaceModeTop_field H265InterlaceMode = "TOP_FIELD" - H265InterlaceModeBottom_field H265InterlaceMode = "BOTTOM_FIELD" - H265InterlaceModeFollow_top_field H265InterlaceMode = "FOLLOW_TOP_FIELD" - H265InterlaceModeFollow_bottom_field H265InterlaceMode = "FOLLOW_BOTTOM_FIELD" + H265InterlaceModeProgressive H265InterlaceMode = "PROGRESSIVE" + H265InterlaceModeTopField H265InterlaceMode = "TOP_FIELD" + H265InterlaceModeBottomField H265InterlaceMode = "BOTTOM_FIELD" + H265InterlaceModeFollowTopField H265InterlaceMode = "FOLLOW_TOP_FIELD" + H265InterlaceModeFollowBottomField H265InterlaceMode = "FOLLOW_BOTTOM_FIELD" ) // Values returns all known values for H265InterlaceMode. Note that this can be @@ -3146,8 +3146,8 @@ type H265ParControl string // Enum values for H265ParControl const ( - H265ParControlInitialize_from_source H265ParControl = "INITIALIZE_FROM_SOURCE" - H265ParControlSpecified H265ParControl = "SPECIFIED" + H265ParControlInitializeFromSource H265ParControl = "INITIALIZE_FROM_SOURCE" + H265ParControlSpecified H265ParControl = "SPECIFIED" ) // Values returns all known values for H265ParControl. Note that this can be @@ -3164,9 +3164,9 @@ type H265QualityTuningLevel string // Enum values for H265QualityTuningLevel const ( - H265QualityTuningLevelSingle_pass H265QualityTuningLevel = "SINGLE_PASS" - H265QualityTuningLevelSingle_pass_hq H265QualityTuningLevel = "SINGLE_PASS_HQ" - H265QualityTuningLevelMulti_pass_hq H265QualityTuningLevel = "MULTI_PASS_HQ" + H265QualityTuningLevelSinglePass H265QualityTuningLevel = "SINGLE_PASS" + H265QualityTuningLevelSinglePassHq H265QualityTuningLevel = "SINGLE_PASS_HQ" + H265QualityTuningLevelMultiPassHq H265QualityTuningLevel = "MULTI_PASS_HQ" ) // Values returns all known values for H265QualityTuningLevel. 
Note that this can @@ -3225,9 +3225,9 @@ type H265SceneChangeDetect string // Enum values for H265SceneChangeDetect const ( - H265SceneChangeDetectDisabled H265SceneChangeDetect = "DISABLED" - H265SceneChangeDetectEnabled H265SceneChangeDetect = "ENABLED" - H265SceneChangeDetectTransition_detection H265SceneChangeDetect = "TRANSITION_DETECTION" + H265SceneChangeDetectDisabled H265SceneChangeDetect = "DISABLED" + H265SceneChangeDetectEnabled H265SceneChangeDetect = "ENABLED" + H265SceneChangeDetectTransitionDetection H265SceneChangeDetect = "TRANSITION_DETECTION" ) // Values returns all known values for H265SceneChangeDetect. Note that this can be @@ -3393,8 +3393,8 @@ type HlsAdMarkers string // Enum values for HlsAdMarkers const ( - HlsAdMarkersElemental HlsAdMarkers = "ELEMENTAL" - HlsAdMarkersElemental_scte35 HlsAdMarkers = "ELEMENTAL_SCTE35" + HlsAdMarkersElemental HlsAdMarkers = "ELEMENTAL" + HlsAdMarkersElementalScte35 HlsAdMarkers = "ELEMENTAL_SCTE35" ) // Values returns all known values for HlsAdMarkers. Note that this can be expanded @@ -3447,10 +3447,10 @@ type HlsAudioTrackType string // Enum values for HlsAudioTrackType const ( - HlsAudioTrackTypeAlternate_audio_auto_select_default HlsAudioTrackType = "ALTERNATE_AUDIO_AUTO_SELECT_DEFAULT" - HlsAudioTrackTypeAlternate_audio_auto_select HlsAudioTrackType = "ALTERNATE_AUDIO_AUTO_SELECT" - HlsAudioTrackTypeAlternate_audio_not_auto_select HlsAudioTrackType = "ALTERNATE_AUDIO_NOT_AUTO_SELECT" - HlsAudioTrackTypeAudio_only_variant_stream HlsAudioTrackType = "AUDIO_ONLY_VARIANT_STREAM" + HlsAudioTrackTypeAlternateAudioAutoSelectDefault HlsAudioTrackType = "ALTERNATE_AUDIO_AUTO_SELECT_DEFAULT" + HlsAudioTrackTypeAlternateAudioAutoSelect HlsAudioTrackType = "ALTERNATE_AUDIO_AUTO_SELECT" + HlsAudioTrackTypeAlternateAudioNotAutoSelect HlsAudioTrackType = "ALTERNATE_AUDIO_NOT_AUTO_SELECT" + HlsAudioTrackTypeAudioOnlyVariantStream HlsAudioTrackType = "AUDIO_ONLY_VARIANT_STREAM" ) // Values returns all known values for HlsAudioTrackType. Note that this can be @@ -3507,8 +3507,8 @@ type HlsCodecSpecification string // Enum values for HlsCodecSpecification const ( - HlsCodecSpecificationRfc_6381 HlsCodecSpecification = "RFC_6381" - HlsCodecSpecificationRfc_4281 HlsCodecSpecification = "RFC_4281" + HlsCodecSpecificationRfc6381 HlsCodecSpecification = "RFC_6381" + HlsCodecSpecificationRfc4281 HlsCodecSpecification = "RFC_4281" ) // Values returns all known values for HlsCodecSpecification. Note that this can be @@ -3525,8 +3525,8 @@ type HlsDirectoryStructure string // Enum values for HlsDirectoryStructure const ( - HlsDirectoryStructureSingle_directory HlsDirectoryStructure = "SINGLE_DIRECTORY" - HlsDirectoryStructureSubdirectory_per_stream HlsDirectoryStructure = "SUBDIRECTORY_PER_STREAM" + HlsDirectoryStructureSingleDirectory HlsDirectoryStructure = "SINGLE_DIRECTORY" + HlsDirectoryStructureSubdirectoryPerStream HlsDirectoryStructure = "SUBDIRECTORY_PER_STREAM" ) // Values returns all known values for HlsDirectoryStructure. Note that this can be @@ -3543,8 +3543,8 @@ type HlsEncryptionType string // Enum values for HlsEncryptionType const ( - HlsEncryptionTypeAes128 HlsEncryptionType = "AES128" - HlsEncryptionTypeSample_aes HlsEncryptionType = "SAMPLE_AES" + HlsEncryptionTypeAes128 HlsEncryptionType = "AES128" + HlsEncryptionTypeSampleAes HlsEncryptionType = "SAMPLE_AES" ) // Values returns all known values for HlsEncryptionType. 
Note that this can be @@ -3598,8 +3598,8 @@ type HlsKeyProviderType string // Enum values for HlsKeyProviderType const ( - HlsKeyProviderTypeSpeke HlsKeyProviderType = "SPEKE" - HlsKeyProviderTypeStatic_key HlsKeyProviderType = "STATIC_KEY" + HlsKeyProviderTypeSpeke HlsKeyProviderType = "SPEKE" + HlsKeyProviderTypeStaticKey HlsKeyProviderType = "STATIC_KEY" ) // Values returns all known values for HlsKeyProviderType. Note that this can be @@ -3634,8 +3634,8 @@ type HlsManifestDurationFormat string // Enum values for HlsManifestDurationFormat const ( - HlsManifestDurationFormatFloating_point HlsManifestDurationFormat = "FLOATING_POINT" - HlsManifestDurationFormatInteger HlsManifestDurationFormat = "INTEGER" + HlsManifestDurationFormatFloatingPoint HlsManifestDurationFormat = "FLOATING_POINT" + HlsManifestDurationFormatInteger HlsManifestDurationFormat = "INTEGER" ) // Values returns all known values for HlsManifestDurationFormat. Note that this @@ -3670,8 +3670,8 @@ type HlsOutputSelection string // Enum values for HlsOutputSelection const ( - HlsOutputSelectionManifests_and_segments HlsOutputSelection = "MANIFESTS_AND_SEGMENTS" - HlsOutputSelectionSegments_only HlsOutputSelection = "SEGMENTS_ONLY" + HlsOutputSelectionManifestsAndSegments HlsOutputSelection = "MANIFESTS_AND_SEGMENTS" + HlsOutputSelectionSegmentsOnly HlsOutputSelection = "SEGMENTS_ONLY" ) // Values returns all known values for HlsOutputSelection. Note that this can be @@ -3706,8 +3706,8 @@ type HlsSegmentControl string // Enum values for HlsSegmentControl const ( - HlsSegmentControlSingle_file HlsSegmentControl = "SINGLE_FILE" - HlsSegmentControlSegmented_files HlsSegmentControl = "SEGMENTED_FILES" + HlsSegmentControlSingleFile HlsSegmentControl = "SINGLE_FILE" + HlsSegmentControlSegmentedFiles HlsSegmentControl = "SEGMENTED_FILES" ) // Values returns all known values for HlsSegmentControl. Note that this can be @@ -3836,8 +3836,8 @@ type InputPsiControl string // Enum values for InputPsiControl const ( - InputPsiControlIgnore_psi InputPsiControl = "IGNORE_PSI" - InputPsiControlUse_psi InputPsiControl = "USE_PSI" + InputPsiControlIgnorePsi InputPsiControl = "IGNORE_PSI" + InputPsiControlUsePsi InputPsiControl = "USE_PSI" ) // Values returns all known values for InputPsiControl. Note that this can be @@ -3854,11 +3854,11 @@ type InputRotate string // Enum values for InputRotate const ( - InputRotateDegree_0 InputRotate = "DEGREE_0" - InputRotateDegrees_90 InputRotate = "DEGREES_90" - InputRotateDegrees_180 InputRotate = "DEGREES_180" - InputRotateDegrees_270 InputRotate = "DEGREES_270" - InputRotateAuto InputRotate = "AUTO" + InputRotateDegree0 InputRotate = "DEGREE_0" + InputRotateDegrees90 InputRotate = "DEGREES_90" + InputRotateDegrees180 InputRotate = "DEGREES_180" + InputRotateDegrees270 InputRotate = "DEGREES_270" + InputRotateAuto InputRotate = "AUTO" ) // Values returns all known values for InputRotate. Note that this can be expanded @@ -3960,9 +3960,9 @@ type JobTemplateListBy string // Enum values for JobTemplateListBy const ( - JobTemplateListByName JobTemplateListBy = "NAME" - JobTemplateListByCreation_date JobTemplateListBy = "CREATION_DATE" - JobTemplateListBySystem JobTemplateListBy = "SYSTEM" + JobTemplateListByName JobTemplateListBy = "NAME" + JobTemplateListByCreationDate JobTemplateListBy = "CREATION_DATE" + JobTemplateListBySystem JobTemplateListBy = "SYSTEM" ) // Values returns all known values for JobTemplateListBy. 
Note that this can be @@ -4412,8 +4412,8 @@ type M2tsEbpAudioInterval string // Enum values for M2tsEbpAudioInterval const ( - M2tsEbpAudioIntervalVideo_and_fixed_intervals M2tsEbpAudioInterval = "VIDEO_AND_FIXED_INTERVALS" - M2tsEbpAudioIntervalVideo_interval M2tsEbpAudioInterval = "VIDEO_INTERVAL" + M2tsEbpAudioIntervalVideoAndFixedIntervals M2tsEbpAudioInterval = "VIDEO_AND_FIXED_INTERVALS" + M2tsEbpAudioIntervalVideoInterval M2tsEbpAudioInterval = "VIDEO_INTERVAL" ) // Values returns all known values for M2tsEbpAudioInterval. Note that this can be @@ -4430,8 +4430,8 @@ type M2tsEbpPlacement string // Enum values for M2tsEbpPlacement const ( - M2tsEbpPlacementVideo_and_audio_pids M2tsEbpPlacement = "VIDEO_AND_AUDIO_PIDS" - M2tsEbpPlacementVideo_pid M2tsEbpPlacement = "VIDEO_PID" + M2tsEbpPlacementVideoAndAudioPids M2tsEbpPlacement = "VIDEO_AND_AUDIO_PIDS" + M2tsEbpPlacementVideoPid M2tsEbpPlacement = "VIDEO_PID" ) // Values returns all known values for M2tsEbpPlacement. Note that this can be @@ -4502,8 +4502,8 @@ type M2tsPcrControl string // Enum values for M2tsPcrControl const ( - M2tsPcrControlPcr_every_pes_packet M2tsPcrControl = "PCR_EVERY_PES_PACKET" - M2tsPcrControlConfigured_pcr_period M2tsPcrControl = "CONFIGURED_PCR_PERIOD" + M2tsPcrControlPcrEveryPesPacket M2tsPcrControl = "PCR_EVERY_PES_PACKET" + M2tsPcrControlConfiguredPcrPeriod M2tsPcrControl = "CONFIGURED_PCR_PERIOD" ) // Values returns all known values for M2tsPcrControl. Note that this can be @@ -4556,12 +4556,12 @@ type M2tsSegmentationMarkers string // Enum values for M2tsSegmentationMarkers const ( - M2tsSegmentationMarkersNone M2tsSegmentationMarkers = "NONE" - M2tsSegmentationMarkersRai_segstart M2tsSegmentationMarkers = "RAI_SEGSTART" - M2tsSegmentationMarkersRai_adapt M2tsSegmentationMarkers = "RAI_ADAPT" - M2tsSegmentationMarkersPsi_segstart M2tsSegmentationMarkers = "PSI_SEGSTART" - M2tsSegmentationMarkersEbp M2tsSegmentationMarkers = "EBP" - M2tsSegmentationMarkersEbp_legacy M2tsSegmentationMarkers = "EBP_LEGACY" + M2tsSegmentationMarkersNone M2tsSegmentationMarkers = "NONE" + M2tsSegmentationMarkersRaiSegstart M2tsSegmentationMarkers = "RAI_SEGSTART" + M2tsSegmentationMarkersRaiAdapt M2tsSegmentationMarkers = "RAI_ADAPT" + M2tsSegmentationMarkersPsiSegstart M2tsSegmentationMarkers = "PSI_SEGSTART" + M2tsSegmentationMarkersEbp M2tsSegmentationMarkers = "EBP" + M2tsSegmentationMarkersEbpLegacy M2tsSegmentationMarkers = "EBP_LEGACY" ) // Values returns all known values for M2tsSegmentationMarkers. Note that this can @@ -4582,8 +4582,8 @@ type M2tsSegmentationStyle string // Enum values for M2tsSegmentationStyle const ( - M2tsSegmentationStyleMaintain_cadence M2tsSegmentationStyle = "MAINTAIN_CADENCE" - M2tsSegmentationStyleReset_cadence M2tsSegmentationStyle = "RESET_CADENCE" + M2tsSegmentationStyleMaintainCadence M2tsSegmentationStyle = "MAINTAIN_CADENCE" + M2tsSegmentationStyleResetCadence M2tsSegmentationStyle = "RESET_CADENCE" ) // Values returns all known values for M2tsSegmentationStyle. Note that this can be @@ -4618,8 +4618,8 @@ type M3u8PcrControl string // Enum values for M3u8PcrControl const ( - M3u8PcrControlPcr_every_pes_packet M3u8PcrControl = "PCR_EVERY_PES_PACKET" - M3u8PcrControlConfigured_pcr_period M3u8PcrControl = "CONFIGURED_PCR_PERIOD" + M3u8PcrControlPcrEveryPesPacket M3u8PcrControl = "PCR_EVERY_PES_PACKET" + M3u8PcrControlConfiguredPcrPeriod M3u8PcrControl = "CONFIGURED_PCR_PERIOD" ) // Values returns all known values for M3u8PcrControl. 
Note that this can be @@ -4762,8 +4762,8 @@ type MovReference string // Enum values for MovReference const ( - MovReferenceSelf_contained MovReference = "SELF_CONTAINED" - MovReferenceExternal MovReference = "EXTERNAL" + MovReferenceSelfContained MovReference = "SELF_CONTAINED" + MovReferenceExternal MovReference = "EXTERNAL" ) // Values returns all known values for MovReference. Note that this can be expanded @@ -4834,8 +4834,8 @@ type Mp4MoovPlacement string // Enum values for Mp4MoovPlacement const ( - Mp4MoovPlacementProgressive_download Mp4MoovPlacement = "PROGRESSIVE_DOWNLOAD" - Mp4MoovPlacementNormal Mp4MoovPlacement = "NORMAL" + Mp4MoovPlacementProgressiveDownload Mp4MoovPlacement = "PROGRESSIVE_DOWNLOAD" + Mp4MoovPlacementNormal Mp4MoovPlacement = "NORMAL" ) // Values returns all known values for Mp4MoovPlacement. Note that this can be @@ -4852,8 +4852,8 @@ type MpdCaptionContainerType string // Enum values for MpdCaptionContainerType const ( - MpdCaptionContainerTypeRaw MpdCaptionContainerType = "RAW" - MpdCaptionContainerTypeFragmented_mp4 MpdCaptionContainerType = "FRAGMENTED_MP4" + MpdCaptionContainerTypeRaw MpdCaptionContainerType = "RAW" + MpdCaptionContainerTypeFragmentedMp4 MpdCaptionContainerType = "FRAGMENTED_MP4" ) // Values returns all known values for MpdCaptionContainerType. Note that this can @@ -4952,8 +4952,8 @@ type Mpeg2CodecProfile string // Enum values for Mpeg2CodecProfile const ( - Mpeg2CodecProfileMain Mpeg2CodecProfile = "MAIN" - Mpeg2CodecProfileProfile_422 Mpeg2CodecProfile = "PROFILE_422" + Mpeg2CodecProfileMain Mpeg2CodecProfile = "MAIN" + Mpeg2CodecProfileProfile422 Mpeg2CodecProfile = "PROFILE_422" ) // Values returns all known values for Mpeg2CodecProfile. Note that this can be @@ -4988,8 +4988,8 @@ type Mpeg2FramerateControl string // Enum values for Mpeg2FramerateControl const ( - Mpeg2FramerateControlInitialize_from_source Mpeg2FramerateControl = "INITIALIZE_FROM_SOURCE" - Mpeg2FramerateControlSpecified Mpeg2FramerateControl = "SPECIFIED" + Mpeg2FramerateControlInitializeFromSource Mpeg2FramerateControl = "INITIALIZE_FROM_SOURCE" + Mpeg2FramerateControlSpecified Mpeg2FramerateControl = "SPECIFIED" ) // Values returns all known values for Mpeg2FramerateControl. Note that this can be @@ -5006,9 +5006,9 @@ type Mpeg2FramerateConversionAlgorithm string // Enum values for Mpeg2FramerateConversionAlgorithm const ( - Mpeg2FramerateConversionAlgorithmDuplicate_drop Mpeg2FramerateConversionAlgorithm = "DUPLICATE_DROP" - Mpeg2FramerateConversionAlgorithmInterpolate Mpeg2FramerateConversionAlgorithm = "INTERPOLATE" - Mpeg2FramerateConversionAlgorithmFrameformer Mpeg2FramerateConversionAlgorithm = "FRAMEFORMER" + Mpeg2FramerateConversionAlgorithmDuplicateDrop Mpeg2FramerateConversionAlgorithm = "DUPLICATE_DROP" + Mpeg2FramerateConversionAlgorithmInterpolate Mpeg2FramerateConversionAlgorithm = "INTERPOLATE" + Mpeg2FramerateConversionAlgorithmFrameformer Mpeg2FramerateConversionAlgorithm = "FRAMEFORMER" ) // Values returns all known values for Mpeg2FramerateConversionAlgorithm. 
Note that @@ -5045,11 +5045,11 @@ type Mpeg2InterlaceMode string // Enum values for Mpeg2InterlaceMode const ( - Mpeg2InterlaceModeProgressive Mpeg2InterlaceMode = "PROGRESSIVE" - Mpeg2InterlaceModeTop_field Mpeg2InterlaceMode = "TOP_FIELD" - Mpeg2InterlaceModeBottom_field Mpeg2InterlaceMode = "BOTTOM_FIELD" - Mpeg2InterlaceModeFollow_top_field Mpeg2InterlaceMode = "FOLLOW_TOP_FIELD" - Mpeg2InterlaceModeFollow_bottom_field Mpeg2InterlaceMode = "FOLLOW_BOTTOM_FIELD" + Mpeg2InterlaceModeProgressive Mpeg2InterlaceMode = "PROGRESSIVE" + Mpeg2InterlaceModeTopField Mpeg2InterlaceMode = "TOP_FIELD" + Mpeg2InterlaceModeBottomField Mpeg2InterlaceMode = "BOTTOM_FIELD" + Mpeg2InterlaceModeFollowTopField Mpeg2InterlaceMode = "FOLLOW_TOP_FIELD" + Mpeg2InterlaceModeFollowBottomField Mpeg2InterlaceMode = "FOLLOW_BOTTOM_FIELD" ) // Values returns all known values for Mpeg2InterlaceMode. Note that this can be @@ -5069,11 +5069,11 @@ type Mpeg2IntraDcPrecision string // Enum values for Mpeg2IntraDcPrecision const ( - Mpeg2IntraDcPrecisionAuto Mpeg2IntraDcPrecision = "AUTO" - Mpeg2IntraDcPrecisionIntra_dc_precision_8 Mpeg2IntraDcPrecision = "INTRA_DC_PRECISION_8" - Mpeg2IntraDcPrecisionIntra_dc_precision_9 Mpeg2IntraDcPrecision = "INTRA_DC_PRECISION_9" - Mpeg2IntraDcPrecisionIntra_dc_precision_10 Mpeg2IntraDcPrecision = "INTRA_DC_PRECISION_10" - Mpeg2IntraDcPrecisionIntra_dc_precision_11 Mpeg2IntraDcPrecision = "INTRA_DC_PRECISION_11" + Mpeg2IntraDcPrecisionAuto Mpeg2IntraDcPrecision = "AUTO" + Mpeg2IntraDcPrecisionIntraDcPrecision8 Mpeg2IntraDcPrecision = "INTRA_DC_PRECISION_8" + Mpeg2IntraDcPrecisionIntraDcPrecision9 Mpeg2IntraDcPrecision = "INTRA_DC_PRECISION_9" + Mpeg2IntraDcPrecisionIntraDcPrecision10 Mpeg2IntraDcPrecision = "INTRA_DC_PRECISION_10" + Mpeg2IntraDcPrecisionIntraDcPrecision11 Mpeg2IntraDcPrecision = "INTRA_DC_PRECISION_11" ) // Values returns all known values for Mpeg2IntraDcPrecision. Note that this can be @@ -5093,8 +5093,8 @@ type Mpeg2ParControl string // Enum values for Mpeg2ParControl const ( - Mpeg2ParControlInitialize_from_source Mpeg2ParControl = "INITIALIZE_FROM_SOURCE" - Mpeg2ParControlSpecified Mpeg2ParControl = "SPECIFIED" + Mpeg2ParControlInitializeFromSource Mpeg2ParControl = "INITIALIZE_FROM_SOURCE" + Mpeg2ParControlSpecified Mpeg2ParControl = "SPECIFIED" ) // Values returns all known values for Mpeg2ParControl. Note that this can be @@ -5111,8 +5111,8 @@ type Mpeg2QualityTuningLevel string // Enum values for Mpeg2QualityTuningLevel const ( - Mpeg2QualityTuningLevelSingle_pass Mpeg2QualityTuningLevel = "SINGLE_PASS" - Mpeg2QualityTuningLevelMulti_pass Mpeg2QualityTuningLevel = "MULTI_PASS" + Mpeg2QualityTuningLevelSinglePass Mpeg2QualityTuningLevel = "SINGLE_PASS" + Mpeg2QualityTuningLevelMultiPass Mpeg2QualityTuningLevel = "MULTI_PASS" ) // Values returns all known values for Mpeg2QualityTuningLevel. Note that this can @@ -5203,7 +5203,7 @@ type Mpeg2Syntax string // Enum values for Mpeg2Syntax const ( Mpeg2SyntaxDefault Mpeg2Syntax = "DEFAULT" - Mpeg2SyntaxD_10 Mpeg2Syntax = "D_10" + Mpeg2SyntaxD10 Mpeg2Syntax = "D_10" ) // Values returns all known values for Mpeg2Syntax. 
Note that this can be expanded @@ -5259,8 +5259,8 @@ type MsSmoothAudioDeduplication string // Enum values for MsSmoothAudioDeduplication const ( - MsSmoothAudioDeduplicationCombine_duplicate_streams MsSmoothAudioDeduplication = "COMBINE_DUPLICATE_STREAMS" - MsSmoothAudioDeduplicationNone MsSmoothAudioDeduplication = "NONE" + MsSmoothAudioDeduplicationCombineDuplicateStreams MsSmoothAudioDeduplication = "COMBINE_DUPLICATE_STREAMS" + MsSmoothAudioDeduplicationNone MsSmoothAudioDeduplication = "NONE" ) // Values returns all known values for MsSmoothAudioDeduplication. Note that this @@ -5295,8 +5295,8 @@ type MxfAfdSignaling string // Enum values for MxfAfdSignaling const ( - MxfAfdSignalingNo_copy MxfAfdSignaling = "NO_COPY" - MxfAfdSignalingCopy_from_video MxfAfdSignaling = "COPY_FROM_VIDEO" + MxfAfdSignalingNoCopy MxfAfdSignaling = "NO_COPY" + MxfAfdSignalingCopyFromVideo MxfAfdSignaling = "COPY_FROM_VIDEO" ) // Values returns all known values for MxfAfdSignaling. Note that this can be @@ -5313,7 +5313,7 @@ type MxfProfile string // Enum values for MxfProfile const ( - MxfProfileD_10 MxfProfile = "D_10" + MxfProfileD10 MxfProfile = "D_10" MxfProfileXdcam MxfProfile = "XDCAM" MxfProfileOp1a MxfProfile = "OP1A" ) @@ -5333,9 +5333,9 @@ type NielsenActiveWatermarkProcessType string // Enum values for NielsenActiveWatermarkProcessType const ( - NielsenActiveWatermarkProcessTypeNaes2_and_nw NielsenActiveWatermarkProcessType = "NAES2_AND_NW" - NielsenActiveWatermarkProcessTypeCbet NielsenActiveWatermarkProcessType = "CBET" - NielsenActiveWatermarkProcessTypeNaes2_and_nw_and_cbet NielsenActiveWatermarkProcessType = "NAES2_AND_NW_AND_CBET" + NielsenActiveWatermarkProcessTypeNaes2AndNw NielsenActiveWatermarkProcessType = "NAES2_AND_NW" + NielsenActiveWatermarkProcessTypeCbet NielsenActiveWatermarkProcessType = "CBET" + NielsenActiveWatermarkProcessTypeNaes2AndNwAndCbet NielsenActiveWatermarkProcessType = "NAES2_AND_NW_AND_CBET" ) // Values returns all known values for NielsenActiveWatermarkProcessType. Note that @@ -5373,8 +5373,8 @@ type NielsenUniqueTicPerAudioTrackType string // Enum values for NielsenUniqueTicPerAudioTrackType const ( - NielsenUniqueTicPerAudioTrackTypeReserve_unique_tics_per_track NielsenUniqueTicPerAudioTrackType = "RESERVE_UNIQUE_TICS_PER_TRACK" - NielsenUniqueTicPerAudioTrackTypeSame_tics_per_track NielsenUniqueTicPerAudioTrackType = "SAME_TICS_PER_TRACK" + NielsenUniqueTicPerAudioTrackTypeReserveUniqueTicsPerTrack NielsenUniqueTicPerAudioTrackType = "RESERVE_UNIQUE_TICS_PER_TRACK" + NielsenUniqueTicPerAudioTrackTypeSameTicsPerTrack NielsenUniqueTicPerAudioTrackType = "SAME_TICS_PER_TRACK" ) // Values returns all known values for NielsenUniqueTicPerAudioTrackType. 
Note that @@ -5461,11 +5461,11 @@ type OutputGroupType string // Enum values for OutputGroupType const ( - OutputGroupTypeHls_group_settings OutputGroupType = "HLS_GROUP_SETTINGS" - OutputGroupTypeDash_iso_group_settings OutputGroupType = "DASH_ISO_GROUP_SETTINGS" - OutputGroupTypeFile_group_settings OutputGroupType = "FILE_GROUP_SETTINGS" - OutputGroupTypeMs_smooth_group_settings OutputGroupType = "MS_SMOOTH_GROUP_SETTINGS" - OutputGroupTypeCmaf_group_settings OutputGroupType = "CMAF_GROUP_SETTINGS" + OutputGroupTypeHlsGroupSettings OutputGroupType = "HLS_GROUP_SETTINGS" + OutputGroupTypeDashIsoGroupSettings OutputGroupType = "DASH_ISO_GROUP_SETTINGS" + OutputGroupTypeFileGroupSettings OutputGroupType = "FILE_GROUP_SETTINGS" + OutputGroupTypeMsSmoothGroupSettings OutputGroupType = "MS_SMOOTH_GROUP_SETTINGS" + OutputGroupTypeCmafGroupSettings OutputGroupType = "CMAF_GROUP_SETTINGS" ) // Values returns all known values for OutputGroupType. Note that this can be @@ -5485,10 +5485,10 @@ type OutputSdt string // Enum values for OutputSdt const ( - OutputSdtSdt_follow OutputSdt = "SDT_FOLLOW" - OutputSdtSdt_follow_if_present OutputSdt = "SDT_FOLLOW_IF_PRESENT" - OutputSdtSdt_manual OutputSdt = "SDT_MANUAL" - OutputSdtSdt_none OutputSdt = "SDT_NONE" + OutputSdtSdtFollow OutputSdt = "SDT_FOLLOW" + OutputSdtSdtFollowIfPresent OutputSdt = "SDT_FOLLOW_IF_PRESENT" + OutputSdtSdtManual OutputSdt = "SDT_MANUAL" + OutputSdtSdtNone OutputSdt = "SDT_NONE" ) // Values returns all known values for OutputSdt. Note that this can be expanded in @@ -5507,9 +5507,9 @@ type PresetListBy string // Enum values for PresetListBy const ( - PresetListByName PresetListBy = "NAME" - PresetListByCreation_date PresetListBy = "CREATION_DATE" - PresetListBySystem PresetListBy = "SYSTEM" + PresetListByName PresetListBy = "NAME" + PresetListByCreationDate PresetListBy = "CREATION_DATE" + PresetListBySystem PresetListBy = "SYSTEM" ) // Values returns all known values for PresetListBy. Note that this can be expanded @@ -5527,8 +5527,8 @@ type PricingPlan string // Enum values for PricingPlan const ( - PricingPlanOn_demand PricingPlan = "ON_DEMAND" - PricingPlanReserved PricingPlan = "RESERVED" + PricingPlanOnDemand PricingPlan = "ON_DEMAND" + PricingPlanReserved PricingPlan = "RESERVED" ) // Values returns all known values for PricingPlan. Note that this can be expanded @@ -5545,10 +5545,10 @@ type ProresCodecProfile string // Enum values for ProresCodecProfile const ( - ProresCodecProfileApple_prores_422 ProresCodecProfile = "APPLE_PRORES_422" - ProresCodecProfileApple_prores_422_hq ProresCodecProfile = "APPLE_PRORES_422_HQ" - ProresCodecProfileApple_prores_422_lt ProresCodecProfile = "APPLE_PRORES_422_LT" - ProresCodecProfileApple_prores_422_proxy ProresCodecProfile = "APPLE_PRORES_422_PROXY" + ProresCodecProfileAppleProres422 ProresCodecProfile = "APPLE_PRORES_422" + ProresCodecProfileAppleProres422Hq ProresCodecProfile = "APPLE_PRORES_422_HQ" + ProresCodecProfileAppleProres422Lt ProresCodecProfile = "APPLE_PRORES_422_LT" + ProresCodecProfileAppleProres422Proxy ProresCodecProfile = "APPLE_PRORES_422_PROXY" ) // Values returns all known values for ProresCodecProfile. 
Note that this can be @@ -5567,8 +5567,8 @@ type ProresFramerateControl string // Enum values for ProresFramerateControl const ( - ProresFramerateControlInitialize_from_source ProresFramerateControl = "INITIALIZE_FROM_SOURCE" - ProresFramerateControlSpecified ProresFramerateControl = "SPECIFIED" + ProresFramerateControlInitializeFromSource ProresFramerateControl = "INITIALIZE_FROM_SOURCE" + ProresFramerateControlSpecified ProresFramerateControl = "SPECIFIED" ) // Values returns all known values for ProresFramerateControl. Note that this can @@ -5585,9 +5585,9 @@ type ProresFramerateConversionAlgorithm string // Enum values for ProresFramerateConversionAlgorithm const ( - ProresFramerateConversionAlgorithmDuplicate_drop ProresFramerateConversionAlgorithm = "DUPLICATE_DROP" - ProresFramerateConversionAlgorithmInterpolate ProresFramerateConversionAlgorithm = "INTERPOLATE" - ProresFramerateConversionAlgorithmFrameformer ProresFramerateConversionAlgorithm = "FRAMEFORMER" + ProresFramerateConversionAlgorithmDuplicateDrop ProresFramerateConversionAlgorithm = "DUPLICATE_DROP" + ProresFramerateConversionAlgorithmInterpolate ProresFramerateConversionAlgorithm = "INTERPOLATE" + ProresFramerateConversionAlgorithmFrameformer ProresFramerateConversionAlgorithm = "FRAMEFORMER" ) // Values returns all known values for ProresFramerateConversionAlgorithm. Note @@ -5606,11 +5606,11 @@ type ProresInterlaceMode string // Enum values for ProresInterlaceMode const ( - ProresInterlaceModeProgressive ProresInterlaceMode = "PROGRESSIVE" - ProresInterlaceModeTop_field ProresInterlaceMode = "TOP_FIELD" - ProresInterlaceModeBottom_field ProresInterlaceMode = "BOTTOM_FIELD" - ProresInterlaceModeFollow_top_field ProresInterlaceMode = "FOLLOW_TOP_FIELD" - ProresInterlaceModeFollow_bottom_field ProresInterlaceMode = "FOLLOW_BOTTOM_FIELD" + ProresInterlaceModeProgressive ProresInterlaceMode = "PROGRESSIVE" + ProresInterlaceModeTopField ProresInterlaceMode = "TOP_FIELD" + ProresInterlaceModeBottomField ProresInterlaceMode = "BOTTOM_FIELD" + ProresInterlaceModeFollowTopField ProresInterlaceMode = "FOLLOW_TOP_FIELD" + ProresInterlaceModeFollowBottomField ProresInterlaceMode = "FOLLOW_BOTTOM_FIELD" ) // Values returns all known values for ProresInterlaceMode. Note that this can be @@ -5630,8 +5630,8 @@ type ProresParControl string // Enum values for ProresParControl const ( - ProresParControlInitialize_from_source ProresParControl = "INITIALIZE_FROM_SOURCE" - ProresParControlSpecified ProresParControl = "SPECIFIED" + ProresParControlInitializeFromSource ProresParControl = "INITIALIZE_FROM_SOURCE" + ProresParControlSpecified ProresParControl = "SPECIFIED" ) // Values returns all known values for ProresParControl. Note that this can be @@ -5684,8 +5684,8 @@ type QueueListBy string // Enum values for QueueListBy const ( - QueueListByName QueueListBy = "NAME" - QueueListByCreation_date QueueListBy = "CREATION_DATE" + QueueListByName QueueListBy = "NAME" + QueueListByCreationDate QueueListBy = "CREATION_DATE" ) // Values returns all known values for QueueListBy. Note that this can be expanded @@ -5720,8 +5720,8 @@ type RenewalType string // Enum values for RenewalType const ( - RenewalTypeAuto_renew RenewalType = "AUTO_RENEW" - RenewalTypeExpire RenewalType = "EXPIRE" + RenewalTypeAutoRenew RenewalType = "AUTO_RENEW" + RenewalTypeExpire RenewalType = "EXPIRE" ) // Values returns all known values for RenewalType. 
Note that this can be expanded @@ -5776,10 +5776,10 @@ type S3ObjectCannedAcl string // Enum values for S3ObjectCannedAcl const ( - S3ObjectCannedAclPublic_read S3ObjectCannedAcl = "PUBLIC_READ" - S3ObjectCannedAclAuthenticated_read S3ObjectCannedAcl = "AUTHENTICATED_READ" - S3ObjectCannedAclBucket_owner_read S3ObjectCannedAcl = "BUCKET_OWNER_READ" - S3ObjectCannedAclBucket_owner_full_control S3ObjectCannedAcl = "BUCKET_OWNER_FULL_CONTROL" + S3ObjectCannedAclPublicRead S3ObjectCannedAcl = "PUBLIC_READ" + S3ObjectCannedAclAuthenticatedRead S3ObjectCannedAcl = "AUTHENTICATED_READ" + S3ObjectCannedAclBucketOwnerRead S3ObjectCannedAcl = "BUCKET_OWNER_READ" + S3ObjectCannedAclBucketOwnerFullControl S3ObjectCannedAcl = "BUCKET_OWNER_FULL_CONTROL" ) // Values returns all known values for S3ObjectCannedAcl. Note that this can be @@ -5798,8 +5798,8 @@ type S3ServerSideEncryptionType string // Enum values for S3ServerSideEncryptionType const ( - S3ServerSideEncryptionTypeServer_side_encryption_s3 S3ServerSideEncryptionType = "SERVER_SIDE_ENCRYPTION_S3" - S3ServerSideEncryptionTypeServer_side_encryption_kms S3ServerSideEncryptionType = "SERVER_SIDE_ENCRYPTION_KMS" + S3ServerSideEncryptionTypeServerSideEncryptionS3 S3ServerSideEncryptionType = "SERVER_SIDE_ENCRYPTION_S3" + S3ServerSideEncryptionTypeServerSideEncryptionKms S3ServerSideEncryptionType = "SERVER_SIDE_ENCRYPTION_KMS" ) // Values returns all known values for S3ServerSideEncryptionType. Note that this @@ -5816,8 +5816,8 @@ type ScalingBehavior string // Enum values for ScalingBehavior const ( - ScalingBehaviorDefault ScalingBehavior = "DEFAULT" - ScalingBehaviorStretch_to_output ScalingBehavior = "STRETCH_TO_OUTPUT" + ScalingBehaviorDefault ScalingBehavior = "DEFAULT" + ScalingBehaviorStretchToOutput ScalingBehavior = "STRETCH_TO_OUTPUT" ) // Values returns all known values for ScalingBehavior. Note that this can be @@ -5834,11 +5834,11 @@ type SccDestinationFramerate string // Enum values for SccDestinationFramerate const ( - SccDestinationFramerateFramerate_23_97 SccDestinationFramerate = "FRAMERATE_23_97" - SccDestinationFramerateFramerate_24 SccDestinationFramerate = "FRAMERATE_24" - SccDestinationFramerateFramerate_25 SccDestinationFramerate = "FRAMERATE_25" - SccDestinationFramerateFramerate_29_97_dropframe SccDestinationFramerate = "FRAMERATE_29_97_DROPFRAME" - SccDestinationFramerateFramerate_29_97_non_dropframe SccDestinationFramerate = "FRAMERATE_29_97_NON_DROPFRAME" + SccDestinationFramerateFramerate2397 SccDestinationFramerate = "FRAMERATE_23_97" + SccDestinationFramerateFramerate24 SccDestinationFramerate = "FRAMERATE_24" + SccDestinationFramerateFramerate25 SccDestinationFramerate = "FRAMERATE_25" + SccDestinationFramerateFramerate2997Dropframe SccDestinationFramerate = "FRAMERATE_29_97_DROPFRAME" + SccDestinationFramerateFramerate2997NonDropframe SccDestinationFramerate = "FRAMERATE_29_97_NON_DROPFRAME" ) // Values returns all known values for SccDestinationFramerate. 
Note that this can @@ -5876,21 +5876,21 @@ type StatusUpdateInterval string // Enum values for StatusUpdateInterval const ( - StatusUpdateIntervalSeconds_10 StatusUpdateInterval = "SECONDS_10" - StatusUpdateIntervalSeconds_12 StatusUpdateInterval = "SECONDS_12" - StatusUpdateIntervalSeconds_15 StatusUpdateInterval = "SECONDS_15" - StatusUpdateIntervalSeconds_20 StatusUpdateInterval = "SECONDS_20" - StatusUpdateIntervalSeconds_30 StatusUpdateInterval = "SECONDS_30" - StatusUpdateIntervalSeconds_60 StatusUpdateInterval = "SECONDS_60" - StatusUpdateIntervalSeconds_120 StatusUpdateInterval = "SECONDS_120" - StatusUpdateIntervalSeconds_180 StatusUpdateInterval = "SECONDS_180" - StatusUpdateIntervalSeconds_240 StatusUpdateInterval = "SECONDS_240" - StatusUpdateIntervalSeconds_300 StatusUpdateInterval = "SECONDS_300" - StatusUpdateIntervalSeconds_360 StatusUpdateInterval = "SECONDS_360" - StatusUpdateIntervalSeconds_420 StatusUpdateInterval = "SECONDS_420" - StatusUpdateIntervalSeconds_480 StatusUpdateInterval = "SECONDS_480" - StatusUpdateIntervalSeconds_540 StatusUpdateInterval = "SECONDS_540" - StatusUpdateIntervalSeconds_600 StatusUpdateInterval = "SECONDS_600" + StatusUpdateIntervalSeconds10 StatusUpdateInterval = "SECONDS_10" + StatusUpdateIntervalSeconds12 StatusUpdateInterval = "SECONDS_12" + StatusUpdateIntervalSeconds15 StatusUpdateInterval = "SECONDS_15" + StatusUpdateIntervalSeconds20 StatusUpdateInterval = "SECONDS_20" + StatusUpdateIntervalSeconds30 StatusUpdateInterval = "SECONDS_30" + StatusUpdateIntervalSeconds60 StatusUpdateInterval = "SECONDS_60" + StatusUpdateIntervalSeconds120 StatusUpdateInterval = "SECONDS_120" + StatusUpdateIntervalSeconds180 StatusUpdateInterval = "SECONDS_180" + StatusUpdateIntervalSeconds240 StatusUpdateInterval = "SECONDS_240" + StatusUpdateIntervalSeconds300 StatusUpdateInterval = "SECONDS_300" + StatusUpdateIntervalSeconds360 StatusUpdateInterval = "SECONDS_360" + StatusUpdateIntervalSeconds420 StatusUpdateInterval = "SECONDS_420" + StatusUpdateIntervalSeconds480 StatusUpdateInterval = "SECONDS_480" + StatusUpdateIntervalSeconds540 StatusUpdateInterval = "SECONDS_540" + StatusUpdateIntervalSeconds600 StatusUpdateInterval = "SECONDS_600" ) // Values returns all known values for StatusUpdateInterval. Note that this can be @@ -5920,11 +5920,11 @@ type TeletextPageType string // Enum values for TeletextPageType const ( - TeletextPageTypePage_type_initial TeletextPageType = "PAGE_TYPE_INITIAL" - TeletextPageTypePage_type_subtitle TeletextPageType = "PAGE_TYPE_SUBTITLE" - TeletextPageTypePage_type_addl_info TeletextPageType = "PAGE_TYPE_ADDL_INFO" - TeletextPageTypePage_type_program_schedule TeletextPageType = "PAGE_TYPE_PROGRAM_SCHEDULE" - TeletextPageTypePage_type_hearing_impaired_subtitle TeletextPageType = "PAGE_TYPE_HEARING_IMPAIRED_SUBTITLE" + TeletextPageTypePageTypeInitial TeletextPageType = "PAGE_TYPE_INITIAL" + TeletextPageTypePageTypeSubtitle TeletextPageType = "PAGE_TYPE_SUBTITLE" + TeletextPageTypePageTypeAddlInfo TeletextPageType = "PAGE_TYPE_ADDL_INFO" + TeletextPageTypePageTypeProgramSchedule TeletextPageType = "PAGE_TYPE_PROGRAM_SCHEDULE" + TeletextPageTypePageTypeHearingImpairedSubtitle TeletextPageType = "PAGE_TYPE_HEARING_IMPAIRED_SUBTITLE" ) // Values returns all known values for TeletextPageType. 
Note that this can be @@ -5944,15 +5944,15 @@ type TimecodeBurninPosition string // Enum values for TimecodeBurninPosition const ( - TimecodeBurninPositionTop_center TimecodeBurninPosition = "TOP_CENTER" - TimecodeBurninPositionTop_left TimecodeBurninPosition = "TOP_LEFT" - TimecodeBurninPositionTop_right TimecodeBurninPosition = "TOP_RIGHT" - TimecodeBurninPositionMiddle_left TimecodeBurninPosition = "MIDDLE_LEFT" - TimecodeBurninPositionMiddle_center TimecodeBurninPosition = "MIDDLE_CENTER" - TimecodeBurninPositionMiddle_right TimecodeBurninPosition = "MIDDLE_RIGHT" - TimecodeBurninPositionBottom_left TimecodeBurninPosition = "BOTTOM_LEFT" - TimecodeBurninPositionBottom_center TimecodeBurninPosition = "BOTTOM_CENTER" - TimecodeBurninPositionBottom_right TimecodeBurninPosition = "BOTTOM_RIGHT" + TimecodeBurninPositionTopCenter TimecodeBurninPosition = "TOP_CENTER" + TimecodeBurninPositionTopLeft TimecodeBurninPosition = "TOP_LEFT" + TimecodeBurninPositionTopRight TimecodeBurninPosition = "TOP_RIGHT" + TimecodeBurninPositionMiddleLeft TimecodeBurninPosition = "MIDDLE_LEFT" + TimecodeBurninPositionMiddleCenter TimecodeBurninPosition = "MIDDLE_CENTER" + TimecodeBurninPositionMiddleRight TimecodeBurninPosition = "MIDDLE_RIGHT" + TimecodeBurninPositionBottomLeft TimecodeBurninPosition = "BOTTOM_LEFT" + TimecodeBurninPositionBottomCenter TimecodeBurninPosition = "BOTTOM_CENTER" + TimecodeBurninPositionBottomRight TimecodeBurninPosition = "BOTTOM_RIGHT" ) // Values returns all known values for TimecodeBurninPosition. Note that this can @@ -6050,9 +6050,9 @@ type Vc3Class string // Enum values for Vc3Class const ( - Vc3ClassClass_145_8bit Vc3Class = "CLASS_145_8BIT" - Vc3ClassClass_220_8bit Vc3Class = "CLASS_220_8BIT" - Vc3ClassClass_220_10bit Vc3Class = "CLASS_220_10BIT" + Vc3ClassClass1458bit Vc3Class = "CLASS_145_8BIT" + Vc3ClassClass2208bit Vc3Class = "CLASS_220_8BIT" + Vc3ClassClass22010bit Vc3Class = "CLASS_220_10BIT" ) // Values returns all known values for Vc3Class. Note that this can be expanded in @@ -6070,8 +6070,8 @@ type Vc3FramerateControl string // Enum values for Vc3FramerateControl const ( - Vc3FramerateControlInitialize_from_source Vc3FramerateControl = "INITIALIZE_FROM_SOURCE" - Vc3FramerateControlSpecified Vc3FramerateControl = "SPECIFIED" + Vc3FramerateControlInitializeFromSource Vc3FramerateControl = "INITIALIZE_FROM_SOURCE" + Vc3FramerateControlSpecified Vc3FramerateControl = "SPECIFIED" ) // Values returns all known values for Vc3FramerateControl. Note that this can be @@ -6088,9 +6088,9 @@ type Vc3FramerateConversionAlgorithm string // Enum values for Vc3FramerateConversionAlgorithm const ( - Vc3FramerateConversionAlgorithmDuplicate_drop Vc3FramerateConversionAlgorithm = "DUPLICATE_DROP" - Vc3FramerateConversionAlgorithmInterpolate Vc3FramerateConversionAlgorithm = "INTERPOLATE" - Vc3FramerateConversionAlgorithmFrameformer Vc3FramerateConversionAlgorithm = "FRAMEFORMER" + Vc3FramerateConversionAlgorithmDuplicateDrop Vc3FramerateConversionAlgorithm = "DUPLICATE_DROP" + Vc3FramerateConversionAlgorithmInterpolate Vc3FramerateConversionAlgorithm = "INTERPOLATE" + Vc3FramerateConversionAlgorithmFrameformer Vc3FramerateConversionAlgorithm = "FRAMEFORMER" ) // Values returns all known values for Vc3FramerateConversionAlgorithm. 
Note that @@ -6163,16 +6163,16 @@ type VideoCodec string // Enum values for VideoCodec const ( - VideoCodecAv1 VideoCodec = "AV1" - VideoCodecAvc_intra VideoCodec = "AVC_INTRA" - VideoCodecFrame_capture VideoCodec = "FRAME_CAPTURE" - VideoCodecH_264 VideoCodec = "H_264" - VideoCodecH_265 VideoCodec = "H_265" - VideoCodecMpeg2 VideoCodec = "MPEG2" - VideoCodecProres VideoCodec = "PRORES" - VideoCodecVc3 VideoCodec = "VC3" - VideoCodecVp8 VideoCodec = "VP8" - VideoCodecVp9 VideoCodec = "VP9" + VideoCodecAv1 VideoCodec = "AV1" + VideoCodecAvcIntra VideoCodec = "AVC_INTRA" + VideoCodecFrameCapture VideoCodec = "FRAME_CAPTURE" + VideoCodecH264 VideoCodec = "H_264" + VideoCodecH265 VideoCodec = "H_265" + VideoCodecMpeg2 VideoCodec = "MPEG2" + VideoCodecProres VideoCodec = "PRORES" + VideoCodecVc3 VideoCodec = "VC3" + VideoCodecVp8 VideoCodec = "VP8" + VideoCodecVp9 VideoCodec = "VP9" ) // Values returns all known values for VideoCodec. Note that this can be expanded @@ -6197,8 +6197,8 @@ type VideoTimecodeInsertion string // Enum values for VideoTimecodeInsertion const ( - VideoTimecodeInsertionDisabled VideoTimecodeInsertion = "DISABLED" - VideoTimecodeInsertionPic_timing_sei VideoTimecodeInsertion = "PIC_TIMING_SEI" + VideoTimecodeInsertionDisabled VideoTimecodeInsertion = "DISABLED" + VideoTimecodeInsertionPicTimingSei VideoTimecodeInsertion = "PIC_TIMING_SEI" ) // Values returns all known values for VideoTimecodeInsertion. Note that this can @@ -6215,8 +6215,8 @@ type Vp8FramerateControl string // Enum values for Vp8FramerateControl const ( - Vp8FramerateControlInitialize_from_source Vp8FramerateControl = "INITIALIZE_FROM_SOURCE" - Vp8FramerateControlSpecified Vp8FramerateControl = "SPECIFIED" + Vp8FramerateControlInitializeFromSource Vp8FramerateControl = "INITIALIZE_FROM_SOURCE" + Vp8FramerateControlSpecified Vp8FramerateControl = "SPECIFIED" ) // Values returns all known values for Vp8FramerateControl. Note that this can be @@ -6233,9 +6233,9 @@ type Vp8FramerateConversionAlgorithm string // Enum values for Vp8FramerateConversionAlgorithm const ( - Vp8FramerateConversionAlgorithmDuplicate_drop Vp8FramerateConversionAlgorithm = "DUPLICATE_DROP" - Vp8FramerateConversionAlgorithmInterpolate Vp8FramerateConversionAlgorithm = "INTERPOLATE" - Vp8FramerateConversionAlgorithmFrameformer Vp8FramerateConversionAlgorithm = "FRAMEFORMER" + Vp8FramerateConversionAlgorithmDuplicateDrop Vp8FramerateConversionAlgorithm = "DUPLICATE_DROP" + Vp8FramerateConversionAlgorithmInterpolate Vp8FramerateConversionAlgorithm = "INTERPOLATE" + Vp8FramerateConversionAlgorithmFrameformer Vp8FramerateConversionAlgorithm = "FRAMEFORMER" ) // Values returns all known values for Vp8FramerateConversionAlgorithm. Note that @@ -6254,8 +6254,8 @@ type Vp8ParControl string // Enum values for Vp8ParControl const ( - Vp8ParControlInitialize_from_source Vp8ParControl = "INITIALIZE_FROM_SOURCE" - Vp8ParControlSpecified Vp8ParControl = "SPECIFIED" + Vp8ParControlInitializeFromSource Vp8ParControl = "INITIALIZE_FROM_SOURCE" + Vp8ParControlSpecified Vp8ParControl = "SPECIFIED" ) // Values returns all known values for Vp8ParControl. 
Note that this can be @@ -6272,8 +6272,8 @@ type Vp8QualityTuningLevel string // Enum values for Vp8QualityTuningLevel const ( - Vp8QualityTuningLevelMulti_pass Vp8QualityTuningLevel = "MULTI_PASS" - Vp8QualityTuningLevelMulti_pass_hq Vp8QualityTuningLevel = "MULTI_PASS_HQ" + Vp8QualityTuningLevelMultiPass Vp8QualityTuningLevel = "MULTI_PASS" + Vp8QualityTuningLevelMultiPassHq Vp8QualityTuningLevel = "MULTI_PASS_HQ" ) // Values returns all known values for Vp8QualityTuningLevel. Note that this can be @@ -6306,8 +6306,8 @@ type Vp9FramerateControl string // Enum values for Vp9FramerateControl const ( - Vp9FramerateControlInitialize_from_source Vp9FramerateControl = "INITIALIZE_FROM_SOURCE" - Vp9FramerateControlSpecified Vp9FramerateControl = "SPECIFIED" + Vp9FramerateControlInitializeFromSource Vp9FramerateControl = "INITIALIZE_FROM_SOURCE" + Vp9FramerateControlSpecified Vp9FramerateControl = "SPECIFIED" ) // Values returns all known values for Vp9FramerateControl. Note that this can be @@ -6324,9 +6324,9 @@ type Vp9FramerateConversionAlgorithm string // Enum values for Vp9FramerateConversionAlgorithm const ( - Vp9FramerateConversionAlgorithmDuplicate_drop Vp9FramerateConversionAlgorithm = "DUPLICATE_DROP" - Vp9FramerateConversionAlgorithmInterpolate Vp9FramerateConversionAlgorithm = "INTERPOLATE" - Vp9FramerateConversionAlgorithmFrameformer Vp9FramerateConversionAlgorithm = "FRAMEFORMER" + Vp9FramerateConversionAlgorithmDuplicateDrop Vp9FramerateConversionAlgorithm = "DUPLICATE_DROP" + Vp9FramerateConversionAlgorithmInterpolate Vp9FramerateConversionAlgorithm = "INTERPOLATE" + Vp9FramerateConversionAlgorithmFrameformer Vp9FramerateConversionAlgorithm = "FRAMEFORMER" ) // Values returns all known values for Vp9FramerateConversionAlgorithm. Note that @@ -6345,8 +6345,8 @@ type Vp9ParControl string // Enum values for Vp9ParControl const ( - Vp9ParControlInitialize_from_source Vp9ParControl = "INITIALIZE_FROM_SOURCE" - Vp9ParControlSpecified Vp9ParControl = "SPECIFIED" + Vp9ParControlInitializeFromSource Vp9ParControl = "INITIALIZE_FROM_SOURCE" + Vp9ParControlSpecified Vp9ParControl = "SPECIFIED" ) // Values returns all known values for Vp9ParControl. Note that this can be @@ -6363,8 +6363,8 @@ type Vp9QualityTuningLevel string // Enum values for Vp9QualityTuningLevel const ( - Vp9QualityTuningLevelMulti_pass Vp9QualityTuningLevel = "MULTI_PASS" - Vp9QualityTuningLevelMulti_pass_hq Vp9QualityTuningLevel = "MULTI_PASS_HQ" + Vp9QualityTuningLevelMultiPass Vp9QualityTuningLevel = "MULTI_PASS" + Vp9QualityTuningLevelMultiPassHq Vp9QualityTuningLevel = "MULTI_PASS_HQ" ) // Values returns all known values for Vp9QualityTuningLevel. 
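As a minimal illustration of the caller-side impact of the MediaConvert hunks above (a sketch only, assuming the standard github.com/aws/aws-sdk-go-v2/service/mediaconvert/types import path): the Go constant identifiers lose their underscore-separated suffixes in favor of CamelCase, while the quoted enum strings that the SDK serializes stay exactly as they were.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/mediaconvert/types"
)

func main() {
	// Only the Go identifier changed (VideoCodecH_264 -> VideoCodecH264);
	// the value serialized to the MediaConvert API is the same string as before.
	codec := types.VideoCodecH264
	fmt.Println(string(codec)) // prints "H_264"
}

Existing callers only need to update identifier spellings; no request payloads change.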
Note that this can be diff --git a/service/medialive/types/enums.go b/service/medialive/types/enums.go index fbd1866899e..94c4e8a408a 100644 --- a/service/medialive/types/enums.go +++ b/service/medialive/types/enums.go @@ -6,11 +6,11 @@ type AacCodingMode string // Enum values for AacCodingMode const ( - AacCodingModeAd_receiver_mix AacCodingMode = "AD_RECEIVER_MIX" - AacCodingModeCoding_mode_1_0 AacCodingMode = "CODING_MODE_1_0" - AacCodingModeCoding_mode_1_1 AacCodingMode = "CODING_MODE_1_1" - AacCodingModeCoding_mode_2_0 AacCodingMode = "CODING_MODE_2_0" - AacCodingModeCoding_mode_5_1 AacCodingMode = "CODING_MODE_5_1" + AacCodingModeAdReceiverMix AacCodingMode = "AD_RECEIVER_MIX" + AacCodingModeCodingMode10 AacCodingMode = "CODING_MODE_1_0" + AacCodingModeCodingMode11 AacCodingMode = "CODING_MODE_1_1" + AacCodingModeCodingMode20 AacCodingMode = "CODING_MODE_2_0" + AacCodingModeCodingMode51 AacCodingMode = "CODING_MODE_5_1" ) // Values returns all known values for AacCodingMode. Note that this can be @@ -30,8 +30,8 @@ type AacInputType string // Enum values for AacInputType const ( - AacInputTypeBroadcaster_mixed_ad AacInputType = "BROADCASTER_MIXED_AD" - AacInputTypeNormal AacInputType = "NORMAL" + AacInputTypeBroadcasterMixedAd AacInputType = "BROADCASTER_MIXED_AD" + AacInputTypeNormal AacInputType = "NORMAL" ) // Values returns all known values for AacInputType. Note that this can be expanded @@ -86,8 +86,8 @@ type AacRawFormat string // Enum values for AacRawFormat const ( - AacRawFormatLatm_loas AacRawFormat = "LATM_LOAS" - AacRawFormatNone AacRawFormat = "NONE" + AacRawFormatLatmLoas AacRawFormat = "LATM_LOAS" + AacRawFormatNone AacRawFormat = "NONE" ) // Values returns all known values for AacRawFormat. Note that this can be expanded @@ -122,10 +122,10 @@ type AacVbrQuality string // Enum values for AacVbrQuality const ( - AacVbrQualityHigh AacVbrQuality = "HIGH" - AacVbrQualityLow AacVbrQuality = "LOW" - AacVbrQualityMedium_high AacVbrQuality = "MEDIUM_HIGH" - AacVbrQualityMedium_low AacVbrQuality = "MEDIUM_LOW" + AacVbrQualityHigh AacVbrQuality = "HIGH" + AacVbrQualityLow AacVbrQuality = "LOW" + AacVbrQualityMediumHigh AacVbrQuality = "MEDIUM_HIGH" + AacVbrQualityMediumLow AacVbrQuality = "MEDIUM_LOW" ) // Values returns all known values for AacVbrQuality. Note that this can be @@ -144,14 +144,14 @@ type Ac3BitstreamMode string // Enum values for Ac3BitstreamMode const ( - Ac3BitstreamModeCommentary Ac3BitstreamMode = "COMMENTARY" - Ac3BitstreamModeComplete_main Ac3BitstreamMode = "COMPLETE_MAIN" - Ac3BitstreamModeDialogue Ac3BitstreamMode = "DIALOGUE" - Ac3BitstreamModeEmergency Ac3BitstreamMode = "EMERGENCY" - Ac3BitstreamModeHearing_impaired Ac3BitstreamMode = "HEARING_IMPAIRED" - Ac3BitstreamModeMusic_and_effects Ac3BitstreamMode = "MUSIC_AND_EFFECTS" - Ac3BitstreamModeVisually_impaired Ac3BitstreamMode = "VISUALLY_IMPAIRED" - Ac3BitstreamModeVoice_over Ac3BitstreamMode = "VOICE_OVER" + Ac3BitstreamModeCommentary Ac3BitstreamMode = "COMMENTARY" + Ac3BitstreamModeCompleteMain Ac3BitstreamMode = "COMPLETE_MAIN" + Ac3BitstreamModeDialogue Ac3BitstreamMode = "DIALOGUE" + Ac3BitstreamModeEmergency Ac3BitstreamMode = "EMERGENCY" + Ac3BitstreamModeHearingImpaired Ac3BitstreamMode = "HEARING_IMPAIRED" + Ac3BitstreamModeMusicAndEffects Ac3BitstreamMode = "MUSIC_AND_EFFECTS" + Ac3BitstreamModeVisuallyImpaired Ac3BitstreamMode = "VISUALLY_IMPAIRED" + Ac3BitstreamModeVoiceOver Ac3BitstreamMode = "VOICE_OVER" ) // Values returns all known values for Ac3BitstreamMode. 
Note that this can be @@ -174,10 +174,10 @@ type Ac3CodingMode string // Enum values for Ac3CodingMode const ( - Ac3CodingModeCoding_mode_1_0 Ac3CodingMode = "CODING_MODE_1_0" - Ac3CodingModeCoding_mode_1_1 Ac3CodingMode = "CODING_MODE_1_1" - Ac3CodingModeCoding_mode_2_0 Ac3CodingMode = "CODING_MODE_2_0" - Ac3CodingModeCoding_mode_3_2_lfe Ac3CodingMode = "CODING_MODE_3_2_LFE" + Ac3CodingModeCodingMode10 Ac3CodingMode = "CODING_MODE_1_0" + Ac3CodingModeCodingMode11 Ac3CodingMode = "CODING_MODE_1_1" + Ac3CodingModeCodingMode20 Ac3CodingMode = "CODING_MODE_2_0" + Ac3CodingModeCodingMode32Lfe Ac3CodingMode = "CODING_MODE_3_2_LFE" ) // Values returns all known values for Ac3CodingMode. Note that this can be @@ -196,8 +196,8 @@ type Ac3DrcProfile string // Enum values for Ac3DrcProfile const ( - Ac3DrcProfileFilm_standard Ac3DrcProfile = "FILM_STANDARD" - Ac3DrcProfileNone Ac3DrcProfile = "NONE" + Ac3DrcProfileFilmStandard Ac3DrcProfile = "FILM_STANDARD" + Ac3DrcProfileNone Ac3DrcProfile = "NONE" ) // Values returns all known values for Ac3DrcProfile. Note that this can be @@ -232,8 +232,8 @@ type Ac3MetadataControl string // Enum values for Ac3MetadataControl const ( - Ac3MetadataControlFollow_input Ac3MetadataControl = "FOLLOW_INPUT" - Ac3MetadataControlUse_configured Ac3MetadataControl = "USE_CONFIGURED" + Ac3MetadataControlFollowInput Ac3MetadataControl = "FOLLOW_INPUT" + Ac3MetadataControlUseConfigured Ac3MetadataControl = "USE_CONFIGURED" ) // Values returns all known values for Ac3MetadataControl. Note that this can be @@ -250,7 +250,7 @@ type AcceptHeader string // Enum values for AcceptHeader const ( - AcceptHeaderImage_jpeg AcceptHeader = "image/jpeg" + AcceptHeaderImageJpeg AcceptHeader = "image/jpeg" ) // Values returns all known values for AcceptHeader. Note that this can be expanded @@ -286,8 +286,8 @@ type AudioDescriptionAudioTypeControl string // Enum values for AudioDescriptionAudioTypeControl const ( - AudioDescriptionAudioTypeControlFollow_input AudioDescriptionAudioTypeControl = "FOLLOW_INPUT" - AudioDescriptionAudioTypeControlUse_configured AudioDescriptionAudioTypeControl = "USE_CONFIGURED" + AudioDescriptionAudioTypeControlFollowInput AudioDescriptionAudioTypeControl = "FOLLOW_INPUT" + AudioDescriptionAudioTypeControlUseConfigured AudioDescriptionAudioTypeControl = "USE_CONFIGURED" ) // Values returns all known values for AudioDescriptionAudioTypeControl. Note that @@ -305,8 +305,8 @@ type AudioDescriptionLanguageCodeControl string // Enum values for AudioDescriptionLanguageCodeControl const ( - AudioDescriptionLanguageCodeControlFollow_input AudioDescriptionLanguageCodeControl = "FOLLOW_INPUT" - AudioDescriptionLanguageCodeControlUse_configured AudioDescriptionLanguageCodeControl = "USE_CONFIGURED" + AudioDescriptionLanguageCodeControlFollowInput AudioDescriptionLanguageCodeControl = "FOLLOW_INPUT" + AudioDescriptionLanguageCodeControlUseConfigured AudioDescriptionLanguageCodeControl = "USE_CONFIGURED" ) // Values returns all known values for AudioDescriptionLanguageCodeControl. 
Note @@ -342,8 +342,8 @@ type AudioNormalizationAlgorithm string // Enum values for AudioNormalizationAlgorithm const ( - AudioNormalizationAlgorithmItu_1770_1 AudioNormalizationAlgorithm = "ITU_1770_1" - AudioNormalizationAlgorithmItu_1770_2 AudioNormalizationAlgorithm = "ITU_1770_2" + AudioNormalizationAlgorithmItu17701 AudioNormalizationAlgorithm = "ITU_1770_1" + AudioNormalizationAlgorithmItu17702 AudioNormalizationAlgorithm = "ITU_1770_2" ) // Values returns all known values for AudioNormalizationAlgorithm. Note that this @@ -360,7 +360,7 @@ type AudioNormalizationAlgorithmControl string // Enum values for AudioNormalizationAlgorithmControl const ( - AudioNormalizationAlgorithmControlCorrect_audio AudioNormalizationAlgorithmControl = "CORRECT_AUDIO" + AudioNormalizationAlgorithmControlCorrectAudio AudioNormalizationAlgorithmControl = "CORRECT_AUDIO" ) // Values returns all known values for AudioNormalizationAlgorithmControl. Note @@ -395,10 +395,10 @@ type AudioOnlyHlsTrackType string // Enum values for AudioOnlyHlsTrackType const ( - AudioOnlyHlsTrackTypeAlternate_audio_auto_select AudioOnlyHlsTrackType = "ALTERNATE_AUDIO_AUTO_SELECT" - AudioOnlyHlsTrackTypeAlternate_audio_auto_select_default AudioOnlyHlsTrackType = "ALTERNATE_AUDIO_AUTO_SELECT_DEFAULT" - AudioOnlyHlsTrackTypeAlternate_audio_not_auto_select AudioOnlyHlsTrackType = "ALTERNATE_AUDIO_NOT_AUTO_SELECT" - AudioOnlyHlsTrackTypeAudio_only_variant_stream AudioOnlyHlsTrackType = "AUDIO_ONLY_VARIANT_STREAM" + AudioOnlyHlsTrackTypeAlternateAudioAutoSelect AudioOnlyHlsTrackType = "ALTERNATE_AUDIO_AUTO_SELECT" + AudioOnlyHlsTrackTypeAlternateAudioAutoSelectDefault AudioOnlyHlsTrackType = "ALTERNATE_AUDIO_AUTO_SELECT_DEFAULT" + AudioOnlyHlsTrackTypeAlternateAudioNotAutoSelect AudioOnlyHlsTrackType = "ALTERNATE_AUDIO_NOT_AUTO_SELECT" + AudioOnlyHlsTrackTypeAudioOnlyVariantStream AudioOnlyHlsTrackType = "AUDIO_ONLY_VARIANT_STREAM" ) // Values returns all known values for AudioOnlyHlsTrackType. Note that this can be @@ -417,10 +417,10 @@ type AudioType string // Enum values for AudioType const ( - AudioTypeClean_effects AudioType = "CLEAN_EFFECTS" - AudioTypeHearing_impaired AudioType = "HEARING_IMPAIRED" - AudioTypeUndefined AudioType = "UNDEFINED" - AudioTypeVisual_impaired_commentary AudioType = "VISUAL_IMPAIRED_COMMENTARY" + AudioTypeCleanEffects AudioType = "CLEAN_EFFECTS" + AudioTypeHearingImpaired AudioType = "HEARING_IMPAIRED" + AudioTypeUndefined AudioType = "UNDEFINED" + AudioTypeVisualImpairedCommentary AudioType = "VISUAL_IMPAIRED_COMMENTARY" ) // Values returns all known values for AudioType. Note that this can be expanded in @@ -664,8 +664,8 @@ type ChannelClass string // Enum values for ChannelClass const ( - ChannelClassStandard ChannelClass = "STANDARD" - ChannelClassSingle_pipeline ChannelClass = "SINGLE_PIPELINE" + ChannelClassStandard ChannelClass = "STANDARD" + ChannelClassSinglePipeline ChannelClass = "SINGLE_PIPELINE" ) // Values returns all known values for ChannelClass. 
Note that this can be expanded @@ -682,17 +682,17 @@ type ChannelState string // Enum values for ChannelState const ( - ChannelStateCreating ChannelState = "CREATING" - ChannelStateCreate_failed ChannelState = "CREATE_FAILED" - ChannelStateIdle ChannelState = "IDLE" - ChannelStateStarting ChannelState = "STARTING" - ChannelStateRunning ChannelState = "RUNNING" - ChannelStateRecovering ChannelState = "RECOVERING" - ChannelStateStopping ChannelState = "STOPPING" - ChannelStateDeleting ChannelState = "DELETING" - ChannelStateDeleted ChannelState = "DELETED" - ChannelStateUpdating ChannelState = "UPDATING" - ChannelStateUpdate_failed ChannelState = "UPDATE_FAILED" + ChannelStateCreating ChannelState = "CREATING" + ChannelStateCreateFailed ChannelState = "CREATE_FAILED" + ChannelStateIdle ChannelState = "IDLE" + ChannelStateStarting ChannelState = "STARTING" + ChannelStateRunning ChannelState = "RUNNING" + ChannelStateRecovering ChannelState = "RECOVERING" + ChannelStateStopping ChannelState = "STOPPING" + ChannelStateDeleting ChannelState = "DELETING" + ChannelStateDeleted ChannelState = "DELETED" + ChannelStateUpdating ChannelState = "UPDATING" + ChannelStateUpdateFailed ChannelState = "UPDATE_FAILED" ) // Values returns all known values for ChannelState. Note that this can be expanded @@ -718,7 +718,7 @@ type ContentType string // Enum values for ContentType const ( - ContentTypeImage_jpeg ContentType = "image/jpeg" + ContentTypeImageJpeg ContentType = "image/jpeg" ) // Values returns all known values for ContentType. Note that this can be expanded @@ -752,10 +752,10 @@ type DvbSdtOutputSdt string // Enum values for DvbSdtOutputSdt const ( - DvbSdtOutputSdtSdt_follow DvbSdtOutputSdt = "SDT_FOLLOW" - DvbSdtOutputSdtSdt_follow_if_present DvbSdtOutputSdt = "SDT_FOLLOW_IF_PRESENT" - DvbSdtOutputSdtSdt_manual DvbSdtOutputSdt = "SDT_MANUAL" - DvbSdtOutputSdtSdt_none DvbSdtOutputSdt = "SDT_NONE" + DvbSdtOutputSdtSdtFollow DvbSdtOutputSdt = "SDT_FOLLOW" + DvbSdtOutputSdtSdtFollowIfPresent DvbSdtOutputSdt = "SDT_FOLLOW_IF_PRESENT" + DvbSdtOutputSdtSdtManual DvbSdtOutputSdt = "SDT_MANUAL" + DvbSdtOutputSdtSdtNone DvbSdtOutputSdt = "SDT_NONE" ) // Values returns all known values for DvbSdtOutputSdt. Note that this can be @@ -907,8 +907,8 @@ type Eac3AttenuationControl string // Enum values for Eac3AttenuationControl const ( - Eac3AttenuationControlAttenuate_3_db Eac3AttenuationControl = "ATTENUATE_3_DB" - Eac3AttenuationControlNone Eac3AttenuationControl = "NONE" + Eac3AttenuationControlAttenuate3Db Eac3AttenuationControl = "ATTENUATE_3_DB" + Eac3AttenuationControlNone Eac3AttenuationControl = "NONE" ) // Values returns all known values for Eac3AttenuationControl. 
Note that this can @@ -925,11 +925,11 @@ type Eac3BitstreamMode string // Enum values for Eac3BitstreamMode const ( - Eac3BitstreamModeCommentary Eac3BitstreamMode = "COMMENTARY" - Eac3BitstreamModeComplete_main Eac3BitstreamMode = "COMPLETE_MAIN" - Eac3BitstreamModeEmergency Eac3BitstreamMode = "EMERGENCY" - Eac3BitstreamModeHearing_impaired Eac3BitstreamMode = "HEARING_IMPAIRED" - Eac3BitstreamModeVisually_impaired Eac3BitstreamMode = "VISUALLY_IMPAIRED" + Eac3BitstreamModeCommentary Eac3BitstreamMode = "COMMENTARY" + Eac3BitstreamModeCompleteMain Eac3BitstreamMode = "COMPLETE_MAIN" + Eac3BitstreamModeEmergency Eac3BitstreamMode = "EMERGENCY" + Eac3BitstreamModeHearingImpaired Eac3BitstreamMode = "HEARING_IMPAIRED" + Eac3BitstreamModeVisuallyImpaired Eac3BitstreamMode = "VISUALLY_IMPAIRED" ) // Values returns all known values for Eac3BitstreamMode. Note that this can be @@ -949,9 +949,9 @@ type Eac3CodingMode string // Enum values for Eac3CodingMode const ( - Eac3CodingModeCoding_mode_1_0 Eac3CodingMode = "CODING_MODE_1_0" - Eac3CodingModeCoding_mode_2_0 Eac3CodingMode = "CODING_MODE_2_0" - Eac3CodingModeCoding_mode_3_2 Eac3CodingMode = "CODING_MODE_3_2" + Eac3CodingModeCodingMode10 Eac3CodingMode = "CODING_MODE_1_0" + Eac3CodingModeCodingMode20 Eac3CodingMode = "CODING_MODE_2_0" + Eac3CodingModeCodingMode32 Eac3CodingMode = "CODING_MODE_3_2" ) // Values returns all known values for Eac3CodingMode. Note that this can be @@ -987,12 +987,12 @@ type Eac3DrcLine string // Enum values for Eac3DrcLine const ( - Eac3DrcLineFilm_light Eac3DrcLine = "FILM_LIGHT" - Eac3DrcLineFilm_standard Eac3DrcLine = "FILM_STANDARD" - Eac3DrcLineMusic_light Eac3DrcLine = "MUSIC_LIGHT" - Eac3DrcLineMusic_standard Eac3DrcLine = "MUSIC_STANDARD" - Eac3DrcLineNone Eac3DrcLine = "NONE" - Eac3DrcLineSpeech Eac3DrcLine = "SPEECH" + Eac3DrcLineFilmLight Eac3DrcLine = "FILM_LIGHT" + Eac3DrcLineFilmStandard Eac3DrcLine = "FILM_STANDARD" + Eac3DrcLineMusicLight Eac3DrcLine = "MUSIC_LIGHT" + Eac3DrcLineMusicStandard Eac3DrcLine = "MUSIC_STANDARD" + Eac3DrcLineNone Eac3DrcLine = "NONE" + Eac3DrcLineSpeech Eac3DrcLine = "SPEECH" ) // Values returns all known values for Eac3DrcLine. Note that this can be expanded @@ -1013,12 +1013,12 @@ type Eac3DrcRf string // Enum values for Eac3DrcRf const ( - Eac3DrcRfFilm_light Eac3DrcRf = "FILM_LIGHT" - Eac3DrcRfFilm_standard Eac3DrcRf = "FILM_STANDARD" - Eac3DrcRfMusic_light Eac3DrcRf = "MUSIC_LIGHT" - Eac3DrcRfMusic_standard Eac3DrcRf = "MUSIC_STANDARD" - Eac3DrcRfNone Eac3DrcRf = "NONE" - Eac3DrcRfSpeech Eac3DrcRf = "SPEECH" + Eac3DrcRfFilmLight Eac3DrcRf = "FILM_LIGHT" + Eac3DrcRfFilmStandard Eac3DrcRf = "FILM_STANDARD" + Eac3DrcRfMusicLight Eac3DrcRf = "MUSIC_LIGHT" + Eac3DrcRfMusicStandard Eac3DrcRf = "MUSIC_STANDARD" + Eac3DrcRfNone Eac3DrcRf = "NONE" + Eac3DrcRfSpeech Eac3DrcRf = "SPEECH" ) // Values returns all known values for Eac3DrcRf. Note that this can be expanded in @@ -1039,8 +1039,8 @@ type Eac3LfeControl string // Enum values for Eac3LfeControl const ( - Eac3LfeControlLfe Eac3LfeControl = "LFE" - Eac3LfeControlNo_lfe Eac3LfeControl = "NO_LFE" + Eac3LfeControlLfe Eac3LfeControl = "LFE" + Eac3LfeControlNoLfe Eac3LfeControl = "NO_LFE" ) // Values returns all known values for Eac3LfeControl. 
Note that this can be @@ -1075,8 +1075,8 @@ type Eac3MetadataControl string // Enum values for Eac3MetadataControl const ( - Eac3MetadataControlFollow_input Eac3MetadataControl = "FOLLOW_INPUT" - Eac3MetadataControlUse_configured Eac3MetadataControl = "USE_CONFIGURED" + Eac3MetadataControlFollowInput Eac3MetadataControl = "FOLLOW_INPUT" + Eac3MetadataControlUseConfigured Eac3MetadataControl = "USE_CONFIGURED" ) // Values returns all known values for Eac3MetadataControl. Note that this can be @@ -1093,8 +1093,8 @@ type Eac3PassthroughControl string // Enum values for Eac3PassthroughControl const ( - Eac3PassthroughControlNo_passthrough Eac3PassthroughControl = "NO_PASSTHROUGH" - Eac3PassthroughControlWhen_possible Eac3PassthroughControl = "WHEN_POSSIBLE" + Eac3PassthroughControlNoPassthrough Eac3PassthroughControl = "NO_PASSTHROUGH" + Eac3PassthroughControlWhenPossible Eac3PassthroughControl = "WHEN_POSSIBLE" ) // Values returns all known values for Eac3PassthroughControl. Note that this can @@ -1111,8 +1111,8 @@ type Eac3PhaseControl string // Enum values for Eac3PhaseControl const ( - Eac3PhaseControlNo_shift Eac3PhaseControl = "NO_SHIFT" - Eac3PhaseControlShift_90_degrees Eac3PhaseControl = "SHIFT_90_DEGREES" + Eac3PhaseControlNoShift Eac3PhaseControl = "NO_SHIFT" + Eac3PhaseControlShift90Degrees Eac3PhaseControl = "SHIFT_90_DEGREES" ) // Values returns all known values for Eac3PhaseControl. Note that this can be @@ -1129,10 +1129,10 @@ type Eac3StereoDownmix string // Enum values for Eac3StereoDownmix const ( - Eac3StereoDownmixDpl2 Eac3StereoDownmix = "DPL2" - Eac3StereoDownmixLo_ro Eac3StereoDownmix = "LO_RO" - Eac3StereoDownmixLt_rt Eac3StereoDownmix = "LT_RT" - Eac3StereoDownmixNot_indicated Eac3StereoDownmix = "NOT_INDICATED" + Eac3StereoDownmixDpl2 Eac3StereoDownmix = "DPL2" + Eac3StereoDownmixLoRo Eac3StereoDownmix = "LO_RO" + Eac3StereoDownmixLtRt Eac3StereoDownmix = "LT_RT" + Eac3StereoDownmixNotIndicated Eac3StereoDownmix = "NOT_INDICATED" ) // Values returns all known values for Eac3StereoDownmix. Note that this can be @@ -1151,9 +1151,9 @@ type Eac3SurroundExMode string // Enum values for Eac3SurroundExMode const ( - Eac3SurroundExModeDisabled Eac3SurroundExMode = "DISABLED" - Eac3SurroundExModeEnabled Eac3SurroundExMode = "ENABLED" - Eac3SurroundExModeNot_indicated Eac3SurroundExMode = "NOT_INDICATED" + Eac3SurroundExModeDisabled Eac3SurroundExMode = "DISABLED" + Eac3SurroundExModeEnabled Eac3SurroundExMode = "ENABLED" + Eac3SurroundExModeNotIndicated Eac3SurroundExMode = "NOT_INDICATED" ) // Values returns all known values for Eac3SurroundExMode. Note that this can be @@ -1171,9 +1171,9 @@ type Eac3SurroundMode string // Enum values for Eac3SurroundMode const ( - Eac3SurroundModeDisabled Eac3SurroundMode = "DISABLED" - Eac3SurroundModeEnabled Eac3SurroundMode = "ENABLED" - Eac3SurroundModeNot_indicated Eac3SurroundMode = "NOT_INDICATED" + Eac3SurroundModeDisabled Eac3SurroundMode = "DISABLED" + Eac3SurroundModeEnabled Eac3SurroundMode = "ENABLED" + Eac3SurroundModeNotIndicated Eac3SurroundMode = "NOT_INDICATED" ) // Values returns all known values for Eac3SurroundMode. 
Note that this can be @@ -1283,8 +1283,8 @@ type FecOutputIncludeFec string // Enum values for FecOutputIncludeFec const ( - FecOutputIncludeFecColumn FecOutputIncludeFec = "COLUMN" - FecOutputIncludeFecColumn_and_row FecOutputIncludeFec = "COLUMN_AND_ROW" + FecOutputIncludeFecColumn FecOutputIncludeFec = "COLUMN" + FecOutputIncludeFecColumnAndRow FecOutputIncludeFec = "COLUMN_AND_ROW" ) // Values returns all known values for FecOutputIncludeFec. Note that this can be @@ -1301,17 +1301,17 @@ type FixedAfd string // Enum values for FixedAfd const ( - FixedAfdAfd_0000 FixedAfd = "AFD_0000" - FixedAfdAfd_0010 FixedAfd = "AFD_0010" - FixedAfdAfd_0011 FixedAfd = "AFD_0011" - FixedAfdAfd_0100 FixedAfd = "AFD_0100" - FixedAfdAfd_1000 FixedAfd = "AFD_1000" - FixedAfdAfd_1001 FixedAfd = "AFD_1001" - FixedAfdAfd_1010 FixedAfd = "AFD_1010" - FixedAfdAfd_1011 FixedAfd = "AFD_1011" - FixedAfdAfd_1101 FixedAfd = "AFD_1101" - FixedAfdAfd_1110 FixedAfd = "AFD_1110" - FixedAfdAfd_1111 FixedAfd = "AFD_1111" + FixedAfdAfd0000 FixedAfd = "AFD_0000" + FixedAfdAfd0010 FixedAfd = "AFD_0010" + FixedAfdAfd0011 FixedAfd = "AFD_0011" + FixedAfdAfd0100 FixedAfd = "AFD_0100" + FixedAfdAfd1000 FixedAfd = "AFD_1000" + FixedAfdAfd1001 FixedAfd = "AFD_1001" + FixedAfdAfd1010 FixedAfd = "AFD_1010" + FixedAfdAfd1011 FixedAfd = "AFD_1011" + FixedAfdAfd1101 FixedAfd = "AFD_1101" + FixedAfdAfd1110 FixedAfd = "AFD_1110" + FixedAfdAfd1111 FixedAfd = "AFD_1111" ) // Values returns all known values for FixedAfd. Note that this can be expanded in @@ -1337,8 +1337,8 @@ type Fmp4NielsenId3Behavior string // Enum values for Fmp4NielsenId3Behavior const ( - Fmp4NielsenId3BehaviorNo_passthrough Fmp4NielsenId3Behavior = "NO_PASSTHROUGH" - Fmp4NielsenId3BehaviorPassthrough Fmp4NielsenId3Behavior = "PASSTHROUGH" + Fmp4NielsenId3BehaviorNoPassthrough Fmp4NielsenId3Behavior = "NO_PASSTHROUGH" + Fmp4NielsenId3BehaviorPassthrough Fmp4NielsenId3Behavior = "PASSTHROUGH" ) // Values returns all known values for Fmp4NielsenId3Behavior. Note that this can @@ -1355,8 +1355,8 @@ type Fmp4TimedMetadataBehavior string // Enum values for Fmp4TimedMetadataBehavior const ( - Fmp4TimedMetadataBehaviorNo_passthrough Fmp4TimedMetadataBehavior = "NO_PASSTHROUGH" - Fmp4TimedMetadataBehaviorPassthrough Fmp4TimedMetadataBehavior = "PASSTHROUGH" + Fmp4TimedMetadataBehaviorNoPassthrough Fmp4TimedMetadataBehavior = "NO_PASSTHROUGH" + Fmp4TimedMetadataBehaviorPassthrough Fmp4TimedMetadataBehavior = "PASSTHROUGH" ) // Values returns all known values for Fmp4TimedMetadataBehavior. Note that this @@ -1409,8 +1409,8 @@ type GlobalConfigurationInputEndAction string // Enum values for GlobalConfigurationInputEndAction const ( - GlobalConfigurationInputEndActionNone GlobalConfigurationInputEndAction = "NONE" - GlobalConfigurationInputEndActionSwitch_and_loop_inputs GlobalConfigurationInputEndAction = "SWITCH_AND_LOOP_INPUTS" + GlobalConfigurationInputEndActionNone GlobalConfigurationInputEndAction = "NONE" + GlobalConfigurationInputEndActionSwitchAndLoopInputs GlobalConfigurationInputEndAction = "SWITCH_AND_LOOP_INPUTS" ) // Values returns all known values for GlobalConfigurationInputEndAction. 
Note that @@ -1447,8 +1447,8 @@ type GlobalConfigurationOutputLockingMode string // Enum values for GlobalConfigurationOutputLockingMode const ( - GlobalConfigurationOutputLockingModeEpoch_locking GlobalConfigurationOutputLockingMode = "EPOCH_LOCKING" - GlobalConfigurationOutputLockingModePipeline_locking GlobalConfigurationOutputLockingMode = "PIPELINE_LOCKING" + GlobalConfigurationOutputLockingModeEpochLocking GlobalConfigurationOutputLockingMode = "EPOCH_LOCKING" + GlobalConfigurationOutputLockingModePipelineLocking GlobalConfigurationOutputLockingMode = "PIPELINE_LOCKING" ) // Values returns all known values for GlobalConfigurationOutputLockingMode. Note @@ -1466,8 +1466,8 @@ type GlobalConfigurationOutputTimingSource string // Enum values for GlobalConfigurationOutputTimingSource const ( - GlobalConfigurationOutputTimingSourceInput_clock GlobalConfigurationOutputTimingSource = "INPUT_CLOCK" - GlobalConfigurationOutputTimingSourceSystem_clock GlobalConfigurationOutputTimingSource = "SYSTEM_CLOCK" + GlobalConfigurationOutputTimingSourceInputClock GlobalConfigurationOutputTimingSource = "INPUT_CLOCK" + GlobalConfigurationOutputTimingSourceSystemClock GlobalConfigurationOutputTimingSource = "SYSTEM_CLOCK" ) // Values returns all known values for GlobalConfigurationOutputTimingSource. Note @@ -1583,8 +1583,8 @@ type H264FramerateControl string // Enum values for H264FramerateControl const ( - H264FramerateControlInitialize_from_source H264FramerateControl = "INITIALIZE_FROM_SOURCE" - H264FramerateControlSpecified H264FramerateControl = "SPECIFIED" + H264FramerateControlInitializeFromSource H264FramerateControl = "INITIALIZE_FROM_SOURCE" + H264FramerateControlSpecified H264FramerateControl = "SPECIFIED" ) // Values returns all known values for H264FramerateControl. 
Note that this can be @@ -1637,23 +1637,23 @@ type H264Level string // Enum values for H264Level const ( - H264LevelH264_level_1 H264Level = "H264_LEVEL_1" - H264LevelH264_level_1_1 H264Level = "H264_LEVEL_1_1" - H264LevelH264_level_1_2 H264Level = "H264_LEVEL_1_2" - H264LevelH264_level_1_3 H264Level = "H264_LEVEL_1_3" - H264LevelH264_level_2 H264Level = "H264_LEVEL_2" - H264LevelH264_level_2_1 H264Level = "H264_LEVEL_2_1" - H264LevelH264_level_2_2 H264Level = "H264_LEVEL_2_2" - H264LevelH264_level_3 H264Level = "H264_LEVEL_3" - H264LevelH264_level_3_1 H264Level = "H264_LEVEL_3_1" - H264LevelH264_level_3_2 H264Level = "H264_LEVEL_3_2" - H264LevelH264_level_4 H264Level = "H264_LEVEL_4" - H264LevelH264_level_4_1 H264Level = "H264_LEVEL_4_1" - H264LevelH264_level_4_2 H264Level = "H264_LEVEL_4_2" - H264LevelH264_level_5 H264Level = "H264_LEVEL_5" - H264LevelH264_level_5_1 H264Level = "H264_LEVEL_5_1" - H264LevelH264_level_5_2 H264Level = "H264_LEVEL_5_2" - H264LevelH264_level_auto H264Level = "H264_LEVEL_AUTO" + H264LevelH264Level1 H264Level = "H264_LEVEL_1" + H264LevelH264Level11 H264Level = "H264_LEVEL_1_1" + H264LevelH264Level12 H264Level = "H264_LEVEL_1_2" + H264LevelH264Level13 H264Level = "H264_LEVEL_1_3" + H264LevelH264Level2 H264Level = "H264_LEVEL_2" + H264LevelH264Level21 H264Level = "H264_LEVEL_2_1" + H264LevelH264Level22 H264Level = "H264_LEVEL_2_2" + H264LevelH264Level3 H264Level = "H264_LEVEL_3" + H264LevelH264Level31 H264Level = "H264_LEVEL_3_1" + H264LevelH264Level32 H264Level = "H264_LEVEL_3_2" + H264LevelH264Level4 H264Level = "H264_LEVEL_4" + H264LevelH264Level41 H264Level = "H264_LEVEL_4_1" + H264LevelH264Level42 H264Level = "H264_LEVEL_4_2" + H264LevelH264Level5 H264Level = "H264_LEVEL_5" + H264LevelH264Level51 H264Level = "H264_LEVEL_5_1" + H264LevelH264Level52 H264Level = "H264_LEVEL_5_2" + H264LevelH264LevelAuto H264Level = "H264_LEVEL_AUTO" ) // Values returns all known values for H264Level. Note that this can be expanded in @@ -1705,8 +1705,8 @@ type H264ParControl string // Enum values for H264ParControl const ( - H264ParControlInitialize_from_source H264ParControl = "INITIALIZE_FROM_SOURCE" - H264ParControlSpecified H264ParControl = "SPECIFIED" + H264ParControlInitializeFromSource H264ParControl = "INITIALIZE_FROM_SOURCE" + H264ParControlSpecified H264ParControl = "SPECIFIED" ) // Values returns all known values for H264ParControl. Note that this can be @@ -1723,12 +1723,12 @@ type H264Profile string // Enum values for H264Profile const ( - H264ProfileBaseline H264Profile = "BASELINE" - H264ProfileHigh H264Profile = "HIGH" - H264ProfileHigh_10bit H264Profile = "HIGH_10BIT" - H264ProfileHigh_422 H264Profile = "HIGH_422" - H264ProfileHigh_422_10bit H264Profile = "HIGH_422_10BIT" - H264ProfileMain H264Profile = "MAIN" + H264ProfileBaseline H264Profile = "BASELINE" + H264ProfileHigh H264Profile = "HIGH" + H264ProfileHigh10bit H264Profile = "HIGH_10BIT" + H264ProfileHigh422 H264Profile = "HIGH_422" + H264ProfileHigh42210bit H264Profile = "HIGH_422_10BIT" + H264ProfileMain H264Profile = "MAIN" ) // Values returns all known values for H264Profile. 
Note that this can be expanded @@ -1749,8 +1749,8 @@ type H264QualityLevel string // Enum values for H264QualityLevel const ( - H264QualityLevelEnhanced_quality H264QualityLevel = "ENHANCED_QUALITY" - H264QualityLevelStandard_quality H264QualityLevel = "STANDARD_QUALITY" + H264QualityLevelEnhancedQuality H264QualityLevel = "ENHANCED_QUALITY" + H264QualityLevelStandardQuality H264QualityLevel = "STANDARD_QUALITY" ) // Values returns all known values for H264QualityLevel. Note that this can be @@ -1897,8 +1897,8 @@ type H264TimecodeInsertionBehavior string // Enum values for H264TimecodeInsertionBehavior const ( - H264TimecodeInsertionBehaviorDisabled H264TimecodeInsertionBehavior = "DISABLED" - H264TimecodeInsertionBehaviorPic_timing_sei H264TimecodeInsertionBehavior = "PIC_TIMING_SEI" + H264TimecodeInsertionBehaviorDisabled H264TimecodeInsertionBehavior = "DISABLED" + H264TimecodeInsertionBehaviorPicTimingSei H264TimecodeInsertionBehavior = "PIC_TIMING_SEI" ) // Values returns all known values for H264TimecodeInsertionBehavior. Note that @@ -2015,20 +2015,20 @@ type H265Level string // Enum values for H265Level const ( - H265LevelH265_level_1 H265Level = "H265_LEVEL_1" - H265LevelH265_level_2 H265Level = "H265_LEVEL_2" - H265LevelH265_level_2_1 H265Level = "H265_LEVEL_2_1" - H265LevelH265_level_3 H265Level = "H265_LEVEL_3" - H265LevelH265_level_3_1 H265Level = "H265_LEVEL_3_1" - H265LevelH265_level_4 H265Level = "H265_LEVEL_4" - H265LevelH265_level_4_1 H265Level = "H265_LEVEL_4_1" - H265LevelH265_level_5 H265Level = "H265_LEVEL_5" - H265LevelH265_level_5_1 H265Level = "H265_LEVEL_5_1" - H265LevelH265_level_5_2 H265Level = "H265_LEVEL_5_2" - H265LevelH265_level_6 H265Level = "H265_LEVEL_6" - H265LevelH265_level_6_1 H265Level = "H265_LEVEL_6_1" - H265LevelH265_level_6_2 H265Level = "H265_LEVEL_6_2" - H265LevelH265_level_auto H265Level = "H265_LEVEL_AUTO" + H265LevelH265Level1 H265Level = "H265_LEVEL_1" + H265LevelH265Level2 H265Level = "H265_LEVEL_2" + H265LevelH265Level21 H265Level = "H265_LEVEL_2_1" + H265LevelH265Level3 H265Level = "H265_LEVEL_3" + H265LevelH265Level31 H265Level = "H265_LEVEL_3_1" + H265LevelH265Level4 H265Level = "H265_LEVEL_4" + H265LevelH265Level41 H265Level = "H265_LEVEL_4_1" + H265LevelH265Level5 H265Level = "H265_LEVEL_5" + H265LevelH265Level51 H265Level = "H265_LEVEL_5_1" + H265LevelH265Level52 H265Level = "H265_LEVEL_5_2" + H265LevelH265Level6 H265Level = "H265_LEVEL_6" + H265LevelH265Level61 H265Level = "H265_LEVEL_6_1" + H265LevelH265Level62 H265Level = "H265_LEVEL_6_2" + H265LevelH265LevelAuto H265Level = "H265_LEVEL_AUTO" ) // Values returns all known values for H265Level. Note that this can be expanded in @@ -2077,8 +2077,8 @@ type H265Profile string // Enum values for H265Profile const ( - H265ProfileMain H265Profile = "MAIN" - H265ProfileMain_10bit H265Profile = "MAIN_10BIT" + H265ProfileMain H265Profile = "MAIN" + H265ProfileMain10bit H265Profile = "MAIN_10BIT" ) // Values returns all known values for H265Profile. 
Note that this can be expanded @@ -2169,8 +2169,8 @@ type H265TimecodeInsertionBehavior string // Enum values for H265TimecodeInsertionBehavior const ( - H265TimecodeInsertionBehaviorDisabled H265TimecodeInsertionBehavior = "DISABLED" - H265TimecodeInsertionBehaviorPic_timing_sei H265TimecodeInsertionBehavior = "PIC_TIMING_SEI" + H265TimecodeInsertionBehaviorDisabled H265TimecodeInsertionBehavior = "DISABLED" + H265TimecodeInsertionBehaviorPicTimingSei H265TimecodeInsertionBehavior = "PIC_TIMING_SEI" ) // Values returns all known values for H265TimecodeInsertionBehavior. Note that @@ -2188,9 +2188,9 @@ type HlsAdMarkers string // Enum values for HlsAdMarkers const ( - HlsAdMarkersAdobe HlsAdMarkers = "ADOBE" - HlsAdMarkersElemental HlsAdMarkers = "ELEMENTAL" - HlsAdMarkersElemental_scte35 HlsAdMarkers = "ELEMENTAL_SCTE35" + HlsAdMarkersAdobe HlsAdMarkers = "ADOBE" + HlsAdMarkersElemental HlsAdMarkers = "ELEMENTAL" + HlsAdMarkersElementalScte35 HlsAdMarkers = "ELEMENTAL_SCTE35" ) // Values returns all known values for HlsAdMarkers. Note that this can be expanded @@ -2208,8 +2208,8 @@ type HlsAkamaiHttpTransferMode string // Enum values for HlsAkamaiHttpTransferMode const ( - HlsAkamaiHttpTransferModeChunked HlsAkamaiHttpTransferMode = "CHUNKED" - HlsAkamaiHttpTransferModeNon_chunked HlsAkamaiHttpTransferMode = "NON_CHUNKED" + HlsAkamaiHttpTransferModeChunked HlsAkamaiHttpTransferMode = "CHUNKED" + HlsAkamaiHttpTransferModeNonChunked HlsAkamaiHttpTransferMode = "NON_CHUNKED" ) // Values returns all known values for HlsAkamaiHttpTransferMode. Note that this @@ -2264,8 +2264,8 @@ type HlsCodecSpecification string // Enum values for HlsCodecSpecification const ( - HlsCodecSpecificationRfc_4281 HlsCodecSpecification = "RFC_4281" - HlsCodecSpecificationRfc_6381 HlsCodecSpecification = "RFC_6381" + HlsCodecSpecificationRfc4281 HlsCodecSpecification = "RFC_4281" + HlsCodecSpecificationRfc6381 HlsCodecSpecification = "RFC_6381" ) // Values returns all known values for HlsCodecSpecification. Note that this can be @@ -2282,8 +2282,8 @@ type HlsDirectoryStructure string // Enum values for HlsDirectoryStructure const ( - HlsDirectoryStructureSingle_directory HlsDirectoryStructure = "SINGLE_DIRECTORY" - HlsDirectoryStructureSubdirectory_per_stream HlsDirectoryStructure = "SUBDIRECTORY_PER_STREAM" + HlsDirectoryStructureSingleDirectory HlsDirectoryStructure = "SINGLE_DIRECTORY" + HlsDirectoryStructureSubdirectoryPerStream HlsDirectoryStructure = "SUBDIRECTORY_PER_STREAM" ) // Values returns all known values for HlsDirectoryStructure. Note that this can be @@ -2300,8 +2300,8 @@ type HlsEncryptionType string // Enum values for HlsEncryptionType const ( - HlsEncryptionTypeAes128 HlsEncryptionType = "AES128" - HlsEncryptionTypeSample_aes HlsEncryptionType = "SAMPLE_AES" + HlsEncryptionTypeAes128 HlsEncryptionType = "AES128" + HlsEncryptionTypeSampleAes HlsEncryptionType = "SAMPLE_AES" ) // Values returns all known values for HlsEncryptionType. Note that this can be @@ -2372,8 +2372,8 @@ type HlsIvSource string // Enum values for HlsIvSource const ( - HlsIvSourceExplicit HlsIvSource = "EXPLICIT" - HlsIvSourceFollows_segment_number HlsIvSource = "FOLLOWS_SEGMENT_NUMBER" + HlsIvSourceExplicit HlsIvSource = "EXPLICIT" + HlsIvSourceFollowsSegmentNumber HlsIvSource = "FOLLOWS_SEGMENT_NUMBER" ) // Values returns all known values for HlsIvSource. 
Note that this can be expanded @@ -2408,8 +2408,8 @@ type HlsManifestDurationFormat string // Enum values for HlsManifestDurationFormat const ( - HlsManifestDurationFormatFloating_point HlsManifestDurationFormat = "FLOATING_POINT" - HlsManifestDurationFormatInteger HlsManifestDurationFormat = "INTEGER" + HlsManifestDurationFormatFloatingPoint HlsManifestDurationFormat = "FLOATING_POINT" + HlsManifestDurationFormatInteger HlsManifestDurationFormat = "INTEGER" ) // Values returns all known values for HlsManifestDurationFormat. Note that this @@ -2460,9 +2460,9 @@ type HlsOutputSelection string // Enum values for HlsOutputSelection const ( - HlsOutputSelectionManifests_and_segments HlsOutputSelection = "MANIFESTS_AND_SEGMENTS" - HlsOutputSelectionSegments_only HlsOutputSelection = "SEGMENTS_ONLY" - HlsOutputSelectionVariant_manifests_and_segments HlsOutputSelection = "VARIANT_MANIFESTS_AND_SEGMENTS" + HlsOutputSelectionManifestsAndSegments HlsOutputSelection = "MANIFESTS_AND_SEGMENTS" + HlsOutputSelectionSegmentsOnly HlsOutputSelection = "SEGMENTS_ONLY" + HlsOutputSelectionVariantManifestsAndSegments HlsOutputSelection = "VARIANT_MANIFESTS_AND_SEGMENTS" ) // Values returns all known values for HlsOutputSelection. Note that this can be @@ -2516,8 +2516,8 @@ type HlsSegmentationMode string // Enum values for HlsSegmentationMode const ( - HlsSegmentationModeUse_input_segmentation HlsSegmentationMode = "USE_INPUT_SEGMENTATION" - HlsSegmentationModeUse_segment_duration HlsSegmentationMode = "USE_SEGMENT_DURATION" + HlsSegmentationModeUseInputSegmentation HlsSegmentationMode = "USE_INPUT_SEGMENTATION" + HlsSegmentationModeUseSegmentDuration HlsSegmentationMode = "USE_SEGMENT_DURATION" ) // Values returns all known values for HlsSegmentationMode. Note that this can be @@ -2572,8 +2572,8 @@ type HlsTsFileMode string // Enum values for HlsTsFileMode const ( - HlsTsFileModeSegmented_files HlsTsFileMode = "SEGMENTED_FILES" - HlsTsFileModeSingle_file HlsTsFileMode = "SINGLE_FILE" + HlsTsFileModeSegmentedFiles HlsTsFileMode = "SEGMENTED_FILES" + HlsTsFileModeSingleFile HlsTsFileMode = "SINGLE_FILE" ) // Values returns all known values for HlsTsFileMode. Note that this can be @@ -2590,8 +2590,8 @@ type HlsWebdavHttpTransferMode string // Enum values for HlsWebdavHttpTransferMode const ( - HlsWebdavHttpTransferModeChunked HlsWebdavHttpTransferMode = "CHUNKED" - HlsWebdavHttpTransferModeNon_chunked HlsWebdavHttpTransferMode = "NON_CHUNKED" + HlsWebdavHttpTransferModeChunked HlsWebdavHttpTransferMode = "CHUNKED" + HlsWebdavHttpTransferModeNonChunked HlsWebdavHttpTransferMode = "NON_CHUNKED" ) // Values returns all known values for HlsWebdavHttpTransferMode. Note that this @@ -2626,8 +2626,8 @@ type InputClass string // Enum values for InputClass const ( - InputClassStandard InputClass = "STANDARD" - InputClassSingle_pipeline InputClass = "SINGLE_PIPELINE" + InputClassStandard InputClass = "STANDARD" + InputClassSinglePipeline InputClass = "SINGLE_PIPELINE" ) // Values returns all known values for InputClass. 
Note that this can be expanded @@ -2864,8 +2864,8 @@ type InputLossActionForHlsOut string // Enum values for InputLossActionForHlsOut const ( - InputLossActionForHlsOutEmit_output InputLossActionForHlsOut = "EMIT_OUTPUT" - InputLossActionForHlsOutPause_output InputLossActionForHlsOut = "PAUSE_OUTPUT" + InputLossActionForHlsOutEmitOutput InputLossActionForHlsOut = "EMIT_OUTPUT" + InputLossActionForHlsOutPauseOutput InputLossActionForHlsOut = "PAUSE_OUTPUT" ) // Values returns all known values for InputLossActionForHlsOut. Note that this can @@ -2882,8 +2882,8 @@ type InputLossActionForMsSmoothOut string // Enum values for InputLossActionForMsSmoothOut const ( - InputLossActionForMsSmoothOutEmit_output InputLossActionForMsSmoothOut = "EMIT_OUTPUT" - InputLossActionForMsSmoothOutPause_output InputLossActionForMsSmoothOut = "PAUSE_OUTPUT" + InputLossActionForMsSmoothOutEmitOutput InputLossActionForMsSmoothOut = "EMIT_OUTPUT" + InputLossActionForMsSmoothOutPauseOutput InputLossActionForMsSmoothOut = "PAUSE_OUTPUT" ) // Values returns all known values for InputLossActionForMsSmoothOut. Note that @@ -2901,8 +2901,8 @@ type InputLossActionForRtmpOut string // Enum values for InputLossActionForRtmpOut const ( - InputLossActionForRtmpOutEmit_output InputLossActionForRtmpOut = "EMIT_OUTPUT" - InputLossActionForRtmpOutPause_output InputLossActionForRtmpOut = "PAUSE_OUTPUT" + InputLossActionForRtmpOutEmitOutput InputLossActionForRtmpOut = "EMIT_OUTPUT" + InputLossActionForRtmpOutPauseOutput InputLossActionForRtmpOut = "PAUSE_OUTPUT" ) // Values returns all known values for InputLossActionForRtmpOut. Note that this @@ -2919,9 +2919,9 @@ type InputLossActionForUdpOut string // Enum values for InputLossActionForUdpOut const ( - InputLossActionForUdpOutDrop_program InputLossActionForUdpOut = "DROP_PROGRAM" - InputLossActionForUdpOutDrop_ts InputLossActionForUdpOut = "DROP_TS" - InputLossActionForUdpOutEmit_program InputLossActionForUdpOut = "EMIT_PROGRAM" + InputLossActionForUdpOutDropProgram InputLossActionForUdpOut = "DROP_PROGRAM" + InputLossActionForUdpOutDropTs InputLossActionForUdpOut = "DROP_TS" + InputLossActionForUdpOutEmitProgram InputLossActionForUdpOut = "EMIT_PROGRAM" ) // Values returns all known values for InputLossActionForUdpOut. Note that this can @@ -2957,9 +2957,9 @@ type InputMaximumBitrate string // Enum values for InputMaximumBitrate const ( - InputMaximumBitrateMax_10_mbps InputMaximumBitrate = "MAX_10_MBPS" - InputMaximumBitrateMax_20_mbps InputMaximumBitrate = "MAX_20_MBPS" - InputMaximumBitrateMax_50_mbps InputMaximumBitrate = "MAX_50_MBPS" + InputMaximumBitrateMax10Mbps InputMaximumBitrate = "MAX_10_MBPS" + InputMaximumBitrateMax20Mbps InputMaximumBitrate = "MAX_20_MBPS" + InputMaximumBitrateMax50Mbps InputMaximumBitrate = "MAX_50_MBPS" ) // Values returns all known values for InputMaximumBitrate. Note that this can be @@ -2977,8 +2977,8 @@ type InputPreference string // Enum values for InputPreference const ( - InputPreferenceEqual_input_preference InputPreference = "EQUAL_INPUT_PREFERENCE" - InputPreferencePrimary_input_preferred InputPreference = "PRIMARY_INPUT_PREFERRED" + InputPreferenceEqualInputPreference InputPreference = "EQUAL_INPUT_PREFERENCE" + InputPreferencePrimaryInputPreferred InputPreference = "PRIMARY_INPUT_PREFERRED" ) // Values returns all known values for InputPreference. 
Note that this can be @@ -3016,7 +3016,7 @@ type InputSecurityGroupState string // Enum values for InputSecurityGroupState const ( InputSecurityGroupStateIdle InputSecurityGroupState = "IDLE" - InputSecurityGroupStateIn_use InputSecurityGroupState = "IN_USE" + InputSecurityGroupStateInUse InputSecurityGroupState = "IN_USE" InputSecurityGroupStateUpdating InputSecurityGroupState = "UPDATING" InputSecurityGroupStateDeleted InputSecurityGroupState = "DELETED" ) @@ -3115,15 +3115,15 @@ type InputType string // Enum values for InputType const ( - InputTypeUdp_push InputType = "UDP_PUSH" - InputTypeRtp_push InputType = "RTP_PUSH" - InputTypeRtmp_push InputType = "RTMP_PUSH" - InputTypeRtmp_pull InputType = "RTMP_PULL" - InputTypeUrl_pull InputType = "URL_PULL" - InputTypeMp4_file InputType = "MP4_FILE" + InputTypeUdpPush InputType = "UDP_PUSH" + InputTypeRtpPush InputType = "RTP_PUSH" + InputTypeRtmpPush InputType = "RTMP_PUSH" + InputTypeRtmpPull InputType = "RTMP_PULL" + InputTypeUrlPull InputType = "URL_PULL" + InputTypeMp4File InputType = "MP4_FILE" InputTypeMediaconnect InputType = "MEDIACONNECT" - InputTypeInput_device InputType = "INPUT_DEVICE" - InputTypeAws_cdi InputType = "AWS_CDI" + InputTypeInputDevice InputType = "INPUT_DEVICE" + InputTypeAwsCdi InputType = "AWS_CDI" ) // Values returns all known values for InputType. Note that this can be expanded in @@ -3147,8 +3147,8 @@ type LastFrameClippingBehavior string // Enum values for LastFrameClippingBehavior const ( - LastFrameClippingBehaviorExclude_last_frame LastFrameClippingBehavior = "EXCLUDE_LAST_FRAME" - LastFrameClippingBehaviorInclude_last_frame LastFrameClippingBehavior = "INCLUDE_LAST_FRAME" + LastFrameClippingBehaviorExcludeLastFrame LastFrameClippingBehavior = "EXCLUDE_LAST_FRAME" + LastFrameClippingBehaviorIncludeLastFrame LastFrameClippingBehavior = "INCLUDE_LAST_FRAME" ) // Values returns all known values for LastFrameClippingBehavior. Note that this @@ -3189,8 +3189,8 @@ type M2tsAbsentInputAudioBehavior string // Enum values for M2tsAbsentInputAudioBehavior const ( - M2tsAbsentInputAudioBehaviorDrop M2tsAbsentInputAudioBehavior = "DROP" - M2tsAbsentInputAudioBehaviorEncode_silence M2tsAbsentInputAudioBehavior = "ENCODE_SILENCE" + M2tsAbsentInputAudioBehaviorDrop M2tsAbsentInputAudioBehavior = "DROP" + M2tsAbsentInputAudioBehaviorEncodeSilence M2tsAbsentInputAudioBehavior = "ENCODE_SILENCE" ) // Values returns all known values for M2tsAbsentInputAudioBehavior. Note that this @@ -3225,8 +3225,8 @@ type M2tsAribCaptionsPidControl string // Enum values for M2tsAribCaptionsPidControl const ( - M2tsAribCaptionsPidControlAuto M2tsAribCaptionsPidControl = "AUTO" - M2tsAribCaptionsPidControlUse_configured M2tsAribCaptionsPidControl = "USE_CONFIGURED" + M2tsAribCaptionsPidControlAuto M2tsAribCaptionsPidControl = "AUTO" + M2tsAribCaptionsPidControlUseConfigured M2tsAribCaptionsPidControl = "USE_CONFIGURED" ) // Values returns all known values for M2tsAribCaptionsPidControl. Note that this @@ -3261,8 +3261,8 @@ type M2tsAudioInterval string // Enum values for M2tsAudioInterval const ( - M2tsAudioIntervalVideo_and_fixed_intervals M2tsAudioInterval = "VIDEO_AND_FIXED_INTERVALS" - M2tsAudioIntervalVideo_interval M2tsAudioInterval = "VIDEO_INTERVAL" + M2tsAudioIntervalVideoAndFixedIntervals M2tsAudioInterval = "VIDEO_AND_FIXED_INTERVALS" + M2tsAudioIntervalVideoInterval M2tsAudioInterval = "VIDEO_INTERVAL" ) // Values returns all known values for M2tsAudioInterval. 
Note that this can be @@ -3351,8 +3351,8 @@ type M2tsEbpPlacement string // Enum values for M2tsEbpPlacement const ( - M2tsEbpPlacementVideo_and_audio_pids M2tsEbpPlacement = "VIDEO_AND_AUDIO_PIDS" - M2tsEbpPlacementVideo_pid M2tsEbpPlacement = "VIDEO_PID" + M2tsEbpPlacementVideoAndAudioPids M2tsEbpPlacement = "VIDEO_AND_AUDIO_PIDS" + M2tsEbpPlacementVideoPid M2tsEbpPlacement = "VIDEO_PID" ) // Values returns all known values for M2tsEbpPlacement. Note that this can be @@ -3405,8 +3405,8 @@ type M2tsNielsenId3Behavior string // Enum values for M2tsNielsenId3Behavior const ( - M2tsNielsenId3BehaviorNo_passthrough M2tsNielsenId3Behavior = "NO_PASSTHROUGH" - M2tsNielsenId3BehaviorPassthrough M2tsNielsenId3Behavior = "PASSTHROUGH" + M2tsNielsenId3BehaviorNoPassthrough M2tsNielsenId3Behavior = "NO_PASSTHROUGH" + M2tsNielsenId3BehaviorPassthrough M2tsNielsenId3Behavior = "PASSTHROUGH" ) // Values returns all known values for M2tsNielsenId3Behavior. Note that this can @@ -3423,8 +3423,8 @@ type M2tsPcrControl string // Enum values for M2tsPcrControl const ( - M2tsPcrControlConfigured_pcr_period M2tsPcrControl = "CONFIGURED_PCR_PERIOD" - M2tsPcrControlPcr_every_pes_packet M2tsPcrControl = "PCR_EVERY_PES_PACKET" + M2tsPcrControlConfiguredPcrPeriod M2tsPcrControl = "CONFIGURED_PCR_PERIOD" + M2tsPcrControlPcrEveryPesPacket M2tsPcrControl = "PCR_EVERY_PES_PACKET" ) // Values returns all known values for M2tsPcrControl. Note that this can be @@ -3477,12 +3477,12 @@ type M2tsSegmentationMarkers string // Enum values for M2tsSegmentationMarkers const ( - M2tsSegmentationMarkersEbp M2tsSegmentationMarkers = "EBP" - M2tsSegmentationMarkersEbp_legacy M2tsSegmentationMarkers = "EBP_LEGACY" - M2tsSegmentationMarkersNone M2tsSegmentationMarkers = "NONE" - M2tsSegmentationMarkersPsi_segstart M2tsSegmentationMarkers = "PSI_SEGSTART" - M2tsSegmentationMarkersRai_adapt M2tsSegmentationMarkers = "RAI_ADAPT" - M2tsSegmentationMarkersRai_segstart M2tsSegmentationMarkers = "RAI_SEGSTART" + M2tsSegmentationMarkersEbp M2tsSegmentationMarkers = "EBP" + M2tsSegmentationMarkersEbpLegacy M2tsSegmentationMarkers = "EBP_LEGACY" + M2tsSegmentationMarkersNone M2tsSegmentationMarkers = "NONE" + M2tsSegmentationMarkersPsiSegstart M2tsSegmentationMarkers = "PSI_SEGSTART" + M2tsSegmentationMarkersRaiAdapt M2tsSegmentationMarkers = "RAI_ADAPT" + M2tsSegmentationMarkersRaiSegstart M2tsSegmentationMarkers = "RAI_SEGSTART" ) // Values returns all known values for M2tsSegmentationMarkers. Note that this can @@ -3503,8 +3503,8 @@ type M2tsSegmentationStyle string // Enum values for M2tsSegmentationStyle const ( - M2tsSegmentationStyleMaintain_cadence M2tsSegmentationStyle = "MAINTAIN_CADENCE" - M2tsSegmentationStyleReset_cadence M2tsSegmentationStyle = "RESET_CADENCE" + M2tsSegmentationStyleMaintainCadence M2tsSegmentationStyle = "MAINTAIN_CADENCE" + M2tsSegmentationStyleResetCadence M2tsSegmentationStyle = "RESET_CADENCE" ) // Values returns all known values for M2tsSegmentationStyle. Note that this can be @@ -3521,8 +3521,8 @@ type M2tsTimedMetadataBehavior string // Enum values for M2tsTimedMetadataBehavior const ( - M2tsTimedMetadataBehaviorNo_passthrough M2tsTimedMetadataBehavior = "NO_PASSTHROUGH" - M2tsTimedMetadataBehaviorPassthrough M2tsTimedMetadataBehavior = "PASSTHROUGH" + M2tsTimedMetadataBehaviorNoPassthrough M2tsTimedMetadataBehavior = "NO_PASSTHROUGH" + M2tsTimedMetadataBehaviorPassthrough M2tsTimedMetadataBehavior = "PASSTHROUGH" ) // Values returns all known values for M2tsTimedMetadataBehavior. 
Note that this @@ -3539,8 +3539,8 @@ type M3u8NielsenId3Behavior string // Enum values for M3u8NielsenId3Behavior const ( - M3u8NielsenId3BehaviorNo_passthrough M3u8NielsenId3Behavior = "NO_PASSTHROUGH" - M3u8NielsenId3BehaviorPassthrough M3u8NielsenId3Behavior = "PASSTHROUGH" + M3u8NielsenId3BehaviorNoPassthrough M3u8NielsenId3Behavior = "NO_PASSTHROUGH" + M3u8NielsenId3BehaviorPassthrough M3u8NielsenId3Behavior = "PASSTHROUGH" ) // Values returns all known values for M3u8NielsenId3Behavior. Note that this can @@ -3557,8 +3557,8 @@ type M3u8PcrControl string // Enum values for M3u8PcrControl const ( - M3u8PcrControlConfigured_pcr_period M3u8PcrControl = "CONFIGURED_PCR_PERIOD" - M3u8PcrControlPcr_every_pes_packet M3u8PcrControl = "PCR_EVERY_PES_PACKET" + M3u8PcrControlConfiguredPcrPeriod M3u8PcrControl = "CONFIGURED_PCR_PERIOD" + M3u8PcrControlPcrEveryPesPacket M3u8PcrControl = "PCR_EVERY_PES_PACKET" ) // Values returns all known values for M3u8PcrControl. Note that this can be @@ -3575,8 +3575,8 @@ type M3u8Scte35Behavior string // Enum values for M3u8Scte35Behavior const ( - M3u8Scte35BehaviorNo_passthrough M3u8Scte35Behavior = "NO_PASSTHROUGH" - M3u8Scte35BehaviorPassthrough M3u8Scte35Behavior = "PASSTHROUGH" + M3u8Scte35BehaviorNoPassthrough M3u8Scte35Behavior = "NO_PASSTHROUGH" + M3u8Scte35BehaviorPassthrough M3u8Scte35Behavior = "PASSTHROUGH" ) // Values returns all known values for M3u8Scte35Behavior. Note that this can be @@ -3593,8 +3593,8 @@ type M3u8TimedMetadataBehavior string // Enum values for M3u8TimedMetadataBehavior const ( - M3u8TimedMetadataBehaviorNo_passthrough M3u8TimedMetadataBehavior = "NO_PASSTHROUGH" - M3u8TimedMetadataBehaviorPassthrough M3u8TimedMetadataBehavior = "PASSTHROUGH" + M3u8TimedMetadataBehaviorNoPassthrough M3u8TimedMetadataBehavior = "NO_PASSTHROUGH" + M3u8TimedMetadataBehaviorPassthrough M3u8TimedMetadataBehavior = "PASSTHROUGH" ) // Values returns all known values for M3u8TimedMetadataBehavior. Note that this @@ -3611,8 +3611,8 @@ type Mp2CodingMode string // Enum values for Mp2CodingMode const ( - Mp2CodingModeCoding_mode_1_0 Mp2CodingMode = "CODING_MODE_1_0" - Mp2CodingModeCoding_mode_2_0 Mp2CodingMode = "CODING_MODE_2_0" + Mp2CodingModeCodingMode10 Mp2CodingMode = "CODING_MODE_1_0" + Mp2CodingModeCodingMode20 Mp2CodingMode = "CODING_MODE_2_0" ) // Values returns all known values for Mp2CodingMode. Note that this can be @@ -3761,8 +3761,8 @@ type Mpeg2TimecodeInsertionBehavior string // Enum values for Mpeg2TimecodeInsertionBehavior const ( - Mpeg2TimecodeInsertionBehaviorDisabled Mpeg2TimecodeInsertionBehavior = "DISABLED" - Mpeg2TimecodeInsertionBehaviorGop_timecode Mpeg2TimecodeInsertionBehavior = "GOP_TIMECODE" + Mpeg2TimecodeInsertionBehaviorDisabled Mpeg2TimecodeInsertionBehavior = "DISABLED" + Mpeg2TimecodeInsertionBehaviorGopTimecode Mpeg2TimecodeInsertionBehavior = "GOP_TIMECODE" ) // Values returns all known values for Mpeg2TimecodeInsertionBehavior. 
Note that @@ -3798,15 +3798,15 @@ type MultiplexState string // Enum values for MultiplexState const ( - MultiplexStateCreating MultiplexState = "CREATING" - MultiplexStateCreate_failed MultiplexState = "CREATE_FAILED" - MultiplexStateIdle MultiplexState = "IDLE" - MultiplexStateStarting MultiplexState = "STARTING" - MultiplexStateRunning MultiplexState = "RUNNING" - MultiplexStateRecovering MultiplexState = "RECOVERING" - MultiplexStateStopping MultiplexState = "STOPPING" - MultiplexStateDeleting MultiplexState = "DELETING" - MultiplexStateDeleted MultiplexState = "DELETED" + MultiplexStateCreating MultiplexState = "CREATING" + MultiplexStateCreateFailed MultiplexState = "CREATE_FAILED" + MultiplexStateIdle MultiplexState = "IDLE" + MultiplexStateStarting MultiplexState = "STARTING" + MultiplexStateRunning MultiplexState = "RUNNING" + MultiplexStateRecovering MultiplexState = "RECOVERING" + MultiplexStateStopping MultiplexState = "STOPPING" + MultiplexStateDeleting MultiplexState = "DELETING" + MultiplexStateDeleted MultiplexState = "DELETED" ) // Values returns all known values for MultiplexState. Note that this can be @@ -3830,8 +3830,8 @@ type NetworkInputServerValidation string // Enum values for NetworkInputServerValidation const ( - NetworkInputServerValidationCheck_cryptography_and_validate_name NetworkInputServerValidation = "CHECK_CRYPTOGRAPHY_AND_VALIDATE_NAME" - NetworkInputServerValidationCheck_cryptography_only NetworkInputServerValidation = "CHECK_CRYPTOGRAPHY_ONLY" + NetworkInputServerValidationCheckCryptographyAndValidateName NetworkInputServerValidation = "CHECK_CRYPTOGRAPHY_AND_VALIDATE_NAME" + NetworkInputServerValidationCheckCryptographyOnly NetworkInputServerValidation = "CHECK_CRYPTOGRAPHY_ONLY" ) // Values returns all known values for NetworkInputServerValidation. Note that this @@ -3882,7 +3882,7 @@ type OfferingType string // Enum values for OfferingType const ( - OfferingTypeNo_upfront OfferingType = "NO_UPFRONT" + OfferingTypeNoUpfront OfferingType = "NO_UPFRONT" ) // Values returns all known values for OfferingType. Note that this can be expanded @@ -3898,8 +3898,8 @@ type PipelineId string // Enum values for PipelineId const ( - PipelineIdPipeline_0 PipelineId = "PIPELINE_0" - PipelineIdPipeline_1 PipelineId = "PIPELINE_1" + PipelineIdPipeline0 PipelineId = "PIPELINE_0" + PipelineIdPipeline1 PipelineId = "PIPELINE_1" ) // Values returns all known values for PipelineId. Note that this can be expanded @@ -3916,9 +3916,9 @@ type PreferredChannelPipeline string // Enum values for PreferredChannelPipeline const ( - PreferredChannelPipelineCurrently_active PreferredChannelPipeline = "CURRENTLY_ACTIVE" - PreferredChannelPipelinePipeline_0 PreferredChannelPipeline = "PIPELINE_0" - PreferredChannelPipelinePipeline_1 PreferredChannelPipeline = "PIPELINE_1" + PreferredChannelPipelineCurrentlyActive PreferredChannelPipeline = "CURRENTLY_ACTIVE" + PreferredChannelPipelinePipeline0 PreferredChannelPipeline = "PIPELINE_0" + PreferredChannelPipelinePipeline1 PreferredChannelPipeline = "PIPELINE_1" ) // Values returns all known values for PreferredChannelPipeline. 
Note that this can @@ -3960,9 +3960,9 @@ type ReservationMaximumBitrate string // Enum values for ReservationMaximumBitrate const ( - ReservationMaximumBitrateMax_10_mbps ReservationMaximumBitrate = "MAX_10_MBPS" - ReservationMaximumBitrateMax_20_mbps ReservationMaximumBitrate = "MAX_20_MBPS" - ReservationMaximumBitrateMax_50_mbps ReservationMaximumBitrate = "MAX_50_MBPS" + ReservationMaximumBitrateMax10Mbps ReservationMaximumBitrate = "MAX_10_MBPS" + ReservationMaximumBitrateMax20Mbps ReservationMaximumBitrate = "MAX_20_MBPS" + ReservationMaximumBitrateMax50Mbps ReservationMaximumBitrate = "MAX_50_MBPS" ) // Values returns all known values for ReservationMaximumBitrate. Note that this @@ -3980,8 +3980,8 @@ type ReservationMaximumFramerate string // Enum values for ReservationMaximumFramerate const ( - ReservationMaximumFramerateMax_30_fps ReservationMaximumFramerate = "MAX_30_FPS" - ReservationMaximumFramerateMax_60_fps ReservationMaximumFramerate = "MAX_60_FPS" + ReservationMaximumFramerateMax30Fps ReservationMaximumFramerate = "MAX_30_FPS" + ReservationMaximumFramerateMax60Fps ReservationMaximumFramerate = "MAX_60_FPS" ) // Values returns all known values for ReservationMaximumFramerate. Note that this @@ -4042,8 +4042,8 @@ type ReservationSpecialFeature string // Enum values for ReservationSpecialFeature const ( - ReservationSpecialFeatureAdvanced_audio ReservationSpecialFeature = "ADVANCED_AUDIO" - ReservationSpecialFeatureAudio_normalization ReservationSpecialFeature = "AUDIO_NORMALIZATION" + ReservationSpecialFeatureAdvancedAudio ReservationSpecialFeature = "ADVANCED_AUDIO" + ReservationSpecialFeatureAudioNormalization ReservationSpecialFeature = "AUDIO_NORMALIZATION" ) // Values returns all known values for ReservationSpecialFeature. Note that this @@ -4102,8 +4102,8 @@ type RtmpCacheFullBehavior string // Enum values for RtmpCacheFullBehavior const ( - RtmpCacheFullBehaviorDisconnect_immediately RtmpCacheFullBehavior = "DISCONNECT_IMMEDIATELY" - RtmpCacheFullBehaviorWait_for_server RtmpCacheFullBehavior = "WAIT_FOR_SERVER" + RtmpCacheFullBehaviorDisconnectImmediately RtmpCacheFullBehavior = "DISCONNECT_IMMEDIATELY" + RtmpCacheFullBehaviorWaitForServer RtmpCacheFullBehavior = "WAIT_FOR_SERVER" ) // Values returns all known values for RtmpCacheFullBehavior. Note that this can be @@ -4120,9 +4120,9 @@ type RtmpCaptionData string // Enum values for RtmpCaptionData const ( - RtmpCaptionDataAll RtmpCaptionData = "ALL" - RtmpCaptionDataField1_608 RtmpCaptionData = "FIELD1_608" - RtmpCaptionDataField1_and_field2_608 RtmpCaptionData = "FIELD1_AND_FIELD2_608" + RtmpCaptionDataAll RtmpCaptionData = "ALL" + RtmpCaptionDataField1608 RtmpCaptionData = "FIELD1_608" + RtmpCaptionDataField1AndField2608 RtmpCaptionData = "FIELD1_AND_FIELD2_608" ) // Values returns all known values for RtmpCaptionData. Note that this can be @@ -4140,8 +4140,8 @@ type RtmpOutputCertificateMode string // Enum values for RtmpOutputCertificateMode const ( - RtmpOutputCertificateModeSelf_signed RtmpOutputCertificateMode = "SELF_SIGNED" - RtmpOutputCertificateModeVerify_authenticity RtmpOutputCertificateMode = "VERIFY_AUTHENTICITY" + RtmpOutputCertificateModeSelfSigned RtmpOutputCertificateMode = "SELF_SIGNED" + RtmpOutputCertificateModeVerifyAuthenticity RtmpOutputCertificateMode = "VERIFY_AUTHENTICITY" ) // Values returns all known values for RtmpOutputCertificateMode. 
Note that this @@ -4214,8 +4214,8 @@ type Scte35ArchiveAllowedFlag string // Enum values for Scte35ArchiveAllowedFlag const ( - Scte35ArchiveAllowedFlagArchive_not_allowed Scte35ArchiveAllowedFlag = "ARCHIVE_NOT_ALLOWED" - Scte35ArchiveAllowedFlagArchive_allowed Scte35ArchiveAllowedFlag = "ARCHIVE_ALLOWED" + Scte35ArchiveAllowedFlagArchiveNotAllowed Scte35ArchiveAllowedFlag = "ARCHIVE_NOT_ALLOWED" + Scte35ArchiveAllowedFlagArchiveAllowed Scte35ArchiveAllowedFlag = "ARCHIVE_ALLOWED" ) // Values returns all known values for Scte35ArchiveAllowedFlag. Note that this can @@ -4232,10 +4232,10 @@ type Scte35DeviceRestrictions string // Enum values for Scte35DeviceRestrictions const ( - Scte35DeviceRestrictionsNone Scte35DeviceRestrictions = "NONE" - Scte35DeviceRestrictionsRestrict_group0 Scte35DeviceRestrictions = "RESTRICT_GROUP0" - Scte35DeviceRestrictionsRestrict_group1 Scte35DeviceRestrictions = "RESTRICT_GROUP1" - Scte35DeviceRestrictionsRestrict_group2 Scte35DeviceRestrictions = "RESTRICT_GROUP2" + Scte35DeviceRestrictionsNone Scte35DeviceRestrictions = "NONE" + Scte35DeviceRestrictionsRestrictGroup0 Scte35DeviceRestrictions = "RESTRICT_GROUP0" + Scte35DeviceRestrictionsRestrictGroup1 Scte35DeviceRestrictions = "RESTRICT_GROUP1" + Scte35DeviceRestrictionsRestrictGroup2 Scte35DeviceRestrictions = "RESTRICT_GROUP2" ) // Values returns all known values for Scte35DeviceRestrictions. Note that this can @@ -4254,8 +4254,8 @@ type Scte35NoRegionalBlackoutFlag string // Enum values for Scte35NoRegionalBlackoutFlag const ( - Scte35NoRegionalBlackoutFlagRegional_blackout Scte35NoRegionalBlackoutFlag = "REGIONAL_BLACKOUT" - Scte35NoRegionalBlackoutFlagNo_regional_blackout Scte35NoRegionalBlackoutFlag = "NO_REGIONAL_BLACKOUT" + Scte35NoRegionalBlackoutFlagRegionalBlackout Scte35NoRegionalBlackoutFlag = "REGIONAL_BLACKOUT" + Scte35NoRegionalBlackoutFlagNoRegionalBlackout Scte35NoRegionalBlackoutFlag = "NO_REGIONAL_BLACKOUT" ) // Values returns all known values for Scte35NoRegionalBlackoutFlag. Note that this @@ -4272,8 +4272,8 @@ type Scte35SegmentationCancelIndicator string // Enum values for Scte35SegmentationCancelIndicator const ( - Scte35SegmentationCancelIndicatorSegmentation_event_not_canceled Scte35SegmentationCancelIndicator = "SEGMENTATION_EVENT_NOT_CANCELED" - Scte35SegmentationCancelIndicatorSegmentation_event_canceled Scte35SegmentationCancelIndicator = "SEGMENTATION_EVENT_CANCELED" + Scte35SegmentationCancelIndicatorSegmentationEventNotCanceled Scte35SegmentationCancelIndicator = "SEGMENTATION_EVENT_NOT_CANCELED" + Scte35SegmentationCancelIndicatorSegmentationEventCanceled Scte35SegmentationCancelIndicator = "SEGMENTATION_EVENT_CANCELED" ) // Values returns all known values for Scte35SegmentationCancelIndicator. Note that @@ -4329,8 +4329,8 @@ type Scte35WebDeliveryAllowedFlag string // Enum values for Scte35WebDeliveryAllowedFlag const ( - Scte35WebDeliveryAllowedFlagWeb_delivery_not_allowed Scte35WebDeliveryAllowedFlag = "WEB_DELIVERY_NOT_ALLOWED" - Scte35WebDeliveryAllowedFlagWeb_delivery_allowed Scte35WebDeliveryAllowedFlag = "WEB_DELIVERY_ALLOWED" + Scte35WebDeliveryAllowedFlagWebDeliveryNotAllowed Scte35WebDeliveryAllowedFlag = "WEB_DELIVERY_NOT_ALLOWED" + Scte35WebDeliveryAllowedFlagWebDeliveryAllowed Scte35WebDeliveryAllowedFlag = "WEB_DELIVERY_ALLOWED" ) // Values returns all known values for Scte35WebDeliveryAllowedFlag. 
Note that this @@ -4347,8 +4347,8 @@ type SmoothGroupAudioOnlyTimecodeControl string // Enum values for SmoothGroupAudioOnlyTimecodeControl const ( - SmoothGroupAudioOnlyTimecodeControlPassthrough SmoothGroupAudioOnlyTimecodeControl = "PASSTHROUGH" - SmoothGroupAudioOnlyTimecodeControlUse_configured_clock SmoothGroupAudioOnlyTimecodeControl = "USE_CONFIGURED_CLOCK" + SmoothGroupAudioOnlyTimecodeControlPassthrough SmoothGroupAudioOnlyTimecodeControl = "PASSTHROUGH" + SmoothGroupAudioOnlyTimecodeControlUseConfiguredClock SmoothGroupAudioOnlyTimecodeControl = "USE_CONFIGURED_CLOCK" ) // Values returns all known values for SmoothGroupAudioOnlyTimecodeControl. Note @@ -4366,8 +4366,8 @@ type SmoothGroupCertificateMode string // Enum values for SmoothGroupCertificateMode const ( - SmoothGroupCertificateModeSelf_signed SmoothGroupCertificateMode = "SELF_SIGNED" - SmoothGroupCertificateModeVerify_authenticity SmoothGroupCertificateMode = "VERIFY_AUTHENTICITY" + SmoothGroupCertificateModeSelfSigned SmoothGroupCertificateMode = "SELF_SIGNED" + SmoothGroupCertificateModeVerifyAuthenticity SmoothGroupCertificateMode = "VERIFY_AUTHENTICITY" ) // Values returns all known values for SmoothGroupCertificateMode. Note that this @@ -4384,9 +4384,9 @@ type SmoothGroupEventIdMode string // Enum values for SmoothGroupEventIdMode const ( - SmoothGroupEventIdModeNo_event_id SmoothGroupEventIdMode = "NO_EVENT_ID" - SmoothGroupEventIdModeUse_configured SmoothGroupEventIdMode = "USE_CONFIGURED" - SmoothGroupEventIdModeUse_timestamp SmoothGroupEventIdMode = "USE_TIMESTAMP" + SmoothGroupEventIdModeNoEventId SmoothGroupEventIdMode = "NO_EVENT_ID" + SmoothGroupEventIdModeUseConfigured SmoothGroupEventIdMode = "USE_CONFIGURED" + SmoothGroupEventIdModeUseTimestamp SmoothGroupEventIdMode = "USE_TIMESTAMP" ) // Values returns all known values for SmoothGroupEventIdMode. Note that this can @@ -4404,8 +4404,8 @@ type SmoothGroupEventStopBehavior string // Enum values for SmoothGroupEventStopBehavior const ( - SmoothGroupEventStopBehaviorNone SmoothGroupEventStopBehavior = "NONE" - SmoothGroupEventStopBehaviorSend_eos SmoothGroupEventStopBehavior = "SEND_EOS" + SmoothGroupEventStopBehaviorNone SmoothGroupEventStopBehavior = "NONE" + SmoothGroupEventStopBehaviorSendEos SmoothGroupEventStopBehavior = "SEND_EOS" ) // Values returns all known values for SmoothGroupEventStopBehavior. Note that this @@ -4422,8 +4422,8 @@ type SmoothGroupSegmentationMode string // Enum values for SmoothGroupSegmentationMode const ( - SmoothGroupSegmentationModeUse_input_segmentation SmoothGroupSegmentationMode = "USE_INPUT_SEGMENTATION" - SmoothGroupSegmentationModeUse_segment_duration SmoothGroupSegmentationMode = "USE_SEGMENT_DURATION" + SmoothGroupSegmentationModeUseInputSegmentation SmoothGroupSegmentationMode = "USE_INPUT_SEGMENTATION" + SmoothGroupSegmentationModeUseSegmentDuration SmoothGroupSegmentationMode = "USE_SEGMENT_DURATION" ) // Values returns all known values for SmoothGroupSegmentationMode. 
Note that this @@ -4440,9 +4440,9 @@ type SmoothGroupSparseTrackType string // Enum values for SmoothGroupSparseTrackType const ( - SmoothGroupSparseTrackTypeNone SmoothGroupSparseTrackType = "NONE" - SmoothGroupSparseTrackTypeScte_35 SmoothGroupSparseTrackType = "SCTE_35" - SmoothGroupSparseTrackTypeScte_35_without_segmentation SmoothGroupSparseTrackType = "SCTE_35_WITHOUT_SEGMENTATION" + SmoothGroupSparseTrackTypeNone SmoothGroupSparseTrackType = "NONE" + SmoothGroupSparseTrackTypeScte35 SmoothGroupSparseTrackType = "SCTE_35" + SmoothGroupSparseTrackTypeScte35WithoutSegmentation SmoothGroupSparseTrackType = "SCTE_35_WITHOUT_SEGMENTATION" ) // Values returns all known values for SmoothGroupSparseTrackType. Note that this @@ -4460,8 +4460,8 @@ type SmoothGroupStreamManifestBehavior string // Enum values for SmoothGroupStreamManifestBehavior const ( - SmoothGroupStreamManifestBehaviorDo_not_send SmoothGroupStreamManifestBehavior = "DO_NOT_SEND" - SmoothGroupStreamManifestBehaviorSend SmoothGroupStreamManifestBehavior = "SEND" + SmoothGroupStreamManifestBehaviorDoNotSend SmoothGroupStreamManifestBehavior = "DO_NOT_SEND" + SmoothGroupStreamManifestBehaviorSend SmoothGroupStreamManifestBehavior = "SEND" ) // Values returns all known values for SmoothGroupStreamManifestBehavior. Note that @@ -4479,8 +4479,8 @@ type SmoothGroupTimestampOffsetMode string // Enum values for SmoothGroupTimestampOffsetMode const ( - SmoothGroupTimestampOffsetModeUse_configured_offset SmoothGroupTimestampOffsetMode = "USE_CONFIGURED_OFFSET" - SmoothGroupTimestampOffsetModeUse_event_start_date SmoothGroupTimestampOffsetMode = "USE_EVENT_START_DATE" + SmoothGroupTimestampOffsetModeUseConfiguredOffset SmoothGroupTimestampOffsetMode = "USE_CONFIGURED_OFFSET" + SmoothGroupTimestampOffsetModeUseEventStartDate SmoothGroupTimestampOffsetMode = "USE_EVENT_START_DATE" ) // Values returns all known values for SmoothGroupTimestampOffsetMode. 
Note that @@ -4537,23 +4537,23 @@ type TemporalFilterStrength string // Enum values for TemporalFilterStrength const ( - TemporalFilterStrengthAuto TemporalFilterStrength = "AUTO" - TemporalFilterStrengthStrength_1 TemporalFilterStrength = "STRENGTH_1" - TemporalFilterStrengthStrength_2 TemporalFilterStrength = "STRENGTH_2" - TemporalFilterStrengthStrength_3 TemporalFilterStrength = "STRENGTH_3" - TemporalFilterStrengthStrength_4 TemporalFilterStrength = "STRENGTH_4" - TemporalFilterStrengthStrength_5 TemporalFilterStrength = "STRENGTH_5" - TemporalFilterStrengthStrength_6 TemporalFilterStrength = "STRENGTH_6" - TemporalFilterStrengthStrength_7 TemporalFilterStrength = "STRENGTH_7" - TemporalFilterStrengthStrength_8 TemporalFilterStrength = "STRENGTH_8" - TemporalFilterStrengthStrength_9 TemporalFilterStrength = "STRENGTH_9" - TemporalFilterStrengthStrength_10 TemporalFilterStrength = "STRENGTH_10" - TemporalFilterStrengthStrength_11 TemporalFilterStrength = "STRENGTH_11" - TemporalFilterStrengthStrength_12 TemporalFilterStrength = "STRENGTH_12" - TemporalFilterStrengthStrength_13 TemporalFilterStrength = "STRENGTH_13" - TemporalFilterStrengthStrength_14 TemporalFilterStrength = "STRENGTH_14" - TemporalFilterStrengthStrength_15 TemporalFilterStrength = "STRENGTH_15" - TemporalFilterStrengthStrength_16 TemporalFilterStrength = "STRENGTH_16" + TemporalFilterStrengthAuto TemporalFilterStrength = "AUTO" + TemporalFilterStrengthStrength1 TemporalFilterStrength = "STRENGTH_1" + TemporalFilterStrengthStrength2 TemporalFilterStrength = "STRENGTH_2" + TemporalFilterStrengthStrength3 TemporalFilterStrength = "STRENGTH_3" + TemporalFilterStrengthStrength4 TemporalFilterStrength = "STRENGTH_4" + TemporalFilterStrengthStrength5 TemporalFilterStrength = "STRENGTH_5" + TemporalFilterStrengthStrength6 TemporalFilterStrength = "STRENGTH_6" + TemporalFilterStrengthStrength7 TemporalFilterStrength = "STRENGTH_7" + TemporalFilterStrengthStrength8 TemporalFilterStrength = "STRENGTH_8" + TemporalFilterStrengthStrength9 TemporalFilterStrength = "STRENGTH_9" + TemporalFilterStrengthStrength10 TemporalFilterStrength = "STRENGTH_10" + TemporalFilterStrengthStrength11 TemporalFilterStrength = "STRENGTH_11" + TemporalFilterStrengthStrength12 TemporalFilterStrength = "STRENGTH_12" + TemporalFilterStrengthStrength13 TemporalFilterStrength = "STRENGTH_13" + TemporalFilterStrengthStrength14 TemporalFilterStrength = "STRENGTH_14" + TemporalFilterStrengthStrength15 TemporalFilterStrength = "STRENGTH_15" + TemporalFilterStrengthStrength16 TemporalFilterStrength = "STRENGTH_16" ) // Values returns all known values for TemporalFilterStrength. Note that this can @@ -4605,8 +4605,8 @@ type TtmlDestinationStyleControl string // Enum values for TtmlDestinationStyleControl const ( - TtmlDestinationStyleControlPassthrough TtmlDestinationStyleControl = "PASSTHROUGH" - TtmlDestinationStyleControlUse_configured TtmlDestinationStyleControl = "USE_CONFIGURED" + TtmlDestinationStyleControlPassthrough TtmlDestinationStyleControl = "PASSTHROUGH" + TtmlDestinationStyleControlUseConfigured TtmlDestinationStyleControl = "USE_CONFIGURED" ) // Values returns all known values for TtmlDestinationStyleControl. 
Note that this @@ -4663,8 +4663,8 @@ type VideoDescriptionScalingBehavior string // Enum values for VideoDescriptionScalingBehavior const ( - VideoDescriptionScalingBehaviorDefault VideoDescriptionScalingBehavior = "DEFAULT" - VideoDescriptionScalingBehaviorStretch_to_output VideoDescriptionScalingBehavior = "STRETCH_TO_OUTPUT" + VideoDescriptionScalingBehaviorDefault VideoDescriptionScalingBehavior = "DEFAULT" + VideoDescriptionScalingBehaviorStretchToOutput VideoDescriptionScalingBehavior = "STRETCH_TO_OUTPUT" ) // Values returns all known values for VideoDescriptionScalingBehavior. Note that @@ -4682,9 +4682,9 @@ type VideoSelectorColorSpace string // Enum values for VideoSelectorColorSpace const ( - VideoSelectorColorSpaceFollow VideoSelectorColorSpace = "FOLLOW" - VideoSelectorColorSpaceRec_601 VideoSelectorColorSpace = "REC_601" - VideoSelectorColorSpaceRec_709 VideoSelectorColorSpace = "REC_709" + VideoSelectorColorSpaceFollow VideoSelectorColorSpace = "FOLLOW" + VideoSelectorColorSpaceRec601 VideoSelectorColorSpace = "REC_601" + VideoSelectorColorSpaceRec709 VideoSelectorColorSpace = "REC_709" ) // Values returns all known values for VideoSelectorColorSpace. Note that this can @@ -4720,10 +4720,10 @@ type WavCodingMode string // Enum values for WavCodingMode const ( - WavCodingModeCoding_mode_1_0 WavCodingMode = "CODING_MODE_1_0" - WavCodingModeCoding_mode_2_0 WavCodingMode = "CODING_MODE_2_0" - WavCodingModeCoding_mode_4_0 WavCodingMode = "CODING_MODE_4_0" - WavCodingModeCoding_mode_8_0 WavCodingMode = "CODING_MODE_8_0" + WavCodingModeCodingMode10 WavCodingMode = "CODING_MODE_1_0" + WavCodingModeCodingMode20 WavCodingMode = "CODING_MODE_2_0" + WavCodingModeCodingMode40 WavCodingMode = "CODING_MODE_4_0" + WavCodingModeCodingMode80 WavCodingMode = "CODING_MODE_8_0" ) // Values returns all known values for WavCodingMode. Note that this can be diff --git a/service/medialive/types/types.go b/service/medialive/types/types.go index 020210feee3..bc8315a2a08 100644 --- a/service/medialive/types/types.go +++ b/service/medialive/types/types.go @@ -1079,11 +1079,11 @@ type EbuTtDDestinationSettings struct { // Specifies how to handle the gap between the lines (in multi-line captions). // - // - // * enabled: Fill with the captions background color (as specified in the input + // * + // enabled: Fill with the captions background color (as specified in the input // captions). // - // * disabled: Leave the gap unfilled. + // * disabled: Leave the gap unfilled. FillLineGap EbuTtDFillLineGapControl // Specifies the font family to include in the font data attached to the EBU-TT @@ -1094,27 +1094,26 @@ type EbuTtDDestinationSettings struct { // copied from the input captions. The size is always set to 100% to allow the // downstream player to choose the size. // - // * Enter a list of font families, as a + // * Enter a list of font families, as a // comma-separated list of font names, in order of preference. The name can be a // font family (such as “Arial”), or a generic font family (such as “serif”), or // “default” (to let the downstream player choose the font). // - // * Leave blank to - // set the family to “monospace”. + // * Leave blank to set + // the family to “monospace”. FontFamily *string // Specifies the style information (font color, font position, and so on) to // include in the font data that is attached to the EBU-TT captions. 
// - // * - // include: Take the style information (font color, font position, and so on) from - // the source captions and include that information in the font data attached to - // the EBU-TT captions. This option is valid only if the source captions are - // Embedded or Teletext. + // * include: + // Take the style information (font color, font position, and so on) from the + // source captions and include that information in the font data attached to the + // EBU-TT captions. This option is valid only if the source captions are Embedded + // or Teletext. // - // * exclude: In the font data attached to the EBU-TT - // captions, set the font family to "monospaced". Do not include any other style - // information. + // * exclude: In the font data attached to the EBU-TT captions, set + // the font family to "monospaced". Do not include any other style information. StyleControl EbuTtDDestinationStyleControl } @@ -1488,13 +1487,13 @@ type H264Settings struct { // Leave as STANDARD_QUALITY or choose a different value (which might result in // additional costs to run the channel). // - // * ENHANCED_QUALITY: Produces a - // slightly better video quality without an increase in the bitrate. Has an effect - // only when the Rate control mode is QVBR or CBR. If this channel is in a - // MediaLive multiplex, the value must be ENHANCED_QUALITY. + // * ENHANCED_QUALITY: Produces a slightly + // better video quality without an increase in the bitrate. Has an effect only when + // the Rate control mode is QVBR or CBR. If this channel is in a MediaLive + // multiplex, the value must be ENHANCED_QUALITY. // - // * - // STANDARD_QUALITY: Valid for any Rate control mode. + // * STANDARD_QUALITY: Valid for + // any Rate control mode. QualityLevel H264QualityLevel // Controls the target quality for the video encode. Applies only when the rate @@ -1502,13 +1501,13 @@ type H264Settings struct { // bitrate field that suit your most important viewing devices. Recommended values // are: // - // * Primary screen: Quality level: 8 to 10. Max bitrate: 4M + // * Primary screen: Quality level: 8 to 10. Max bitrate: 4M // - // * PC or - // tablet: Quality level: 7. Max bitrate: 1.5M to 3M + // * PC or tablet: + // Quality level: 7. Max bitrate: 1.5M to 3M // - // * Smartphone: Quality - // level: 6. Max bitrate: 1M to 1.5M + // * Smartphone: Quality level: 6. Max + // bitrate: 1M to 1.5M QvbrQualityLevel *int32 // Rate control mode. QVBR: Quality will match the specified quality level except @@ -1528,10 +1527,10 @@ type H264Settings struct { // Scene change detection. // - // * On: inserts I-frames when scene change is + // * On: inserts I-frames when scene change is // detected. // - // * Off: does not force an I-frame when scene change is detected. + // * Off: does not force an I-frame when scene change is detected. SceneChangeDetect H264SceneChangeDetect // Number of slices per picture. Must be less than or equal to the number of @@ -1562,11 +1561,11 @@ type H264Settings struct { // Determines how timecodes should be inserted into the video elementary stream. 
// + // * + // 'disabled': Do not include timecodes // - // * 'disabled': Do not include timecodes - // - // * 'picTimingSei': Pass through - // picture timing SEI messages from the source specified in Timecode Config + // * 'picTimingSei': Pass through picture + // timing SEI messages from the source specified in Timecode Config TimecodeInsertion H264TimecodeInsertionBehavior } @@ -1693,13 +1692,13 @@ type H265Settings struct { // bitrate field that suit your most important viewing devices. Recommended values // are: // - // * Primary screen: Quality level: 8 to 10. Max bitrate: 4M + // * Primary screen: Quality level: 8 to 10. Max bitrate: 4M // - // * PC or - // tablet: Quality level: 7. Max bitrate: 1.5M to 3M + // * PC or tablet: + // Quality level: 7. Max bitrate: 1.5M to 3M // - // * Smartphone: Quality - // level: 6. Max bitrate: 1M to 1.5M + // * Smartphone: Quality level: 6. Max + // bitrate: 1M to 1.5M QvbrQualityLevel *int32 // Rate control mode. QVBR: Quality will match the specified quality level except @@ -1730,11 +1729,11 @@ type H265Settings struct { // Determines how timecodes should be inserted into the video elementary stream. // + // * + // 'disabled': Do not include timecodes // - // * 'disabled': Do not include timecodes - // - // * 'picTimingSei': Pass through - // picture timing SEI messages from the source specified in Timecode Config + // * 'picTimingSei': Pass through picture + // timing SEI messages from the source specified in Timecode Config TimecodeInsertion H265TimecodeInsertionBehavior } @@ -2515,13 +2514,13 @@ type InputSettings struct { // Turns on the filter for this input. MPEG-2 inputs have the deblocking filter // enabled by default. // - // * auto - filtering will be applied depending on input + // * auto - filtering will be applied depending on input // type/quality // - // * disabled - no filtering will be applied to the input + // * disabled - no filtering will be applied to the input // - // * - // forced - filtering will be applied regardless of input type + // * forced - + // filtering will be applied regardless of input type InputFilter InputFilter // Input settings. @@ -2531,11 +2530,11 @@ type InputSettings struct { // in this input. Applicable data types are captions, timecode, AFD, and SCTE-104 // messages. // - // * PREFER: Extract from SMPTE-2038 if present in this input, - // otherwise extract from another source (if any). + // * PREFER: Extract from SMPTE-2038 if present in this input, otherwise + // extract from another source (if any). // - // * IGNORE: Never extract any - // ancillary data from SMPTE-2038. + // * IGNORE: Never extract any ancillary + // data from SMPTE-2038. Smpte2038DataPreference Smpte2038DataPreference // Loop input if it is a file. This allows a file input to be streamed @@ -3142,14 +3141,14 @@ type MsSmoothGroupSettings struct { // is sent and the same Live Event is used without changing the publishing point, // clients might see cached video from the previous run. Options: // - // * + // * // "useConfigured" - use the value provided in eventId // - // * "useTimestamp" - - // generate and send an event ID based on the current timestamp + // * "useTimestamp" - generate + // and send an event ID based on the current timestamp // - // * "noEventId" - // - do not send an event ID to the IIS server. + // * "noEventId" - do not send + // an event ID to the IIS server. 
EventIdMode SmoothGroupEventIdMode // When set to sendEos, send EOS signal to IIS server when stopping the event @@ -3181,16 +3180,16 @@ type MsSmoothGroupSettings struct { // Identifies the type of data to place in the sparse track: // - // * SCTE35: Insert + // * SCTE35: Insert // SCTE-35 messages from the source content. With each message, insert an IDR frame // to start a new segment. // - // * SCTE35_WITHOUT_SEGMENTATION: Insert SCTE-35 - // messages from the source content. With each message, insert an IDR frame but - // don't start a new segment. + // * SCTE35_WITHOUT_SEGMENTATION: Insert SCTE-35 messages + // from the source content. With each message, insert an IDR frame but don't start + // a new segment. // - // * NONE: Don't generate a sparse track for any - // outputs in this output group. + // * NONE: Don't generate a sparse track for any outputs in this + // output group. SparseTrackType SmoothGroupSparseTrackType // When set to send, send stream manifest so publishing point doesn't start until @@ -3203,10 +3202,10 @@ type MsSmoothGroupSettings struct { // Type of timestamp date offset to use. // - // * useEventStartDate: Use the date the + // * useEventStartDate: Use the date the // event was started as the offset // - // * useConfiguredOffset: Use an explicitly + // * useConfiguredOffset: Use an explicitly // configured date as the offset TimestampOffsetMode SmoothGroupTimestampOffsetMode } @@ -3887,12 +3886,11 @@ type RtmpGroupSettings struct { // Controls the behavior of this RTMP group if input becomes unavailable. // - // * + // * // emitOutput: Emit a slate until input returns. // - // * pauseOutput: Stop - // transmitting data until input returns. This does not close the underlying RTMP - // connection. + // * pauseOutput: Stop transmitting + // data until input returns. This does not close the underlying RTMP connection. InputLossAction InputLossActionForRtmpOut // If a streaming output fails, number of seconds to wait until a restart is @@ -4021,18 +4019,18 @@ type Scte27SourceSettings struct { // The pid field is used in conjunction with the caption selector languageCode // field as follows: // - // * Specify PID and Language: Extracts captions from that - // PID; the language is "informational". + // * Specify PID and Language: Extracts captions from that PID; + // the language is "informational". // - // * Specify PID and omit Language: - // Extracts the specified PID. + // * Specify PID and omit Language: Extracts the + // specified PID. // - // * Omit PID and specify Language: Extracts the - // specified language, whichever PID that happens to be. + // * Omit PID and specify Language: Extracts the specified + // language, whichever PID that happens to be. // - // * Omit PID and omit - // Language: Valid only if source is DVB-Sub that is being passed through; all - // languages will be passed through. + // * Omit PID and omit Language: Valid + // only if source is DVB-Sub that is being passed through; all languages will be + // passed through. Pid *int32 } @@ -4344,11 +4342,11 @@ type TemporalFilterSettings struct { // If you enable this filter, the results are the following: // - // * If the source + // * If the source // content is noisy (it contains excessive digital artifacts), the filter cleans up // the source. // - // * If the source content is already clean, the filter tends to + // * If the source content is already clean, the filter tends to // decrease the bitrate, especially when the rate control mode is QVBR. 
PostFilterSharpening TemporalFilterPostFilterSharpening diff --git a/service/mediapackage/types/enums.go b/service/mediapackage/types/enums.go index a050f87ca1c..176be4a855b 100644 --- a/service/mediapackage/types/enums.go +++ b/service/mediapackage/types/enums.go @@ -6,14 +6,14 @@ type AdTriggersElement string // Enum values for AdTriggersElement const ( - AdTriggersElementSplice_insert AdTriggersElement = "SPLICE_INSERT" - AdTriggersElementBreak AdTriggersElement = "BREAK" - AdTriggersElementProvider_advertisement AdTriggersElement = "PROVIDER_ADVERTISEMENT" - AdTriggersElementDistributor_advertisement AdTriggersElement = "DISTRIBUTOR_ADVERTISEMENT" - AdTriggersElementProvider_placement_opportunity AdTriggersElement = "PROVIDER_PLACEMENT_OPPORTUNITY" - AdTriggersElementDistributor_placement_opportunity AdTriggersElement = "DISTRIBUTOR_PLACEMENT_OPPORTUNITY" - AdTriggersElementProvider_overlay_placement_opportunity AdTriggersElement = "PROVIDER_OVERLAY_PLACEMENT_OPPORTUNITY" - AdTriggersElementDistributor_overlay_placement_opportunity AdTriggersElement = "DISTRIBUTOR_OVERLAY_PLACEMENT_OPPORTUNITY" + AdTriggersElementSpliceInsert AdTriggersElement = "SPLICE_INSERT" + AdTriggersElementBreak AdTriggersElement = "BREAK" + AdTriggersElementProviderAdvertisement AdTriggersElement = "PROVIDER_ADVERTISEMENT" + AdTriggersElementDistributorAdvertisement AdTriggersElement = "DISTRIBUTOR_ADVERTISEMENT" + AdTriggersElementProviderPlacementOpportunity AdTriggersElement = "PROVIDER_PLACEMENT_OPPORTUNITY" + AdTriggersElementDistributorPlacementOpportunity AdTriggersElement = "DISTRIBUTOR_PLACEMENT_OPPORTUNITY" + AdTriggersElementProviderOverlayPlacementOpportunity AdTriggersElement = "PROVIDER_OVERLAY_PLACEMENT_OPPORTUNITY" + AdTriggersElementDistributorOverlayPlacementOpportunity AdTriggersElement = "DISTRIBUTOR_OVERLAY_PLACEMENT_OPPORTUNITY" ) // Values returns all known values for AdTriggersElement. Note that this can be @@ -52,10 +52,10 @@ type AdMarkers string // Enum values for AdMarkers const ( - AdMarkersNone AdMarkers = "NONE" - AdMarkersScte35_enhanced AdMarkers = "SCTE35_ENHANCED" - AdMarkersPassthrough AdMarkers = "PASSTHROUGH" - AdMarkersDaterange AdMarkers = "DATERANGE" + AdMarkersNone AdMarkers = "NONE" + AdMarkersScte35Enhanced AdMarkers = "SCTE35_ENHANCED" + AdMarkersPassthrough AdMarkers = "PASSTHROUGH" + AdMarkersDaterange AdMarkers = "DATERANGE" ) // Values returns all known values for AdMarkers. Note that this can be expanded in @@ -96,8 +96,8 @@ type EncryptionMethod string // Enum values for EncryptionMethod const ( - EncryptionMethodAes_128 EncryptionMethod = "AES_128" - EncryptionMethodSample_aes EncryptionMethod = "SAMPLE_AES" + EncryptionMethodAes128 EncryptionMethod = "AES_128" + EncryptionMethodSampleAes EncryptionMethod = "SAMPLE_AES" ) // Values returns all known values for EncryptionMethod. Note that this can be @@ -170,8 +170,8 @@ type Profile string // Enum values for Profile const ( - ProfileNone Profile = "NONE" - ProfileHbbtv_1_5 Profile = "HBBTV_1_5" + ProfileNone Profile = "NONE" + ProfileHbbtv15 Profile = "HBBTV_1_5" ) // Values returns all known values for Profile. 
Note that this can be expanded in @@ -188,9 +188,9 @@ type SegmentTemplateFormat string // Enum values for SegmentTemplateFormat const ( - SegmentTemplateFormatNumber_with_timeline SegmentTemplateFormat = "NUMBER_WITH_TIMELINE" - SegmentTemplateFormatTime_with_timeline SegmentTemplateFormat = "TIME_WITH_TIMELINE" - SegmentTemplateFormatNumber_with_duration SegmentTemplateFormat = "NUMBER_WITH_DURATION" + SegmentTemplateFormatNumberWithTimeline SegmentTemplateFormat = "NUMBER_WITH_TIMELINE" + SegmentTemplateFormatTimeWithTimeline SegmentTemplateFormat = "TIME_WITH_TIMELINE" + SegmentTemplateFormatNumberWithDuration SegmentTemplateFormat = "NUMBER_WITH_DURATION" ) // Values returns all known values for SegmentTemplateFormat. Note that this can be @@ -208,9 +208,9 @@ type Status string // Enum values for Status const ( - StatusIn_progress Status = "IN_PROGRESS" - StatusSucceeded Status = "SUCCEEDED" - StatusFailed Status = "FAILED" + StatusInProgress Status = "IN_PROGRESS" + StatusSucceeded Status = "SUCCEEDED" + StatusFailed Status = "FAILED" ) // Values returns all known values for Status. Note that this can be expanded in @@ -228,9 +228,9 @@ type StreamOrder string // Enum values for StreamOrder const ( - StreamOrderOriginal StreamOrder = "ORIGINAL" - StreamOrderVideo_bitrate_ascending StreamOrder = "VIDEO_BITRATE_ASCENDING" - StreamOrderVideo_bitrate_descending StreamOrder = "VIDEO_BITRATE_DESCENDING" + StreamOrderOriginal StreamOrder = "ORIGINAL" + StreamOrderVideoBitrateAscending StreamOrder = "VIDEO_BITRATE_ASCENDING" + StreamOrderVideoBitrateDescending StreamOrder = "VIDEO_BITRATE_DESCENDING" ) // Values returns all known values for StreamOrder. Note that this can be expanded @@ -248,9 +248,9 @@ type UtcTiming string // Enum values for UtcTiming const ( - UtcTimingNone UtcTiming = "NONE" - UtcTimingHttp_head UtcTiming = "HTTP-HEAD" - UtcTimingHttp_iso UtcTiming = "HTTP-ISO" + UtcTimingNone UtcTiming = "NONE" + UtcTimingHttpHead UtcTiming = "HTTP-HEAD" + UtcTimingHttpIso UtcTiming = "HTTP-ISO" ) // Values returns all known values for UtcTiming. Note that this can be expanded in diff --git a/service/mediapackagevod/types/enums.go b/service/mediapackagevod/types/enums.go index ed1ae4d7395..ff7188b2b8d 100644 --- a/service/mediapackagevod/types/enums.go +++ b/service/mediapackagevod/types/enums.go @@ -22,9 +22,9 @@ type AdMarkers string // Enum values for AdMarkers const ( - AdMarkersNone AdMarkers = "NONE" - AdMarkersScte35_enhanced AdMarkers = "SCTE35_ENHANCED" - AdMarkersPassthrough AdMarkers = "PASSTHROUGH" + AdMarkersNone AdMarkers = "NONE" + AdMarkersScte35Enhanced AdMarkers = "SCTE35_ENHANCED" + AdMarkersPassthrough AdMarkers = "PASSTHROUGH" ) // Values returns all known values for AdMarkers. Note that this can be expanded in @@ -42,8 +42,8 @@ type EncryptionMethod string // Enum values for EncryptionMethod const ( - EncryptionMethodAes_128 EncryptionMethod = "AES_128" - EncryptionMethodSample_aes EncryptionMethod = "SAMPLE_AES" + EncryptionMethodAes128 EncryptionMethod = "AES_128" + EncryptionMethodSampleAes EncryptionMethod = "SAMPLE_AES" ) // Values returns all known values for EncryptionMethod. Note that this can be @@ -78,8 +78,8 @@ type Profile string // Enum values for Profile const ( - ProfileNone Profile = "NONE" - ProfileHbbtv_1_5 Profile = "HBBTV_1_5" + ProfileNone Profile = "NONE" + ProfileHbbtv15 Profile = "HBBTV_1_5" ) // Values returns all known values for Profile. 
Note that this can be expanded in @@ -96,9 +96,9 @@ type SegmentTemplateFormat string // Enum values for SegmentTemplateFormat const ( - SegmentTemplateFormatNumber_with_timeline SegmentTemplateFormat = "NUMBER_WITH_TIMELINE" - SegmentTemplateFormatTime_with_timeline SegmentTemplateFormat = "TIME_WITH_TIMELINE" - SegmentTemplateFormatNumber_with_duration SegmentTemplateFormat = "NUMBER_WITH_DURATION" + SegmentTemplateFormatNumberWithTimeline SegmentTemplateFormat = "NUMBER_WITH_TIMELINE" + SegmentTemplateFormatTimeWithTimeline SegmentTemplateFormat = "TIME_WITH_TIMELINE" + SegmentTemplateFormatNumberWithDuration SegmentTemplateFormat = "NUMBER_WITH_DURATION" ) // Values returns all known values for SegmentTemplateFormat. Note that this can be @@ -116,9 +116,9 @@ type StreamOrder string // Enum values for StreamOrder const ( - StreamOrderOriginal StreamOrder = "ORIGINAL" - StreamOrderVideo_bitrate_ascending StreamOrder = "VIDEO_BITRATE_ASCENDING" - StreamOrderVideo_bitrate_descending StreamOrder = "VIDEO_BITRATE_DESCENDING" + StreamOrderOriginal StreamOrder = "ORIGINAL" + StreamOrderVideoBitrateAscending StreamOrder = "VIDEO_BITRATE_ASCENDING" + StreamOrderVideoBitrateDescending StreamOrder = "VIDEO_BITRATE_DESCENDING" ) // Values returns all known values for StreamOrder. Note that this can be expanded diff --git a/service/mediastore/api_op_PutContainerPolicy.go b/service/mediastore/api_op_PutContainerPolicy.go index 04c7e788242..125b7600c56 100644 --- a/service/mediastore/api_op_PutContainerPolicy.go +++ b/service/mediastore/api_op_PutContainerPolicy.go @@ -40,10 +40,10 @@ type PutContainerPolicyInput struct { // The contents of the policy, which includes the following: // - // * One Version - // tag + // * One Version tag // - // * One Statement tag that contains the standard tags for the policy. + // * + // One Statement tag that contains the standard tags for the policy. // // This member is required. Policy *string diff --git a/service/mediastore/api_op_PutMetricPolicy.go b/service/mediastore/api_op_PutMetricPolicy.go index aec33686eb8..1b8f9deeed7 100644 --- a/service/mediastore/api_op_PutMetricPolicy.go +++ b/service/mediastore/api_op_PutMetricPolicy.go @@ -42,14 +42,14 @@ type PutMetricPolicyInput struct { // MediaStore to send object-level metrics for. If you include rules in the policy, // construct each rule with both of the following: // - // * An object group that - // defines which objects to include in the group. The definition can be a path or a - // file name, but it can't have more than 900 characters. Valid characters are: - // a-z, A-Z, 0-9, _ (underscore), = (equal), : (colon), . (period), - (hyphen), ~ + // * An object group that defines + // which objects to include in the group. The definition can be a path or a file + // name, but it can't have more than 900 characters. Valid characters are: a-z, + // A-Z, 0-9, _ (underscore), = (equal), : (colon), . (period), - (hyphen), ~ // (tilde), / (forward slash), and * (asterisk). Wildcards (*) are acceptable. // - // - // * An object group name that allows you to refer to the object group. The name + // * + // An object group name that allows you to refer to the object group. The name // can't have more than 30 characters. Valid characters are: a-z, A-Z, 0-9, and _ // (underscore). 
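The constant renames above for service/mediapackage and service/mediapackagevod (the same pattern continues below for mediatailor, migrationhub, mq, and others) only change the Go identifiers; the underlying string values are untouched, so serialized requests and responses are unaffected. A small sketch of caller code updated for the new names, assuming the usual module path github.com/aws/aws-sdk-go-v2/service/mediapackage/types:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/mediapackage/types"
)

func main() {
	// Previously spelled types.AdMarkersScte35_enhanced; only the Go name changed.
	marker := types.AdMarkersScte35Enhanced

	// The wire value is still "SCTE35_ENHANCED".
	fmt.Println(string(marker) == "SCTE35_ENHANCED") // prints true

	// Existing comparisons against the old constants need a one-for-one rename,
	// e.g. EncryptionMethodAes_128 -> EncryptionMethodAes128.
	if method := types.EncryptionMethodAes128; method == "AES_128" {
		fmt.Println("wire value unchanged")
	}
}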
// diff --git a/service/mediatailor/types/enums.go b/service/mediatailor/types/enums.go index eee23796fdf..a160822cf5d 100644 --- a/service/mediatailor/types/enums.go +++ b/service/mediatailor/types/enums.go @@ -6,8 +6,8 @@ type Mode string // Enum values for Mode const ( - ModeOff Mode = "OFF" - ModeBehind_live_edge Mode = "BEHIND_LIVE_EDGE" + ModeOff Mode = "OFF" + ModeBehindLiveEdge Mode = "BEHIND_LIVE_EDGE" ) // Values returns all known values for Mode. Note that this can be expanded in the @@ -24,8 +24,8 @@ type OriginManifestType string // Enum values for OriginManifestType const ( - OriginManifestTypeSingle_period OriginManifestType = "SINGLE_PERIOD" - OriginManifestTypeMulti_period OriginManifestType = "MULTI_PERIOD" + OriginManifestTypeSinglePeriod OriginManifestType = "SINGLE_PERIOD" + OriginManifestTypeMultiPeriod OriginManifestType = "MULTI_PERIOD" ) // Values returns all known values for OriginManifestType. Note that this can be diff --git a/service/migrationhub/api_op_AssociateCreatedArtifact.go b/service/migrationhub/api_op_AssociateCreatedArtifact.go index c670a711d9c..81bb7ceb1ba 100644 --- a/service/migrationhub/api_op_AssociateCreatedArtifact.go +++ b/service/migrationhub/api_op_AssociateCreatedArtifact.go @@ -15,17 +15,17 @@ import ( // migration, with the migration task performed by a migration tool. This API has // the following traits: // -// * Migration tools can call the -// AssociateCreatedArtifact operation to indicate which AWS artifact is associated -// with a migration task. +// * Migration tools can call the AssociateCreatedArtifact +// operation to indicate which AWS artifact is associated with a migration task. // -// * The created artifact name must be provided in ARN -// (Amazon Resource Name) format which will contain information about type and -// region; for example: arn:aws:ec2:us-east-1:488216288981:image/ami-6d0ba87b. +// * +// The created artifact name must be provided in ARN (Amazon Resource Name) format +// which will contain information about type and region; for example: +// arn:aws:ec2:us-east-1:488216288981:image/ami-6d0ba87b. // -// -// * Examples of the AWS resource behind the created artifact are, AMI's, EC2 -// instance, or DMS endpoint, etc. +// * Examples of the AWS +// resource behind the created artifact are, AMI's, EC2 instance, or DMS endpoint, +// etc. func (c *Client) AssociateCreatedArtifact(ctx context.Context, params *AssociateCreatedArtifactInput, optFns ...func(*Options)) (*AssociateCreatedArtifactOutput, error) { if params == nil { params = &AssociateCreatedArtifactInput{} diff --git a/service/migrationhub/api_op_DeleteProgressUpdateStream.go b/service/migrationhub/api_op_DeleteProgressUpdateStream.go index 55ff241f5d6..903ff1a893a 100644 --- a/service/migrationhub/api_op_DeleteProgressUpdateStream.go +++ b/service/migrationhub/api_op_DeleteProgressUpdateStream.go @@ -14,27 +14,26 @@ import ( // previously created as an AWS resource used for access control. This API has the // following traits: // -// * The only parameter needed for -// DeleteProgressUpdateStream is the stream name (same as a -// CreateProgressUpdateStream call). +// * The only parameter needed for DeleteProgressUpdateStream is +// the stream name (same as a CreateProgressUpdateStream call). // -// * The call will return, and a background -// process will asynchronously delete the stream and all of its resources (tasks, -// associated resources, resource attributes, created artifacts). 
+// * The call will +// return, and a background process will asynchronously delete the stream and all +// of its resources (tasks, associated resources, resource attributes, created +// artifacts). // -// * If the -// stream takes time to be deleted, it might still show up on a -// ListProgressUpdateStreams call. +// * If the stream takes time to be deleted, it might still show up on +// a ListProgressUpdateStreams call. // -// * CreateProgressUpdateStream, +// * CreateProgressUpdateStream, // ImportMigrationTask, NotifyMigrationTaskState, and all Associate[*] APIs related // to the tasks belonging to the stream will throw "InvalidInputException" if the // stream of the same name is in the process of being deleted. // -// * Once the -// stream and all of its resources are deleted, CreateProgressUpdateStream for a -// stream of the same name will succeed, and that stream will be an entirely new -// logical resource (without any resources associated with the old stream). +// * Once the stream +// and all of its resources are deleted, CreateProgressUpdateStream for a stream of +// the same name will succeed, and that stream will be an entirely new logical +// resource (without any resources associated with the old stream). func (c *Client) DeleteProgressUpdateStream(ctx context.Context, params *DeleteProgressUpdateStreamInput, optFns ...func(*Options)) (*DeleteProgressUpdateStreamOutput, error) { if params == nil { params = &DeleteProgressUpdateStreamInput{} diff --git a/service/migrationhub/api_op_DisassociateCreatedArtifact.go b/service/migrationhub/api_op_DisassociateCreatedArtifact.go index bc024d54ff5..b0dbad3f462 100644 --- a/service/migrationhub/api_op_DisassociateCreatedArtifact.go +++ b/service/migrationhub/api_op_DisassociateCreatedArtifact.go @@ -14,17 +14,17 @@ import ( // performed by a migration tool that was previously associated. This API has the // following traits: // -// * A migration user can call the -// DisassociateCreatedArtifacts operation to disassociate a created AWS Artifact -// from a migration task. +// * A migration user can call the DisassociateCreatedArtifacts +// operation to disassociate a created AWS Artifact from a migration task. // -// * The created artifact name must be provided in ARN -// (Amazon Resource Name) format which will contain information about type and -// region; for example: arn:aws:ec2:us-east-1:488216288981:image/ami-6d0ba87b. +// * The +// created artifact name must be provided in ARN (Amazon Resource Name) format +// which will contain information about type and region; for example: +// arn:aws:ec2:us-east-1:488216288981:image/ami-6d0ba87b. // -// -// * Examples of the AWS resource behind the created artifact are, AMI's, EC2 -// instance, or RDS instance, etc. +// * Examples of the AWS +// resource behind the created artifact are, AMI's, EC2 instance, or RDS instance, +// etc. func (c *Client) DisassociateCreatedArtifact(ctx context.Context, params *DisassociateCreatedArtifactInput, optFns ...func(*Options)) (*DisassociateCreatedArtifactOutput, error) { if params == nil { params = &DisassociateCreatedArtifactInput{} diff --git a/service/migrationhub/api_op_ListCreatedArtifacts.go b/service/migrationhub/api_op_ListCreatedArtifacts.go index 7f4eb27d495..27e79271e9a 100644 --- a/service/migrationhub/api_op_ListCreatedArtifacts.go +++ b/service/migrationhub/api_op_ListCreatedArtifacts.go @@ -14,14 +14,14 @@ import ( // Lists the created artifacts attached to a given migration task in an update // stream. 
This API has the following traits: // -// * Gets the list of the created +// * Gets the list of the created // artifacts while migration is taking place. // -// * Shows the artifacts created by -// the migration tool that was associated by the AssociateCreatedArtifact API. +// * Shows the artifacts created by the +// migration tool that was associated by the AssociateCreatedArtifact API. // -// -// * Lists created artifacts in a paginated interface. +// * Lists +// created artifacts in a paginated interface. func (c *Client) ListCreatedArtifacts(ctx context.Context, params *ListCreatedArtifactsInput, optFns ...func(*Options)) (*ListCreatedArtifactsOutput, error) { if params == nil { params = &ListCreatedArtifactsInput{} diff --git a/service/migrationhub/api_op_ListMigrationTasks.go b/service/migrationhub/api_op_ListMigrationTasks.go index 2ea9f3aa854..792c7a25dfb 100644 --- a/service/migrationhub/api_op_ListMigrationTasks.go +++ b/service/migrationhub/api_op_ListMigrationTasks.go @@ -14,14 +14,14 @@ import ( // Lists all, or filtered by resource name, migration tasks associated with the // user account making this call. This API has the following traits: // -// * Can -// show a summary list of the most recent migration tasks. +// * Can show a +// summary list of the most recent migration tasks. // -// * Can show a -// summary list of migration tasks associated with a given discovered resource. +// * Can show a summary list of +// migration tasks associated with a given discovered resource. // -// -// * Lists migration tasks in a paginated interface. +// * Lists migration +// tasks in a paginated interface. func (c *Client) ListMigrationTasks(ctx context.Context, params *ListMigrationTasksInput, optFns ...func(*Options)) (*ListMigrationTasksOutput, error) { if params == nil { params = &ListMigrationTasksInput{} diff --git a/service/migrationhub/api_op_NotifyMigrationTaskState.go b/service/migrationhub/api_op_NotifyMigrationTaskState.go index 88701e49c7e..fd52209e103 100644 --- a/service/migrationhub/api_op_NotifyMigrationTaskState.go +++ b/service/migrationhub/api_op_NotifyMigrationTaskState.go @@ -15,15 +15,15 @@ import ( // Notifies Migration Hub of the current status, progress, or other detail // regarding a migration task. This API has the following traits: // -// * Migration +// * Migration // tools will call the NotifyMigrationTaskState API to share the latest progress // and status. // -// * MigrationTaskName is used for addressing updates to the -// correct target. +// * MigrationTaskName is used for addressing updates to the correct +// target. // -// * ProgressUpdateStream is used for access control and to -// provide a namespace for each migration tool. +// * ProgressUpdateStream is used for access control and to provide a +// namespace for each migration tool. func (c *Client) NotifyMigrationTaskState(ctx context.Context, params *NotifyMigrationTaskStateInput, optFns ...func(*Options)) (*NotifyMigrationTaskStateOutput, error) { if params == nil { params = &NotifyMigrationTaskStateInput{} diff --git a/service/migrationhub/api_op_PutResourceAttributes.go b/service/migrationhub/api_op_PutResourceAttributes.go index e91a1981a0e..1f8df3cec0b 100644 --- a/service/migrationhub/api_op_PutResourceAttributes.go +++ b/service/migrationhub/api_op_PutResourceAttributes.go @@ -15,13 +15,13 @@ import ( // associated in the Application Discovery Service repository. This association // occurs asynchronously after PutResourceAttributes returns. 
// -// * Keep in mind -// that subsequent calls to PutResourceAttributes will override previously stored +// * Keep in mind that +// subsequent calls to PutResourceAttributes will override previously stored // attributes. For example, if it is first called with a MAC address, but later, it // is desired to add an IP address, it will then be required to call it with both // the IP and MAC addresses to prevent overriding the MAC address. // -// * Note the +// * Note the // instructions regarding the special use case of the ResourceAttributeList // (https://docs.aws.amazon.com/migrationhub/latest/ug/API_PutResourceAttributes.html#migrationhub-PutResourceAttributes-request-ResourceAttributeList) // parameter when specifying any "VM" related value. @@ -66,17 +66,17 @@ type PutResourceAttributesInput struct { // MOTHERBOARD_SERIAL_NUMBER where the identifying value can be a string up to 256 // characters. // - // * If any "VM" related value is set for a ResourceAttribute - // object, it is required that VM_MANAGER_ID, as a minimum, is always set. If - // VM_MANAGER_ID is not set, then all "VM" fields will be discarded and "VM" fields - // will not be used for matching the migration task to a server in Application - // Discovery Service repository. See the Example + // * If any "VM" related value is set for a ResourceAttribute object, + // it is required that VM_MANAGER_ID, as a minimum, is always set. If VM_MANAGER_ID + // is not set, then all "VM" fields will be discarded and "VM" fields will not be + // used for matching the migration task to a server in Application Discovery + // Service repository. See the Example // (https://docs.aws.amazon.com/migrationhub/latest/ug/API_PutResourceAttributes.html#API_PutResourceAttributes_Examples) // section below for a use case of specifying "VM" related values. // - // * If a - // server you are trying to match has multiple IP or MAC addresses, you should - // provide as many as you know in separate type/value pairs passed to the + // * If a server + // you are trying to match has multiple IP or MAC addresses, you should provide as + // many as you know in separate type/value pairs passed to the // ResourceAttributeList parameter to maximize the chances of matching. // // This member is required. diff --git a/service/migrationhub/types/enums.go b/service/migrationhub/types/enums.go index 44434dfff26..d0892e664c0 100644 --- a/service/migrationhub/types/enums.go +++ b/service/migrationhub/types/enums.go @@ -6,9 +6,9 @@ type ApplicationStatus string // Enum values for ApplicationStatus const ( - ApplicationStatusNot_started ApplicationStatus = "NOT_STARTED" - ApplicationStatusIn_progress ApplicationStatus = "IN_PROGRESS" - ApplicationStatusCompleted ApplicationStatus = "COMPLETED" + ApplicationStatusNotStarted ApplicationStatus = "NOT_STARTED" + ApplicationStatusInProgress ApplicationStatus = "IN_PROGRESS" + ApplicationStatusCompleted ApplicationStatus = "COMPLETED" ) // Values returns all known values for ApplicationStatus. 
Note that this can be @@ -26,16 +26,16 @@ type ResourceAttributeType string // Enum values for ResourceAttributeType const ( - ResourceAttributeTypeIpv4_address ResourceAttributeType = "IPV4_ADDRESS" - ResourceAttributeTypeIpv6_address ResourceAttributeType = "IPV6_ADDRESS" - ResourceAttributeTypeMac_address ResourceAttributeType = "MAC_ADDRESS" - ResourceAttributeTypeFqdn ResourceAttributeType = "FQDN" - ResourceAttributeTypeVm_manager_id ResourceAttributeType = "VM_MANAGER_ID" - ResourceAttributeTypeVm_managed_object_reference ResourceAttributeType = "VM_MANAGED_OBJECT_REFERENCE" - ResourceAttributeTypeVm_name ResourceAttributeType = "VM_NAME" - ResourceAttributeTypeVm_path ResourceAttributeType = "VM_PATH" - ResourceAttributeTypeBios_id ResourceAttributeType = "BIOS_ID" - ResourceAttributeTypeMotherboard_serial_number ResourceAttributeType = "MOTHERBOARD_SERIAL_NUMBER" + ResourceAttributeTypeIpv4Address ResourceAttributeType = "IPV4_ADDRESS" + ResourceAttributeTypeIpv6Address ResourceAttributeType = "IPV6_ADDRESS" + ResourceAttributeTypeMacAddress ResourceAttributeType = "MAC_ADDRESS" + ResourceAttributeTypeFqdn ResourceAttributeType = "FQDN" + ResourceAttributeTypeVmManagerId ResourceAttributeType = "VM_MANAGER_ID" + ResourceAttributeTypeVmManagedObjectReference ResourceAttributeType = "VM_MANAGED_OBJECT_REFERENCE" + ResourceAttributeTypeVmName ResourceAttributeType = "VM_NAME" + ResourceAttributeTypeVmPath ResourceAttributeType = "VM_PATH" + ResourceAttributeTypeBiosId ResourceAttributeType = "BIOS_ID" + ResourceAttributeTypeMotherboardSerialNumber ResourceAttributeType = "MOTHERBOARD_SERIAL_NUMBER" ) // Values returns all known values for ResourceAttributeType. Note that this can be @@ -60,10 +60,10 @@ type Status string // Enum values for Status const ( - StatusNot_started Status = "NOT_STARTED" - StatusIn_progress Status = "IN_PROGRESS" - StatusFailed Status = "FAILED" - StatusCompleted Status = "COMPLETED" + StatusNotStarted Status = "NOT_STARTED" + StatusInProgress Status = "IN_PROGRESS" + StatusFailed Status = "FAILED" + StatusCompleted Status = "COMPLETED" ) // Values returns all known values for Status. Note that this can be expanded in diff --git a/service/migrationhubconfig/doc.go b/service/migrationhubconfig/doc.go index 90d7d93ba21..6d9ac701826 100644 --- a/service/migrationhubconfig/doc.go +++ b/service/migrationhubconfig/doc.go @@ -8,20 +8,20 @@ // region, as well as to create and work with controls that describe the home // region. // -// * You must make API calls for write actions (create, notify, -// associate, disassociate, import, or put) while in your home region, or a +// * You must make API calls for write actions (create, notify, associate, +// disassociate, import, or put) while in your home region, or a // HomeRegionNotSetException error is returned. // -// * API calls for read actions +// * API calls for read actions // (list, describe, stop, and delete) are permitted outside of your home region. // -// -// * If you call a write API outside the home region, an InvalidInputException is +// * +// If you call a write API outside the home region, an InvalidInputException is // returned. // -// * You can call GetHomeRegion action to obtain the account's -// Migration Hub home region. +// * You can call GetHomeRegion action to obtain the account's Migration +// Hub home region. // -// For specific API usage, see the sections that follow -// in this AWS Migration Hub Home Region API reference. 
+// For specific API usage, see the sections that follow in this +// AWS Migration Hub Home Region API reference. package migrationhubconfig diff --git a/service/mq/types/enums.go b/service/mq/types/enums.go index 515eae46bd7..6a1a08fe2c3 100644 --- a/service/mq/types/enums.go +++ b/service/mq/types/enums.go @@ -24,11 +24,11 @@ type BrokerState string // Enum values for BrokerState const ( - BrokerStateCreation_in_progress BrokerState = "CREATION_IN_PROGRESS" - BrokerStateCreation_failed BrokerState = "CREATION_FAILED" - BrokerStateDeletion_in_progress BrokerState = "DELETION_IN_PROGRESS" - BrokerStateRunning BrokerState = "RUNNING" - BrokerStateReboot_in_progress BrokerState = "REBOOT_IN_PROGRESS" + BrokerStateCreationInProgress BrokerState = "CREATION_IN_PROGRESS" + BrokerStateCreationFailed BrokerState = "CREATION_FAILED" + BrokerStateDeletionInProgress BrokerState = "DELETION_IN_PROGRESS" + BrokerStateRunning BrokerState = "RUNNING" + BrokerStateRebootInProgress BrokerState = "REBOOT_IN_PROGRESS" ) // Values returns all known values for BrokerState. Note that this can be expanded @@ -114,8 +114,8 @@ type DeploymentMode string // Enum values for DeploymentMode const ( - DeploymentModeSingle_instance DeploymentMode = "SINGLE_INSTANCE" - DeploymentModeActive_standby_multi_az DeploymentMode = "ACTIVE_STANDBY_MULTI_AZ" + DeploymentModeSingleInstance DeploymentMode = "SINGLE_INSTANCE" + DeploymentModeActiveStandbyMultiAz DeploymentMode = "ACTIVE_STANDBY_MULTI_AZ" ) // Values returns all known values for DeploymentMode. Note that this can be @@ -148,9 +148,9 @@ type SanitizationWarningReason string // Enum values for SanitizationWarningReason const ( - SanitizationWarningReasonDisallowed_element_removed SanitizationWarningReason = "DISALLOWED_ELEMENT_REMOVED" - SanitizationWarningReasonDisallowed_attribute_removed SanitizationWarningReason = "DISALLOWED_ATTRIBUTE_REMOVED" - SanitizationWarningReasonInvalid_attribute_value_removed SanitizationWarningReason = "INVALID_ATTRIBUTE_VALUE_REMOVED" + SanitizationWarningReasonDisallowedElementRemoved SanitizationWarningReason = "DISALLOWED_ELEMENT_REMOVED" + SanitizationWarningReasonDisallowedAttributeRemoved SanitizationWarningReason = "DISALLOWED_ATTRIBUTE_REMOVED" + SanitizationWarningReasonInvalidAttributeValueRemoved SanitizationWarningReason = "INVALID_ATTRIBUTE_VALUE_REMOVED" ) // Values returns all known values for SanitizationWarningReason. Note that this diff --git a/service/mturk/api_op_ApproveAssignment.go b/service/mturk/api_op_ApproveAssignment.go index aa3dd84a66a..e77f443a400 100644 --- a/service/mturk/api_op_ApproveAssignment.go +++ b/service/mturk/api_op_ApproveAssignment.go @@ -14,20 +14,20 @@ import ( // Approving an assignment initiates two payments from the Requester's Amazon.com // account // -// * The Worker who submitted the results is paid the reward specified -// in the HIT. +// * The Worker who submitted the results is paid the reward specified in +// the HIT. // -// * Amazon Mechanical Turk fees are debited. +// * Amazon Mechanical Turk fees are debited. // -// If the Requester's -// account does not have adequate funds for these payments, the call to -// ApproveAssignment returns an exception, and the approval is not processed. You -// can include an optional feedback message with the approval, which the Worker can -// see in the Status section of the web site. You can also call this operation for -// assignments that were previous rejected and approve them by explicitly -// overriding the previous rejection. 
This only works on rejected assignments that -// were submitted within the previous 30 days and only if the assignment's related -// HIT has not been deleted. +// If the Requester's account +// does not have adequate funds for these payments, the call to ApproveAssignment +// returns an exception, and the approval is not processed. You can include an +// optional feedback message with the approval, which the Worker can see in the +// Status section of the web site. You can also call this operation for assignments +// that were previous rejected and approve them by explicitly overriding the +// previous rejection. This only works on rejected assignments that were submitted +// within the previous 30 days and only if the assignment's related HIT has not +// been deleted. func (c *Client) ApproveAssignment(ctx context.Context, params *ApproveAssignmentInput, optFns ...func(*Options)) (*ApproveAssignmentOutput, error) { if params == nil { params = &ApproveAssignmentInput{} diff --git a/service/mturk/api_op_CreateAdditionalAssignmentsForHIT.go b/service/mturk/api_op_CreateAdditionalAssignmentsForHIT.go index 314b804e292..042ca3cf625 100644 --- a/service/mturk/api_op_CreateAdditionalAssignmentsForHIT.go +++ b/service/mturk/api_op_CreateAdditionalAssignmentsForHIT.go @@ -14,16 +14,16 @@ import ( // assignments of an existing HIT. To extend the maximum number of assignments, // specify the number of additional assignments. // -// * HITs created with fewer -// than 10 assignments cannot be extended to have 10 or more assignments. -// Attempting to add assignments in a way that brings the total number of -// assignments for a HIT from fewer than 10 assignments to 10 or more assignments -// will result in an AWS.MechanicalTurk.InvalidMaximumAssignmentsIncrease -// exception. +// * HITs created with fewer than 10 +// assignments cannot be extended to have 10 or more assignments. Attempting to add +// assignments in a way that brings the total number of assignments for a HIT from +// fewer than 10 assignments to 10 or more assignments will result in an +// AWS.MechanicalTurk.InvalidMaximumAssignmentsIncrease exception. // -// * HITs that were created before July 22, 2015 cannot be -// extended. Attempting to extend HITs that were created before July 22, 2015 will -// result in an AWS.MechanicalTurk.HITTooOldForExtension exception. +// * HITs that +// were created before July 22, 2015 cannot be extended. Attempting to extend HITs +// that were created before July 22, 2015 will result in an +// AWS.MechanicalTurk.HITTooOldForExtension exception. func (c *Client) CreateAdditionalAssignmentsForHIT(ctx context.Context, params *CreateAdditionalAssignmentsForHITInput, optFns ...func(*Options)) (*CreateAdditionalAssignmentsForHITOutput, error) { if params == nil { params = &CreateAdditionalAssignmentsForHITInput{} diff --git a/service/mturk/api_op_DeleteHIT.go b/service/mturk/api_op_DeleteHIT.go index 9be1c849973..2289e36a85e 100644 --- a/service/mturk/api_op_DeleteHIT.go +++ b/service/mturk/api_op_DeleteHIT.go @@ -19,17 +19,17 @@ import ( // submitted assignments already approved or rejected, the service will return an // error. // -// * HITs are automatically disposed of after 120 days. +// * HITs are automatically disposed of after 120 days. // -// * After -// you dispose of a HIT, you can no longer approve the HIT's rejected -// assignments. +// * After you +// dispose of a HIT, you can no longer approve the HIT's rejected assignments. 
// -// * Disposed HITs are not returned in results for the ListHITs -// operation. +// * +// Disposed HITs are not returned in results for the ListHITs operation. // -// * Disposing HITs can improve the performance of operations such -// as ListReviewableHITs and ListHITs. +// * +// Disposing HITs can improve the performance of operations such as +// ListReviewableHITs and ListHITs. func (c *Client) DeleteHIT(ctx context.Context, params *DeleteHITInput, optFns ...func(*Options)) (*DeleteHITOutput, error) { if params == nil { params = &DeleteHITInput{} diff --git a/service/mturk/api_op_UpdateHITReviewStatus.go b/service/mturk/api_op_UpdateHITReviewStatus.go index 8312a3e94b9..374e1ba7484 100644 --- a/service/mturk/api_op_UpdateHITReviewStatus.go +++ b/service/mturk/api_op_UpdateHITReviewStatus.go @@ -37,11 +37,11 @@ type UpdateHITReviewStatusInput struct { // Specifies how to update the HIT status. Default is False. // - // * Setting this to + // * Setting this to // false will only transition a HIT from Reviewable to Reviewing // - // * Setting - // this to true will only transition a HIT from Reviewing to Reviewable + // * Setting this to + // true will only transition a HIT from Reviewing to Reviewable Revert *bool } diff --git a/service/mturk/types/types.go b/service/mturk/types/types.go index 428eaa2eee0..3ed2266f383 100644 --- a/service/mturk/types/types.go +++ b/service/mturk/types/types.go @@ -206,13 +206,13 @@ type NotificationSpecification struct { // The target for notification messages. The Destination’s format is determined by // the specified Transport: // - // * When Transport is Email, the Destination is your + // * When Transport is Email, the Destination is your // email address. // - // * When Transport is SQS, the Destination is your queue - // URL. + // * When Transport is SQS, the Destination is your queue URL. // - // * When Transport is SNS, the Destination is the ARN of your topic. + // * + // When Transport is SNS, the Destination is the ARN of your topic. // // This member is required. Destination *string diff --git a/service/neptune/api_op_AddSourceIdentifierToSubscription.go b/service/neptune/api_op_AddSourceIdentifierToSubscription.go index 1633a59ab3e..715b621fe1f 100644 --- a/service/neptune/api_op_AddSourceIdentifierToSubscription.go +++ b/service/neptune/api_op_AddSourceIdentifierToSubscription.go @@ -31,18 +31,18 @@ type AddSourceIdentifierToSubscriptionInput struct { // The identifier of the event source to be added. Constraints: // - // * If the - // source type is a DB instance, then a DBInstanceIdentifier must be supplied. + // * If the source + // type is a DB instance, then a DBInstanceIdentifier must be supplied. // + // * If the + // source type is a DB security group, a DBSecurityGroupName must be supplied. // - // * If the source type is a DB security group, a DBSecurityGroupName must be + // * + // If the source type is a DB parameter group, a DBParameterGroupName must be // supplied. // - // * If the source type is a DB parameter group, a - // DBParameterGroupName must be supplied. - // - // * If the source type is a DB - // snapshot, a DBSnapshotIdentifier must be supplied. + // * If the source type is a DB snapshot, a DBSnapshotIdentifier must be + // supplied. // // This member is required. 
SourceIdentifier *string diff --git a/service/neptune/api_op_ApplyPendingMaintenanceAction.go b/service/neptune/api_op_ApplyPendingMaintenanceAction.go index 0970e7aa937..e326c7f9dd3 100644 --- a/service/neptune/api_op_ApplyPendingMaintenanceAction.go +++ b/service/neptune/api_op_ApplyPendingMaintenanceAction.go @@ -39,15 +39,14 @@ type ApplyPendingMaintenanceActionInput struct { // A value that specifies the type of opt-in request, or undoes an opt-in request. // An opt-in request of type immediate can't be undone. Valid values: // - // * - // immediate - Apply the maintenance action immediately. + // * immediate + // - Apply the maintenance action immediately. // - // * next-maintenance - - // Apply the maintenance action during the next maintenance window for the - // resource. + // * next-maintenance - Apply the + // maintenance action during the next maintenance window for the resource. // - // * undo-opt-in - Cancel any existing next-maintenance opt-in - // requests. + // * + // undo-opt-in - Cancel any existing next-maintenance opt-in requests. // // This member is required. OptInType *string diff --git a/service/neptune/api_op_CopyDBClusterParameterGroup.go b/service/neptune/api_op_CopyDBClusterParameterGroup.go index 94e0a048715..301a19e73a0 100644 --- a/service/neptune/api_op_CopyDBClusterParameterGroup.go +++ b/service/neptune/api_op_CopyDBClusterParameterGroup.go @@ -35,16 +35,15 @@ type CopyDBClusterParameterGroupInput struct { // (https://docs.aws.amazon.com/neptune/latest/UserGuide/tagging.ARN.html#tagging.ARN.Constructing). // Constraints: // - // * Must specify a valid DB cluster parameter group. + // * Must specify a valid DB cluster parameter group. // - // * If - // the source DB cluster parameter group is in the same AWS Region as the copy, - // specify a valid DB parameter group identifier, for example - // my-db-cluster-param-group, or a valid ARN. + // * If the + // source DB cluster parameter group is in the same AWS Region as the copy, specify + // a valid DB parameter group identifier, for example my-db-cluster-param-group, or + // a valid ARN. // - // * If the source DB parameter - // group is in a different AWS Region than the copy, specify a valid DB cluster - // parameter group ARN, for example + // * If the source DB parameter group is in a different AWS Region + // than the copy, specify a valid DB cluster parameter group ARN, for example // arn:aws:rds:us-east-1:123456789012:cluster-pg:custom-cluster-group1. // // This member is required. @@ -57,19 +56,18 @@ type CopyDBClusterParameterGroupInput struct { // The identifier for the copied DB cluster parameter group. Constraints: // - // * - // Cannot be null, empty, or blank + // * Cannot + // be null, empty, or blank // - // * Must contain from 1 to 255 letters, - // numbers, or hyphens + // * Must contain from 1 to 255 letters, numbers, or + // hyphens // - // * First character must be a letter + // * First character must be a letter // - // * Cannot end - // with a hyphen or contain two consecutive hyphens + // * Cannot end with a hyphen or + // contain two consecutive hyphens // - // Example: - // my-cluster-param-group1 + // Example: my-cluster-param-group1 // // This member is required. 
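The identifier constraints quoted above (1 to 255 letters, numbers, or hyphens; first character a letter; no trailing hyphen; no two consecutive hyphens) recur for most Neptune names in this file and are easy to pre-check on the client. A purely illustrative helper, not part of the SDK, that mirrors those rules:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// Starts with a letter, then letters, digits, or hyphens, 1-255 characters total.
var groupIdentifier = regexp.MustCompile(`^[A-Za-z][A-Za-z0-9-]{0,254}$`)

// validGroupIdentifier reports whether id satisfies the constraints quoted in the
// doc comment above; the service still performs its own validation.
func validGroupIdentifier(id string) bool {
	if !groupIdentifier.MatchString(id) {
		return false // empty, too long, bad first character, or illegal character
	}
	// Cannot end with a hyphen or contain two consecutive hyphens.
	return !strings.HasSuffix(id, "-") && !strings.Contains(id, "--")
}

func main() {
	fmt.Println(validGroupIdentifier("my-cluster-param-group1")) // true
	fmt.Println(validGroupIdentifier("bad--name"))               // false
}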
TargetDBClusterParameterGroupIdentifier *string diff --git a/service/neptune/api_op_CopyDBClusterSnapshot.go b/service/neptune/api_op_CopyDBClusterSnapshot.go index 194f72ca4f0..c1190563cc0 100644 --- a/service/neptune/api_op_CopyDBClusterSnapshot.go +++ b/service/neptune/api_op_CopyDBClusterSnapshot.go @@ -34,11 +34,11 @@ type CopyDBClusterSnapshotInput struct { // The identifier of the DB cluster snapshot to copy. This parameter is not // case-sensitive. You can't copy from one AWS Region to another. Constraints: // + // * + // Must specify a valid system snapshot in the "available" state. // - // * Must specify a valid system snapshot in the "available" state. - // - // * Specify - // a valid DB snapshot identifier. + // * Specify a + // valid DB snapshot identifier. // // Example: my-cluster-snapshot1 // @@ -48,13 +48,13 @@ type CopyDBClusterSnapshotInput struct { // The identifier of the new DB cluster snapshot to create from the source DB // cluster snapshot. This parameter is not case-sensitive. Constraints: // - // * Must + // * Must // contain from 1 to 63 letters, numbers, or hyphens. // - // * First character must - // be a letter. + // * First character must be a + // letter. // - // * Cannot end with a hyphen or contain two consecutive + // * Cannot end with a hyphen or contain two consecutive // hyphens. // // Example: my-cluster-snapshot2 diff --git a/service/neptune/api_op_CopyDBParameterGroup.go b/service/neptune/api_op_CopyDBParameterGroup.go index 8006578b38c..abf43efdd62 100644 --- a/service/neptune/api_op_CopyDBParameterGroup.go +++ b/service/neptune/api_op_CopyDBParameterGroup.go @@ -34,11 +34,10 @@ type CopyDBParameterGroupInput struct { // (https://docs.aws.amazon.com/neptune/latest/UserGuide/tagging.ARN.html#tagging.ARN.Constructing). // Constraints: // - // * Must specify a valid DB parameter group. + // * Must specify a valid DB parameter group. // - // * Must specify - // a valid DB parameter group identifier, for example my-db-param-group, or a valid - // ARN. + // * Must specify a valid + // DB parameter group identifier, for example my-db-param-group, or a valid ARN. // // This member is required. SourceDBParameterGroupIdentifier *string @@ -50,16 +49,16 @@ type CopyDBParameterGroupInput struct { // The identifier for the copied DB parameter group. Constraints: // - // * Cannot be + // * Cannot be // null, empty, or blank. // - // * Must contain from 1 to 255 letters, numbers, or + // * Must contain from 1 to 255 letters, numbers, or // hyphens. // - // * First character must be a letter. + // * First character must be a letter. // - // * Cannot end with a - // hyphen or contain two consecutive hyphens. + // * Cannot end with a hyphen or + // contain two consecutive hyphens. // // Example: my-db-parameter-group // diff --git a/service/neptune/api_op_CreateDBCluster.go b/service/neptune/api_op_CreateDBCluster.go index 24e44feac2e..902e5047187 100644 --- a/service/neptune/api_op_CreateDBCluster.go +++ b/service/neptune/api_op_CreateDBCluster.go @@ -38,13 +38,13 @@ type CreateDBClusterInput struct { // The DB cluster identifier. This parameter is stored as a lowercase string. // Constraints: // - // * Must contain from 1 to 63 letters, numbers, or hyphens. + // * Must contain from 1 to 63 letters, numbers, or hyphens. // + // * First + // character must be a letter. // - // * First character must be a letter. - // - // * Cannot end with a hyphen or contain - // two consecutive hyphens. + // * Cannot end with a hyphen or contain two + // consecutive hyphens. 
// // Example: my-cluster1 // @@ -64,7 +64,7 @@ type CreateDBClusterInput struct { // The number of days for which automated backups are retained. You must specify a // minimum value of 1. Default: 1 Constraints: // - // * Must be a value from 1 to 35 + // * Must be a value from 1 to 35 BackupRetentionPeriod *int32 // (Not supported by Neptune) @@ -73,8 +73,8 @@ type CreateDBClusterInput struct { // The name of the DB cluster parameter group to associate with this DB cluster. If // this argument is omitted, the default is used. Constraints: // - // * If supplied, - // must match the name of an existing DBClusterParameterGroup. + // * If supplied, must + // match the name of an existing DBClusterParameterGroup. DBClusterParameterGroupName *string // A DB subnet group to associate with this DB cluster. Constraints: Must match the @@ -109,11 +109,11 @@ type CreateDBClusterInput struct { // of the ARN for the KMS encryption key. If an encryption key is not specified in // KmsKeyId: // - // * If ReplicationSourceIdentifier identifies an encrypted source, - // then Amazon Neptune will use the encryption key used to encrypt the source. + // * If ReplicationSourceIdentifier identifies an encrypted source, then + // Amazon Neptune will use the encryption key used to encrypt the source. // Otherwise, Amazon Neptune will use your default encryption key. // - // * If the + // * If the // StorageEncrypted parameter is true and ReplicationSourceIdentifier is not // specified, then Amazon Neptune will use your default encryption key. // @@ -132,13 +132,13 @@ type CreateDBClusterInput struct { // The name of the master user for the DB cluster. Constraints: // - // * Must be 1 to - // 16 letters or numbers. + // * Must be 1 to 16 + // letters or numbers. // - // * First character must be a letter. + // * First character must be a letter. // - // * Cannot be - // a reserved word for the chosen database engine. + // * Cannot be a reserved + // word for the chosen database engine. MasterUsername *string // (Not supported by Neptune) @@ -159,16 +159,15 @@ type CreateDBClusterInput struct { // (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/AdjustingTheMaintenanceWindow.html) // in the Amazon Neptune User Guide. Constraints: // - // * Must be in the format + // * Must be in the format // hh24:mi-hh24:mi. // - // * Must be in Universal Coordinated Time (UTC). + // * Must be in Universal Coordinated Time (UTC). // - // * Must - // not conflict with the preferred maintenance window. + // * Must not + // conflict with the preferred maintenance window. // - // * Must be at least 30 - // minutes. + // * Must be at least 30 minutes. PreferredBackupWindow *string // The weekly time range during which system maintenance can occur, in Universal diff --git a/service/neptune/api_op_CreateDBClusterParameterGroup.go b/service/neptune/api_op_CreateDBClusterParameterGroup.go index 7568d1d1412..f5f3c758234 100644 --- a/service/neptune/api_op_CreateDBClusterParameterGroup.go +++ b/service/neptune/api_op_CreateDBClusterParameterGroup.go @@ -50,11 +50,11 @@ type CreateDBClusterParameterGroupInput struct { // The name of the DB cluster parameter group. Constraints: // - // * Must match the - // name of an existing DBClusterParameterGroup. + // * Must match the name + // of an existing DBClusterParameterGroup. // - // This value is stored as a - // lowercase string. + // This value is stored as a lowercase + // string. // // This member is required. 
DBClusterParameterGroupName *string diff --git a/service/neptune/api_op_CreateDBClusterSnapshot.go b/service/neptune/api_op_CreateDBClusterSnapshot.go index a32553a7790..a410c5e7e3f 100644 --- a/service/neptune/api_op_CreateDBClusterSnapshot.go +++ b/service/neptune/api_op_CreateDBClusterSnapshot.go @@ -32,7 +32,7 @@ type CreateDBClusterSnapshotInput struct { // The identifier of the DB cluster to create a snapshot for. This parameter is not // case-sensitive. Constraints: // - // * Must match the identifier of an existing + // * Must match the identifier of an existing // DBCluster. // // Example: my-cluster1 @@ -43,16 +43,15 @@ type CreateDBClusterSnapshotInput struct { // The identifier of the DB cluster snapshot. This parameter is stored as a // lowercase string. Constraints: // - // * Must contain from 1 to 63 letters, - // numbers, or hyphens. + // * Must contain from 1 to 63 letters, numbers, or + // hyphens. // - // * First character must be a letter. + // * First character must be a letter. // - // * Cannot end - // with a hyphen or contain two consecutive hyphens. + // * Cannot end with a hyphen or + // contain two consecutive hyphens. // - // Example: - // my-cluster1-snapshot1 + // Example: my-cluster1-snapshot1 // // This member is required. DBClusterSnapshotIdentifier *string diff --git a/service/neptune/api_op_CreateDBInstance.go b/service/neptune/api_op_CreateDBInstance.go index 7f664aacc13..63af4b41481 100644 --- a/service/neptune/api_op_CreateDBInstance.go +++ b/service/neptune/api_op_CreateDBInstance.go @@ -38,13 +38,13 @@ type CreateDBInstanceInput struct { // The DB instance identifier. This parameter is stored as a lowercase string. // Constraints: // - // * Must contain from 1 to 63 letters, numbers, or hyphens. + // * Must contain from 1 to 63 letters, numbers, or hyphens. // + // * First + // character must be a letter. // - // * First character must be a letter. - // - // * Cannot end with a hyphen or contain - // two consecutive hyphens. + // * Cannot end with a hyphen or contain two + // consecutive hyphens. // // Example: mydbinstance // @@ -78,10 +78,10 @@ type CreateDBInstanceInput struct { // retention period for automated backups is managed by the DB cluster. For more // information, see CreateDBCluster. Default: 1 Constraints: // - // * Must be a value + // * Must be a value // from 0 to 35 // - // * Cannot be set to 0 if the DB instance is a source to Read + // * Cannot be set to 0 if the DB instance is a source to Read // Replicas BackupRetentionPeriod *int32 @@ -103,12 +103,12 @@ type CreateDBInstanceInput struct { // argument is omitted, the default DBParameterGroup for the specified engine is // used. Constraints: // - // * Must be 1 to 255 letters, numbers, or hyphens. + // * Must be 1 to 255 letters, numbers, or hyphens. // - // * - // First character must be a letter + // * First + // character must be a letter // - // * Cannot end with a hyphen or contain two + // * Cannot end with a hyphen or contain two // consecutive hyphens DBParameterGroupName *string diff --git a/service/neptune/api_op_CreateDBParameterGroup.go b/service/neptune/api_op_CreateDBParameterGroup.go index 0eee90ee25d..3a94464823a 100644 --- a/service/neptune/api_op_CreateDBParameterGroup.go +++ b/service/neptune/api_op_CreateDBParameterGroup.go @@ -55,16 +55,16 @@ type CreateDBParameterGroupInput struct { // The name of the DB parameter group. Constraints: // - // * Must be 1 to 255 - // letters, numbers, or hyphens. + // * Must be 1 to 255 letters, + // numbers, or hyphens. 
// - // * First character must be a letter + // * First character must be a letter // - // * - // Cannot end with a hyphen or contain two consecutive hyphens + // * Cannot end with a + // hyphen or contain two consecutive hyphens // - // This value is - // stored as a lowercase string. + // This value is stored as a lowercase + // string. // // This member is required. DBParameterGroupName *string diff --git a/service/neptune/api_op_CreateEventSubscription.go b/service/neptune/api_op_CreateEventSubscription.go index ae8a726b28a..18d7ea8dbaa 100644 --- a/service/neptune/api_op_CreateEventSubscription.go +++ b/service/neptune/api_op_CreateEventSubscription.go @@ -70,21 +70,20 @@ type CreateEventSubscriptionInput struct { // begin with a letter and must contain only ASCII letters, digits, and hyphens; it // can't end with a hyphen or contain two consecutive hyphens. Constraints: // - // * - // If SourceIds are supplied, SourceType must also be provided. + // * If + // SourceIds are supplied, SourceType must also be provided. // - // * If the - // source type is a DB instance, then a DBInstanceIdentifier must be supplied. + // * If the source type + // is a DB instance, then a DBInstanceIdentifier must be supplied. // + // * If the source + // type is a DB security group, a DBSecurityGroupName must be supplied. // - // * If the source type is a DB security group, a DBSecurityGroupName must be - // supplied. + // * If the + // source type is a DB parameter group, a DBParameterGroupName must be supplied. // - // * If the source type is a DB parameter group, a - // DBParameterGroupName must be supplied. - // - // * If the source type is a DB - // snapshot, a DBSnapshotIdentifier must be supplied. + // * + // If the source type is a DB snapshot, a DBSnapshotIdentifier must be supplied. SourceIds []*string // The type of source that is generating the events. For example, if you want to be diff --git a/service/neptune/api_op_DeleteDBCluster.go b/service/neptune/api_op_DeleteDBCluster.go index 8b0e2a1d261..d2445e19c85 100644 --- a/service/neptune/api_op_DeleteDBCluster.go +++ b/service/neptune/api_op_DeleteDBCluster.go @@ -37,7 +37,7 @@ type DeleteDBClusterInput struct { // The DB cluster identifier for the DB cluster to be deleted. This parameter isn't // case-sensitive. Constraints: // - // * Must match an existing DBClusterIdentifier. + // * Must match an existing DBClusterIdentifier. // // This member is required. DBClusterIdentifier *string @@ -46,13 +46,13 @@ type DeleteDBClusterInput struct { // SkipFinalSnapshot is set to false. Specifying this parameter and also setting // the SkipFinalShapshot parameter to true results in an error. Constraints: // - // * + // * // Must be 1 to 255 letters, numbers, or hyphens. // - // * First character must be a + // * First character must be a // letter // - // * Cannot end with a hyphen or contain two consecutive hyphens + // * Cannot end with a hyphen or contain two consecutive hyphens FinalDBSnapshotIdentifier *string // Determines whether a final DB cluster snapshot is created before the DB cluster diff --git a/service/neptune/api_op_DeleteDBClusterParameterGroup.go b/service/neptune/api_op_DeleteDBClusterParameterGroup.go index a5abbee9396..e34b1309f4a 100644 --- a/service/neptune/api_op_DeleteDBClusterParameterGroup.go +++ b/service/neptune/api_op_DeleteDBClusterParameterGroup.go @@ -31,13 +31,13 @@ type DeleteDBClusterParameterGroupInput struct { // The name of the DB cluster parameter group. 
Constraints: // - // * Must be the name - // of an existing DB cluster parameter group. + // * Must be the name of + // an existing DB cluster parameter group. // - // * You can't delete a default DB - // cluster parameter group. + // * You can't delete a default DB cluster + // parameter group. // - // * Cannot be associated with any DB clusters. + // * Cannot be associated with any DB clusters. // // This member is required. DBClusterParameterGroupName *string diff --git a/service/neptune/api_op_DeleteDBInstance.go b/service/neptune/api_op_DeleteDBInstance.go index d05c9fd84bd..6b6d6b87f51 100644 --- a/service/neptune/api_op_DeleteDBInstance.go +++ b/service/neptune/api_op_DeleteDBInstance.go @@ -43,7 +43,7 @@ type DeleteDBInstanceInput struct { // The DB instance identifier for the DB instance to be deleted. This parameter // isn't case-sensitive. Constraints: // - // * Must match the name of an existing DB + // * Must match the name of an existing DB // instance. // // This member is required. @@ -53,16 +53,16 @@ type DeleteDBInstanceInput struct { // set to false. Specifying this parameter and also setting the SkipFinalShapshot // parameter to true results in an error. Constraints: // - // * Must be 1 to 255 - // letters or numbers. + // * Must be 1 to 255 letters + // or numbers. // - // * First character must be a letter + // * First character must be a letter // - // * Cannot end - // with a hyphen or contain two consecutive hyphens + // * Cannot end with a hyphen or + // contain two consecutive hyphens // - // * Cannot be specified when - // deleting a Read Replica. + // * Cannot be specified when deleting a Read + // Replica. FinalDBSnapshotIdentifier *string // Determines whether a final DB snapshot is created before the DB instance is diff --git a/service/neptune/api_op_DeleteDBParameterGroup.go b/service/neptune/api_op_DeleteDBParameterGroup.go index 6fc33e02608..35aa9317fb7 100644 --- a/service/neptune/api_op_DeleteDBParameterGroup.go +++ b/service/neptune/api_op_DeleteDBParameterGroup.go @@ -31,13 +31,13 @@ type DeleteDBParameterGroupInput struct { // The name of the DB parameter group. Constraints: // - // * Must be the name of an + // * Must be the name of an // existing DB parameter group // - // * You can't delete a default DB parameter - // group + // * You can't delete a default DB parameter group // - // * Cannot be associated with any DB instances + // * + // Cannot be associated with any DB instances // // This member is required. DBParameterGroupName *string diff --git a/service/neptune/api_op_DescribeDBClusterParameterGroups.go b/service/neptune/api_op_DescribeDBClusterParameterGroups.go index dfaefc1f226..d169e34b41a 100644 --- a/service/neptune/api_op_DescribeDBClusterParameterGroups.go +++ b/service/neptune/api_op_DescribeDBClusterParameterGroups.go @@ -34,7 +34,7 @@ type DescribeDBClusterParameterGroupsInput struct { // The name of a specific DB cluster parameter group to return details for. // Constraints: // - // * If supplied, must match the name of an existing + // * If supplied, must match the name of an existing // DBClusterParameterGroup. 
DBClusterParameterGroupName *string diff --git a/service/neptune/api_op_DescribeDBClusterParameters.go b/service/neptune/api_op_DescribeDBClusterParameters.go index e7cd13da425..410240d0e7a 100644 --- a/service/neptune/api_op_DescribeDBClusterParameters.go +++ b/service/neptune/api_op_DescribeDBClusterParameters.go @@ -32,7 +32,7 @@ type DescribeDBClusterParametersInput struct { // The name of a specific DB cluster parameter group to return parameter details // for. Constraints: // - // * If supplied, must match the name of an existing + // * If supplied, must match the name of an existing // DBClusterParameterGroup. // // This member is required. diff --git a/service/neptune/api_op_DescribeDBClusterSnapshots.go b/service/neptune/api_op_DescribeDBClusterSnapshots.go index 8629c56565e..99de3fa913f 100644 --- a/service/neptune/api_op_DescribeDBClusterSnapshots.go +++ b/service/neptune/api_op_DescribeDBClusterSnapshots.go @@ -34,19 +34,19 @@ type DescribeDBClusterSnapshotsInput struct { // parameter can't be used in conjunction with the DBClusterSnapshotIdentifier // parameter. This parameter is not case-sensitive. Constraints: // - // * If - // supplied, must match the identifier of an existing DBCluster. + // * If supplied, + // must match the identifier of an existing DBCluster. DBClusterIdentifier *string // A specific DB cluster snapshot identifier to describe. This parameter can't be // used in conjunction with the DBClusterIdentifier parameter. This value is stored // as a lowercase string. Constraints: // - // * If supplied, must match the - // identifier of an existing DBClusterSnapshot. + // * If supplied, must match the identifier of + // an existing DBClusterSnapshot. // - // * If this identifier is for an - // automated snapshot, the SnapshotType parameter must also be specified. + // * If this identifier is for an automated + // snapshot, the SnapshotType parameter must also be specified. DBClusterSnapshotIdentifier *string // This parameter is not currently supported. @@ -79,27 +79,27 @@ type DescribeDBClusterSnapshotsInput struct { // The type of DB cluster snapshots to be returned. You can specify one of the // following values: // - // * automated - Return all DB cluster snapshots that have - // been automatically taken by Amazon Neptune for my AWS account. + // * automated - Return all DB cluster snapshots that have been + // automatically taken by Amazon Neptune for my AWS account. // - // * manual - - // Return all DB cluster snapshots that have been taken by my AWS account. + // * manual - Return all + // DB cluster snapshots that have been taken by my AWS account. // - // * - // shared - Return all manual DB cluster snapshots that have been shared to my AWS - // account. + // * shared - Return + // all manual DB cluster snapshots that have been shared to my AWS account. // - // * public - Return all DB cluster snapshots that have been marked - // as public. + // * + // public - Return all DB cluster snapshots that have been marked as public. // - // If you don't specify a SnapshotType value, then both automated and - // manual DB cluster snapshots are returned. You can include shared DB cluster - // snapshots with these results by setting the IncludeShared parameter to true. You - // can include public DB cluster snapshots with these results by setting the - // IncludePublic parameter to true. The IncludeShared and IncludePublic parameters - // don't apply for SnapshotType values of manual or automated. 
The IncludePublic - // parameter doesn't apply when SnapshotType is set to shared. The IncludeShared - // parameter doesn't apply when SnapshotType is set to public. + // If + // you don't specify a SnapshotType value, then both automated and manual DB + // cluster snapshots are returned. You can include shared DB cluster snapshots with + // these results by setting the IncludeShared parameter to true. You can include + // public DB cluster snapshots with these results by setting the IncludePublic + // parameter to true. The IncludeShared and IncludePublic parameters don't apply + // for SnapshotType values of manual or automated. The IncludePublic parameter + // doesn't apply when SnapshotType is set to shared. The IncludeShared parameter + // doesn't apply when SnapshotType is set to public. SnapshotType *string } diff --git a/service/neptune/api_op_DescribeDBClusters.go b/service/neptune/api_op_DescribeDBClusters.go index 0a16f7f198b..54457185d84 100644 --- a/service/neptune/api_op_DescribeDBClusters.go +++ b/service/neptune/api_op_DescribeDBClusters.go @@ -35,24 +35,23 @@ type DescribeDBClustersInput struct { // information from only the specific DB cluster is returned. This parameter isn't // case-sensitive. Constraints: // - // * If supplied, must match an existing + // * If supplied, must match an existing // DBClusterIdentifier. DBClusterIdentifier *string // A filter that specifies one or more DB clusters to describe. Supported // filters: // - // * db-cluster-id - Accepts DB cluster identifiers and DB cluster - // Amazon Resource Names (ARNs). The results list will only include information - // about the DB clusters identified by these ARNs. + // * db-cluster-id - Accepts DB cluster identifiers and DB cluster Amazon + // Resource Names (ARNs). The results list will only include information about the + // DB clusters identified by these ARNs. // - // * engine - Accepts an - // engine name (such as neptune), and restricts the results list to DB clusters - // created by that engine. + // * engine - Accepts an engine name (such + // as neptune), and restricts the results list to DB clusters created by that + // engine. // - // For example, to invoke this API from the AWS CLI and - // filter so that only Neptune DB clusters are returned, you could use the - // following command: + // For example, to invoke this API from the AWS CLI and filter so that + // only Neptune DB clusters are returned, you could use the following command: Filters []*types.Filter // An optional pagination token provided by a previous DescribeDBClusters request. diff --git a/service/neptune/api_op_DescribeDBEngineVersions.go b/service/neptune/api_op_DescribeDBEngineVersions.go index ef3985dfa6c..e1e250f0211 100644 --- a/service/neptune/api_op_DescribeDBEngineVersions.go +++ b/service/neptune/api_op_DescribeDBEngineVersions.go @@ -32,7 +32,7 @@ type DescribeDBEngineVersionsInput struct { // The name of a specific DB parameter group family to return details for. // Constraints: // - // * If supplied, must match an existing DBParameterGroupFamily. + // * If supplied, must match an existing DBParameterGroupFamily. 
DBParameterGroupFamily *string // Indicates that only the default version of the specified engine or engine and diff --git a/service/neptune/api_op_DescribeDBInstances.go b/service/neptune/api_op_DescribeDBInstances.go index faa1ff841bd..0dd778625c0 100644 --- a/service/neptune/api_op_DescribeDBInstances.go +++ b/service/neptune/api_op_DescribeDBInstances.go @@ -35,24 +35,24 @@ type DescribeDBInstancesInput struct { // information from only the specific DB instance is returned. This parameter isn't // case-sensitive. Constraints: // - // * If supplied, must match the identifier of an + // * If supplied, must match the identifier of an // existing DBInstance. DBInstanceIdentifier *string // A filter that specifies one or more DB instances to describe. Supported // filters: // - // * db-cluster-id - Accepts DB cluster identifiers and DB cluster - // Amazon Resource Names (ARNs). The results list will only include information - // about the DB instances associated with the DB clusters identified by these - // ARNs. + // * db-cluster-id - Accepts DB cluster identifiers and DB cluster Amazon + // Resource Names (ARNs). The results list will only include information about the + // DB instances associated with the DB clusters identified by these ARNs. // - // * engine - Accepts an engine name (such as neptune), and restricts - // the results list to DB instances created by that engine. + // * engine + // - Accepts an engine name (such as neptune), and restricts the results list to DB + // instances created by that engine. // - // For example, to invoke - // this API from the AWS CLI and filter so that only Neptune DB instances are - // returned, you could use the following command: + // For example, to invoke this API from the AWS + // CLI and filter so that only Neptune DB instances are returned, you could use the + // following command: Filters []*types.Filter // An optional pagination token provided by a previous DescribeDBInstances request. diff --git a/service/neptune/api_op_DescribeDBParameterGroups.go b/service/neptune/api_op_DescribeDBParameterGroups.go index b5cd6f7c877..409560ccb9e 100644 --- a/service/neptune/api_op_DescribeDBParameterGroups.go +++ b/service/neptune/api_op_DescribeDBParameterGroups.go @@ -33,8 +33,8 @@ type DescribeDBParameterGroupsInput struct { // The name of a specific DB parameter group to return details for. Constraints: // - // - // * If supplied, must match the name of an existing DBClusterParameterGroup. + // * + // If supplied, must match the name of an existing DBClusterParameterGroup. DBParameterGroupName *string // This parameter is not currently supported. diff --git a/service/neptune/api_op_DescribeDBParameters.go b/service/neptune/api_op_DescribeDBParameters.go index b5a5b2bd8e7..5269d8f5ab0 100644 --- a/service/neptune/api_op_DescribeDBParameters.go +++ b/service/neptune/api_op_DescribeDBParameters.go @@ -31,8 +31,8 @@ type DescribeDBParametersInput struct { // The name of a specific DB parameter group to return details for. Constraints: // - // - // * If supplied, must match the name of an existing DBParameterGroup. + // * + // If supplied, must match the name of an existing DBParameterGroup. // // This member is required. 
DBParameterGroupName *string diff --git a/service/neptune/api_op_DescribeEvents.go b/service/neptune/api_op_DescribeEvents.go index e08381660c4..10ca054019b 100644 --- a/service/neptune/api_op_DescribeEvents.go +++ b/service/neptune/api_op_DescribeEvents.go @@ -63,23 +63,23 @@ type DescribeEventsInput struct { // The identifier of the event source for which events are returned. If not // specified, then all sources are included in the response. Constraints: // - // * If + // * If // SourceIdentifier is supplied, SourceType must also be provided. // - // * If the - // source type is DBInstance, then a DBInstanceIdentifier must be supplied. + // * If the source + // type is DBInstance, then a DBInstanceIdentifier must be supplied. // - // * - // If the source type is DBSecurityGroup, a DBSecurityGroupName must be supplied. + // * If the + // source type is DBSecurityGroup, a DBSecurityGroupName must be supplied. // + // * If + // the source type is DBParameterGroup, a DBParameterGroupName must be supplied. // - // * If the source type is DBParameterGroup, a DBParameterGroupName must be - // supplied. + // * + // If the source type is DBSnapshot, a DBSnapshotIdentifier must be supplied. // - // * If the source type is DBSnapshot, a DBSnapshotIdentifier must - // be supplied. - // - // * Cannot end with a hyphen or contain two consecutive hyphens. + // * + // Cannot end with a hyphen or contain two consecutive hyphens. SourceIdentifier *string // The event source to retrieve events for. If no value is specified, all events diff --git a/service/neptune/api_op_DescribePendingMaintenanceActions.go b/service/neptune/api_op_DescribePendingMaintenanceActions.go index bf1da6f47da..7551420f9f1 100644 --- a/service/neptune/api_op_DescribePendingMaintenanceActions.go +++ b/service/neptune/api_op_DescribePendingMaintenanceActions.go @@ -33,13 +33,13 @@ type DescribePendingMaintenanceActionsInput struct { // A filter that specifies one or more resources to return pending maintenance // actions for. Supported filters: // - // * db-cluster-id - Accepts DB cluster + // * db-cluster-id - Accepts DB cluster // identifiers and DB cluster Amazon Resource Names (ARNs). The results list will // only include pending maintenance actions for the DB clusters identified by these // ARNs. // - // * db-instance-id - Accepts DB instance identifiers and DB instance - // ARNs. The results list will only include pending maintenance actions for the DB + // * db-instance-id - Accepts DB instance identifiers and DB instance ARNs. + // The results list will only include pending maintenance actions for the DB // instances identified by these ARNs. Filters []*types.Filter diff --git a/service/neptune/api_op_FailoverDBCluster.go b/service/neptune/api_op_FailoverDBCluster.go index e9461bb99ac..6349004007a 100644 --- a/service/neptune/api_op_FailoverDBCluster.go +++ b/service/neptune/api_op_FailoverDBCluster.go @@ -39,7 +39,7 @@ type FailoverDBClusterInput struct { // A DB cluster identifier to force a failover for. This parameter is not // case-sensitive. Constraints: // - // * Must match the identifier of an existing + // * Must match the identifier of an existing // DBCluster. 
DBClusterIdentifier *string diff --git a/service/neptune/api_op_ModifyDBCluster.go b/service/neptune/api_op_ModifyDBCluster.go index 255560e3a42..8bda3c115f8 100644 --- a/service/neptune/api_op_ModifyDBCluster.go +++ b/service/neptune/api_op_ModifyDBCluster.go @@ -34,7 +34,7 @@ type ModifyDBClusterInput struct { // The DB cluster identifier for the cluster being modified. This parameter is not // case-sensitive. Constraints: // - // * Must match the identifier of an existing + // * Must match the identifier of an existing // DBCluster. // // This member is required. @@ -55,7 +55,7 @@ type ModifyDBClusterInput struct { // The number of days for which automated backups are retained. You must specify a // minimum value of 1. Default: 1 Constraints: // - // * Must be a value from 1 to 35 + // * Must be a value from 1 to 35 BackupRetentionPeriod *int32 // The configuration setting for the log types to be enabled for export to @@ -88,16 +88,16 @@ type ModifyDBClusterInput struct { // The new DB cluster identifier for the DB cluster when renaming a DB cluster. // This value is stored as a lowercase string. Constraints: // - // * Must contain - // from 1 to 63 letters, numbers, or hyphens + // * Must contain from 1 + // to 63 letters, numbers, or hyphens // - // * The first character must be a - // letter + // * The first character must be a letter // - // * Cannot end with a hyphen or contain two consecutive - // hyphens + // * + // Cannot end with a hyphen or contain two consecutive hyphens // - // Example: my-cluster2 + // Example: + // my-cluster2 NewDBClusterIdentifier *string // (Not supported by Neptune) @@ -112,15 +112,15 @@ type ModifyDBClusterInput struct { // 30-minute window selected at random from an 8-hour block of time for each AWS // Region. Constraints: // - // * Must be in the format hh24:mi-hh24:mi. + // * Must be in the format hh24:mi-hh24:mi. // - // * Must - // be in Universal Coordinated Time (UTC). + // * Must be in + // Universal Coordinated Time (UTC). // - // * Must not conflict with the - // preferred maintenance window. + // * Must not conflict with the preferred + // maintenance window. // - // * Must be at least 30 minutes. + // * Must be at least 30 minutes. PreferredBackupWindow *string // The weekly time range during which system maintenance can occur, in Universal diff --git a/service/neptune/api_op_ModifyDBClusterParameterGroup.go b/service/neptune/api_op_ModifyDBClusterParameterGroup.go index 0262fb96e7c..ba441a6555e 100644 --- a/service/neptune/api_op_ModifyDBClusterParameterGroup.go +++ b/service/neptune/api_op_ModifyDBClusterParameterGroup.go @@ -59,16 +59,16 @@ type ModifyDBClusterParameterGroupOutput struct { // The name of the DB cluster parameter group. Constraints: // - // * Must be 1 to 255 + // * Must be 1 to 255 // letters or numbers. // - // * First character must be a letter + // * First character must be a letter // - // * Cannot end - // with a hyphen or contain two consecutive hyphens + // * Cannot end with a + // hyphen or contain two consecutive hyphens // - // This value is stored as a - // lowercase string. + // This value is stored as a lowercase + // string. DBClusterParameterGroupName *string // Metadata pertaining to the operation's result. 
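A minimal Go sketch of the engine filter described on DescribeDBClustersInput above, assuming an already-constructed *neptune.Client and that the types.Filter struct in this module exposes Name and Values fields:

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/neptune"
	"github.com/aws/aws-sdk-go-v2/service/neptune/types"
)

// listNeptuneClusters applies the "engine" filter documented on
// DescribeDBClustersInput so that only Neptune DB clusters are returned,
// then prints each cluster identifier.
func listNeptuneClusters(ctx context.Context, client *neptune.Client) error {
	out, err := client.DescribeDBClusters(ctx, &neptune.DescribeDBClustersInput{
		Filters: []*types.Filter{
			{
				// Filter name and value come from the doc comment above; the
				// Name/Values field names are assumed from this module's types package.
				Name:   aws.String("engine"),
				Values: []*string{aws.String("neptune")},
			},
		},
	})
	if err != nil {
		return err
	}
	for _, cluster := range out.DBClusters {
		if cluster.DBClusterIdentifier != nil {
			fmt.Println(*cluster.DBClusterIdentifier)
		}
	}
	return nil
}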
diff --git a/service/neptune/api_op_ModifyDBInstance.go b/service/neptune/api_op_ModifyDBInstance.go index 5eabfbe7583..34323e8ec46 100644 --- a/service/neptune/api_op_ModifyDBInstance.go +++ b/service/neptune/api_op_ModifyDBInstance.go @@ -35,7 +35,7 @@ type ModifyDBInstanceInput struct { // The DB instance identifier. This value is stored as a lowercase string. // Constraints: // - // * Must match the identifier of an existing DBInstance. + // * Must match the identifier of an existing DBInstance. // // This member is required. DBInstanceIdentifier *string @@ -108,7 +108,7 @@ type ModifyDBInstanceInput struct { // setting doesn't result in an outage and the change is asynchronously applied as // soon as possible. Constraints: // - // * If supplied, must match existing + // * If supplied, must match existing // DBSecurityGroups. DBSecurityGroups []*string @@ -184,13 +184,13 @@ type ModifyDBInstanceInput struct { // maintenance window if Apply Immediately to false. This value is stored as a // lowercase string. Constraints: // - // * Must contain from 1 to 63 letters, - // numbers, or hyphens. + // * Must contain from 1 to 63 letters, numbers, or + // hyphens. // - // * The first character must be a letter. + // * The first character must be a letter. // - // * Cannot - // end with a hyphen or contain two consecutive hyphens. + // * Cannot end with a hyphen or + // contain two consecutive hyphens. // // Example: mydbinstance NewDBInstanceIdentifier *string @@ -206,15 +206,15 @@ type ModifyDBInstanceInput struct { // backups is managed by the DB cluster. For more information, see ModifyDBCluster. // Constraints: // - // * Must be in the format hh24:mi-hh24:mi + // * Must be in the format hh24:mi-hh24:mi // - // * Must be in - // Universal Time Coordinated (UTC) + // * Must be in Universal + // Time Coordinated (UTC) // - // * Must not conflict with the preferred - // maintenance window + // * Must not conflict with the preferred maintenance + // window // - // * Must be at least 30 minutes + // * Must be at least 30 minutes PreferredBackupWindow *string // The weekly time range (in UTC) during which system maintenance can occur, which @@ -252,7 +252,7 @@ type ModifyDBInstanceInput struct { // list of EC2 VPC security groups is managed by the DB cluster. For more // information, see ModifyDBCluster. Constraints: // - // * If supplied, must match + // * If supplied, must match // existing VpcSecurityGroupIds. VpcSecurityGroupIds []*string } diff --git a/service/neptune/api_op_ModifyDBParameterGroup.go b/service/neptune/api_op_ModifyDBParameterGroup.go index 1a7f8c226b2..f34715c36b7 100644 --- a/service/neptune/api_op_ModifyDBParameterGroup.go +++ b/service/neptune/api_op_ModifyDBParameterGroup.go @@ -45,8 +45,8 @@ type ModifyDBParameterGroupInput struct { // The name of the DB parameter group. Constraints: // - // * If supplied, must match - // the name of an existing DBParameterGroup. + // * If supplied, must match the + // name of an existing DBParameterGroup. // // This member is required. DBParameterGroupName *string diff --git a/service/neptune/api_op_RebootDBInstance.go b/service/neptune/api_op_RebootDBInstance.go index 4b68ac52a91..727e74ec20e 100644 --- a/service/neptune/api_op_RebootDBInstance.go +++ b/service/neptune/api_op_RebootDBInstance.go @@ -37,7 +37,7 @@ type RebootDBInstanceInput struct { // The DB instance identifier. This parameter is stored as a lowercase string. // Constraints: // - // * Must match the identifier of an existing DBInstance. 
+ // * Must match the identifier of an existing DBInstance. // // This member is required. DBInstanceIdentifier *string diff --git a/service/neptune/api_op_ResetDBClusterParameterGroup.go b/service/neptune/api_op_ResetDBClusterParameterGroup.go index 0e6e76e69a3..69680c94463 100644 --- a/service/neptune/api_op_ResetDBClusterParameterGroup.go +++ b/service/neptune/api_op_ResetDBClusterParameterGroup.go @@ -57,16 +57,16 @@ type ResetDBClusterParameterGroupOutput struct { // The name of the DB cluster parameter group. Constraints: // - // * Must be 1 to 255 + // * Must be 1 to 255 // letters or numbers. // - // * First character must be a letter + // * First character must be a letter // - // * Cannot end - // with a hyphen or contain two consecutive hyphens + // * Cannot end with a + // hyphen or contain two consecutive hyphens // - // This value is stored as a - // lowercase string. + // This value is stored as a lowercase + // string. DBClusterParameterGroupName *string // Metadata pertaining to the operation's result. diff --git a/service/neptune/api_op_ResetDBParameterGroup.go b/service/neptune/api_op_ResetDBParameterGroup.go index 5d4b9881c6c..4f437b19a4b 100644 --- a/service/neptune/api_op_ResetDBParameterGroup.go +++ b/service/neptune/api_op_ResetDBParameterGroup.go @@ -37,8 +37,8 @@ type ResetDBParameterGroupInput struct { // The name of the DB parameter group. Constraints: // - // * Must match the name of - // an existing DBParameterGroup. + // * Must match the name of an + // existing DBParameterGroup. // // This member is required. DBParameterGroupName *string diff --git a/service/neptune/api_op_RestoreDBClusterFromSnapshot.go b/service/neptune/api_op_RestoreDBClusterFromSnapshot.go index c1a6b408421..b2c7db89bca 100644 --- a/service/neptune/api_op_RestoreDBClusterFromSnapshot.go +++ b/service/neptune/api_op_RestoreDBClusterFromSnapshot.go @@ -38,16 +38,16 @@ type RestoreDBClusterFromSnapshotInput struct { // The name of the DB cluster to create from the DB snapshot or DB cluster // snapshot. This parameter isn't case-sensitive. Constraints: // - // * Must contain - // from 1 to 63 letters, numbers, or hyphens + // * Must contain from + // 1 to 63 letters, numbers, or hyphens // - // * First character must be a - // letter + // * First character must be a letter // - // * Cannot end with a hyphen or contain two consecutive - // hyphens + // * + // Cannot end with a hyphen or contain two consecutive hyphens // - // Example: my-snapshot-id + // Example: + // my-snapshot-id // // This member is required. DBClusterIdentifier *string @@ -63,7 +63,7 @@ type RestoreDBClusterFromSnapshotInput struct { // cluster snapshot. However, you can use only the ARN to specify a DB snapshot. // Constraints: // - // * Must match the identifier of an existing Snapshot. + // * Must match the identifier of an existing Snapshot. // // This member is required. SnapshotIdentifier *string @@ -75,7 +75,7 @@ type RestoreDBClusterFromSnapshotInput struct { // The name of the DB cluster parameter group to associate with the new DB cluster. // Constraints: // - // * If supplied, must match the name of an existing + // * If supplied, must match the name of an existing // DBClusterParameterGroup. DBClusterParameterGroupName *string @@ -111,12 +111,12 @@ type RestoreDBClusterFromSnapshotInput struct { // ARN for the KMS encryption key. 
If you do not specify a value for the KmsKeyId // parameter, then the following will occur: // - // * If the DB snapshot or DB - // cluster snapshot in SnapshotIdentifier is encrypted, then the restored DB - // cluster is encrypted using the KMS key that was used to encrypt the DB snapshot - // or DB cluster snapshot. + // * If the DB snapshot or DB cluster + // snapshot in SnapshotIdentifier is encrypted, then the restored DB cluster is + // encrypted using the KMS key that was used to encrypt the DB snapshot or DB + // cluster snapshot. // - // * If the DB snapshot or DB cluster snapshot in + // * If the DB snapshot or DB cluster snapshot in // SnapshotIdentifier is not encrypted, then the restored DB cluster is not // encrypted. KmsKeyId *string diff --git a/service/neptune/api_op_RestoreDBClusterToPointInTime.go b/service/neptune/api_op_RestoreDBClusterToPointInTime.go index 1ce625fcf38..c1f0bce3f13 100644 --- a/service/neptune/api_op_RestoreDBClusterToPointInTime.go +++ b/service/neptune/api_op_RestoreDBClusterToPointInTime.go @@ -41,21 +41,21 @@ type RestoreDBClusterToPointInTimeInput struct { // The name of the new DB cluster to be created. Constraints: // - // * Must contain - // from 1 to 63 letters, numbers, or hyphens + // * Must contain from + // 1 to 63 letters, numbers, or hyphens // - // * First character must be a - // letter + // * First character must be a letter // - // * Cannot end with a hyphen or contain two consecutive hyphens + // * + // Cannot end with a hyphen or contain two consecutive hyphens // // This member is required. DBClusterIdentifier *string // The identifier of the source DB cluster from which to restore. Constraints: // - // - // * Must match the identifier of an existing DBCluster. + // * + // Must match the identifier of an existing DBCluster. // // This member is required. SourceDBClusterIdentifier *string @@ -63,7 +63,7 @@ type RestoreDBClusterToPointInTimeInput struct { // The name of the DB cluster parameter group to associate with the new DB cluster. // Constraints: // - // * If supplied, must match the name of an existing + // * If supplied, must match the name of an existing // DBClusterParameterGroup. DBClusterParameterGroupName *string @@ -95,15 +95,15 @@ type RestoreDBClusterToPointInTimeInput struct { // KmsKeyId parameter. If you do not specify a value for the KmsKeyId parameter, // then the following will occur: // - // * If the DB cluster is encrypted, then the + // * If the DB cluster is encrypted, then the // restored DB cluster is encrypted using the KMS key that was used to encrypt the // source DB cluster. // - // * If the DB cluster is not encrypted, then the restored - // DB cluster is not encrypted. + // * If the DB cluster is not encrypted, then the restored DB + // cluster is not encrypted. // - // If DBClusterIdentifier refers to a DB cluster that - // is not encrypted, then the restore request is rejected. + // If DBClusterIdentifier refers to a DB cluster that is + // not encrypted, then the restore request is rejected. KmsKeyId *string // (Not supported by Neptune) @@ -116,17 +116,17 @@ type RestoreDBClusterToPointInTimeInput struct { // The date and time to restore the DB cluster to. 
Valid Values: Value must be a // time in Universal Coordinated Time (UTC) format Constraints: // - // * Must be - // before the latest restorable time for the DB instance + // * Must be before + // the latest restorable time for the DB instance // - // * Must be specified - // if UseLatestRestorableTime parameter is not provided + // * Must be specified if + // UseLatestRestorableTime parameter is not provided // - // * Cannot be specified - // if UseLatestRestorableTime parameter is true + // * Cannot be specified if + // UseLatestRestorableTime parameter is true // - // * Cannot be specified if - // RestoreType parameter is copy-on-write + // * Cannot be specified if RestoreType + // parameter is copy-on-write // // Example: 2015-03-07T23:45:00Z RestoreToTime *time.Time @@ -134,14 +134,14 @@ type RestoreDBClusterToPointInTimeInput struct { // The type of restore to be performed. You can specify one of the following // values: // - // * full-copy - The new DB cluster is restored as a full copy of the + // * full-copy - The new DB cluster is restored as a full copy of the // source DB cluster. // - // * copy-on-write - The new DB cluster is restored as a - // clone of the source DB cluster. + // * copy-on-write - The new DB cluster is restored as a clone + // of the source DB cluster. // - // If you don't specify a RestoreType value, then - // the new DB cluster is restored as a full copy of the source DB cluster. + // If you don't specify a RestoreType value, then the + // new DB cluster is restored as a full copy of the source DB cluster. RestoreType *string // The tags to be applied to the restored DB cluster. diff --git a/service/neptune/types/types.go b/service/neptune/types/types.go index e29cb64251d..7e82a587eb5 100644 --- a/service/neptune/types/types.go +++ b/service/neptune/types/types.go @@ -239,16 +239,16 @@ type DBClusterRole struct { // Describes the state of association between the IAM role and the DB cluster. The // Status property returns one of the following values: // - // * ACTIVE - the IAM - // role ARN is associated with the DB cluster and can be used to access other AWS + // * ACTIVE - the IAM role + // ARN is associated with the DB cluster and can be used to access other AWS // services on your behalf. // - // * PENDING - the IAM role ARN is being associated - // with the DB cluster. + // * PENDING - the IAM role ARN is being associated with + // the DB cluster. // - // * INVALID - the IAM role ARN is associated with the DB - // cluster, but the DB cluster is unable to assume the IAM role in order to access - // other AWS services on your behalf. + // * INVALID - the IAM role ARN is associated with the DB cluster, + // but the DB cluster is unable to assume the IAM role in order to access other AWS + // services on your behalf. Status *string } @@ -646,14 +646,14 @@ type DBParameterGroup struct { // The status of the DB parameter group. This data type is used as a response // element in the following actions: // -// * CreateDBInstance +// * CreateDBInstance // -// * -// DeleteDBInstance +// * DeleteDBInstance // -// * ModifyDBInstance +// * +// ModifyDBInstance // -// * RebootDBInstance +// * RebootDBInstance type DBParameterGroupStatus struct { // The name of the DP parameter group. 
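Taken together, the RestoreDBClusterToPointInTime constraints above mean a request supplies either RestoreToTime or UseLatestRestorableTime, and RestoreToTime only with a full-copy restore. A minimal sketch under those constraints, assuming an already-constructed *neptune.Client and hypothetical cluster identifiers:

package example

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/neptune"
)

// restoreClusterToPointInTime requests a full-copy restore of a source cluster to
// a specific point in time. Per the constraints documented above, RestoreToTime is
// used here rather than UseLatestRestorableTime, and the restore type is full-copy
// because RestoreToTime cannot be combined with copy-on-write.
func restoreClusterToPointInTime(ctx context.Context, client *neptune.Client, restoreTo time.Time) error {
	_, err := client.RestoreDBClusterToPointInTime(ctx, &neptune.RestoreDBClusterToPointInTimeInput{
		// Hypothetical identifiers for illustration only.
		DBClusterIdentifier:       aws.String("my-restored-cluster"), // 1 to 63 letters, numbers, or hyphens
		SourceDBClusterIdentifier: aws.String("my-source-cluster"),   // must match an existing DBCluster
		RestoreType:               aws.String("full-copy"),
		RestoreToTime:             &restoreTo, // must be before the latest restorable time
	})
	return err
}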
diff --git a/service/networkmanager/api_op_CreateSite.go b/service/networkmanager/api_op_CreateSite.go index a3ef1a56af8..8b8e95ee5f7 100644 --- a/service/networkmanager/api_op_CreateSite.go +++ b/service/networkmanager/api_op_CreateSite.go @@ -42,13 +42,12 @@ type CreateSiteInput struct { // Manager console. If you specify the address, the latitude and longitude are // automatically calculated. // - // * Address: The physical address of the site. + // * Address: The physical address of the site. // + // * + // Latitude: The latitude of the site. // - // * Latitude: The latitude of the site. - // - // * Longitude: The longitude of the - // site. + // * Longitude: The longitude of the site. Location *types.Location // The tags to apply to the resource during creation. diff --git a/service/networkmanager/api_op_UpdateSite.go b/service/networkmanager/api_op_UpdateSite.go index 3093299f31f..54f8fc2f7c3 100644 --- a/service/networkmanager/api_op_UpdateSite.go +++ b/service/networkmanager/api_op_UpdateSite.go @@ -46,12 +46,12 @@ type UpdateSiteInput struct { // The site location: // - // * Address: The physical address of the site. + // * Address: The physical address of the site. // - // * - // Latitude: The latitude of the site. + // * Latitude: + // The latitude of the site. // - // * Longitude: The longitude of the site. + // * Longitude: The longitude of the site. Location *types.Location } diff --git a/service/networkmanager/types/enums.go b/service/networkmanager/types/enums.go index f804c57f756..22b5586a280 100644 --- a/service/networkmanager/types/enums.go +++ b/service/networkmanager/types/enums.go @@ -164,10 +164,10 @@ type ValidationExceptionReason string // Enum values for ValidationExceptionReason const ( - ValidationExceptionReasonUnknown_operation ValidationExceptionReason = "UnknownOperation" - ValidationExceptionReasonCannot_parse ValidationExceptionReason = "CannotParse" - ValidationExceptionReasonField_validation_failed ValidationExceptionReason = "FieldValidationFailed" - ValidationExceptionReasonOther ValidationExceptionReason = "Other" + ValidationExceptionReasonUnknownOperation ValidationExceptionReason = "UnknownOperation" + ValidationExceptionReasonCannotParse ValidationExceptionReason = "CannotParse" + ValidationExceptionReasonFieldValidationFailed ValidationExceptionReason = "FieldValidationFailed" + ValidationExceptionReasonOther ValidationExceptionReason = "Other" ) // Values returns all known values for ValidationExceptionReason. Note that this diff --git a/service/opsworks/api_op_AssignInstance.go b/service/opsworks/api_op_AssignInstance.go index 53462e3388f..f8a0e2b4469 100644 --- a/service/opsworks/api_op_AssignInstance.go +++ b/service/opsworks/api_op_AssignInstance.go @@ -12,19 +12,19 @@ import ( // Assign a registered instance to a layer. // -// * You can assign registered +// * You can assign registered // on-premises instances to any layer type. // -// * You can assign registered Amazon -// EC2 instances only to custom layers. +// * You can assign registered Amazon EC2 +// instances only to custom layers. // -// * You cannot use this action with -// instances that were created with AWS OpsWorks Stacks. +// * You cannot use this action with instances +// that were created with AWS OpsWorks Stacks. // -// Required Permissions: To -// use this action, an AWS Identity and Access Management (IAM) user must have a -// Manage permissions level for the stack or an attached policy that explicitly -// grants permissions. 
For more information on user permissions, see Managing User +// Required Permissions: To use this +// action, an AWS Identity and Access Management (IAM) user must have a Manage +// permissions level for the stack or an attached policy that explicitly grants +// permissions. For more information on user permissions, see Managing User // Permissions // (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). func (c *Client) AssignInstance(ctx context.Context, params *AssignInstanceInput, optFns ...func(*Options)) (*AssignInstanceOutput, error) { diff --git a/service/opsworks/api_op_CloneStack.go b/service/opsworks/api_op_CloneStack.go index d6cff9c4411..08b6836f1ed 100644 --- a/service/opsworks/api_op_CloneStack.go +++ b/service/opsworks/api_op_CloneStack.go @@ -54,24 +54,24 @@ type CloneStackInput struct { // This member is required. SourceStackId *string - // The default AWS OpsWorks Stacks agent version. You have the following options: - // - // - // * Auto-update - Set this parameter to LATEST. AWS OpsWorks Stacks automatically - // installs new agent versions on the stack's instances as soon as they are - // available. - // - // * Fixed version - Set this parameter to your preferred agent - // version. To update the agent version, you must edit the stack configuration and - // specify a new version. AWS OpsWorks Stacks then automatically installs that - // version on the stack's instances. - // - // The default setting is LATEST. To specify an - // agent version, you must use the complete version number, not the abbreviated - // number shown on the console. For a list of available agent version numbers, call - // DescribeAgentVersions. AgentVersion cannot be set to Chef 12.2. You can also - // specify an agent version when you create or update an instance, which overrides - // the stack's default setting. + // The default AWS OpsWorks Stacks agent version. You have the following + // options: + // + // * Auto-update - Set this parameter to LATEST. AWS OpsWorks Stacks + // automatically installs new agent versions on the stack's instances as soon as + // they are available. + // + // * Fixed version - Set this parameter to your preferred + // agent version. To update the agent version, you must edit the stack + // configuration and specify a new version. AWS OpsWorks Stacks then automatically + // installs that version on the stack's instances. + // + // The default setting is LATEST. + // To specify an agent version, you must use the complete version number, not the + // abbreviated number shown on the console. For a list of available agent version + // numbers, call DescribeAgentVersions. AgentVersion cannot be set to Chef 12.2. + // You can also specify an agent version when you create or update an instance, + // which overrides the stack's default setting. AgentVersion *string // A list of stack attributes and values as key/value pairs to be added to the @@ -126,28 +126,27 @@ type CloneStackInput struct { // The stack's operating system, which must be set to one of the following. // - // * - // A supported Linux operating system: An Amazon Linux version, such as Amazon - // Linux 2018.03, Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, + // * A + // supported Linux operating system: An Amazon Linux version, such as Amazon Linux + // 2018.03, Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, // Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03. 
// - // * A + // * A // supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, // or Ubuntu 12.04 LTS. // - // * CentOS Linux 7 + // * CentOS Linux 7 // - // * Red Hat Enterprise Linux 7 + // * Red Hat Enterprise Linux 7 // + // * + // Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL + // Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or + // Microsoft Windows Server 2012 R2 with SQL Server Web. // - // * Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with - // SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, - // or Microsoft Windows Server 2012 R2 with SQL Server Web. - // - // * A custom AMI: - // Custom. You specify the custom AMI you want to use when you create instances. - // For more information about how to use custom AMIs with OpsWorks, see Using - // Custom AMIs + // * A custom AMI: Custom. + // You specify the custom AMI you want to use when you create instances. For more + // information about how to use custom AMIs with OpsWorks, see Using Custom AMIs // (https://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html). // // The @@ -189,33 +188,33 @@ type CloneStackInput struct { // HostnameTheme is set to Layer_Dependent, which creates host names by appending // integers to the layer's short name. The other themes are: // - // * Baked_Goods - // - // - // * Clouds - // - // * Europe_Cities + // * Baked_Goods // - // * Fruits + // * + // Clouds // - // * Greek_Deities_and_Titans + // * Europe_Cities // + // * Fruits // - // * Legendary_creatures_from_Japan + // * Greek_Deities_and_Titans // - // * Planets_and_Moons + // * + // Legendary_creatures_from_Japan // - // * Roman_Deities + // * Planets_and_Moons // + // * Roman_Deities // - // * Scottish_Islands + // * + // Scottish_Islands // - // * US_Cities + // * US_Cities // - // * Wild_Cats + // * Wild_Cats // - // To obtain a generated host - // name, call GetHostNameSuggestion, which returns a host name based on the current + // To obtain a generated host name, + // call GetHostNameSuggestion, which returns a host name based on the current // theme. HostnameTheme *string @@ -236,13 +235,13 @@ type CloneStackInput struct { // UseOpsworksSecurityGroups you can instead provide your own custom security // groups. UseOpsworksSecurityGroups has the following settings: // - // * True - AWS + // * True - AWS // OpsWorks Stacks automatically associates the appropriate built-in security group // with each layer (default setting). You can associate additional security groups // with a layer after you create it but you cannot delete the built-in security // group. // - // * False - AWS OpsWorks Stacks does not associate built-in security + // * False - AWS OpsWorks Stacks does not associate built-in security // groups with layers. You must create appropriate Amazon Elastic Compute Cloud // (Amazon EC2) security groups and associate a security group with each layer that // you create. However, you can still manually associate a built-in security group @@ -257,29 +256,28 @@ type CloneStackInput struct { // the specified region. All instances are launched into this VPC, and you cannot // change the ID later. // - // * If your account supports EC2 Classic, the default - // value is no VPC. + // * If your account supports EC2 Classic, the default value + // is no VPC. 
// - // * If your account does not support EC2 Classic, the - // default value is the default VPC for the specified region. + // * If your account does not support EC2 Classic, the default value is + // the default VPC for the specified region. // - // If the VPC ID - // corresponds to a default VPC and you have specified either the - // DefaultAvailabilityZone or the DefaultSubnetId parameter only, AWS OpsWorks - // Stacks infers the value of the other parameter. If you specify neither - // parameter, AWS OpsWorks Stacks sets these parameters to the first valid - // Availability Zone for the specified region and the corresponding default VPC - // subnet ID, respectively. If you specify a nondefault VPC ID, note the - // following: + // If the VPC ID corresponds to a + // default VPC and you have specified either the DefaultAvailabilityZone or the + // DefaultSubnetId parameter only, AWS OpsWorks Stacks infers the value of the + // other parameter. If you specify neither parameter, AWS OpsWorks Stacks sets + // these parameters to the first valid Availability Zone for the specified region + // and the corresponding default VPC subnet ID, respectively. If you specify a + // nondefault VPC ID, note the following: // - // * It must belong to a VPC in your account that is in the - // specified region. + // * It must belong to a VPC in your + // account that is in the specified region. // - // * You must specify a value for DefaultSubnetId. + // * You must specify a value for + // DefaultSubnetId. // - // For more - // information about how to use AWS OpsWorks Stacks with a VPC, see Running a Stack - // in a VPC + // For more information about how to use AWS OpsWorks Stacks with + // a VPC, see Running a Stack in a VPC // (https://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-vpc.html). // For more information about default VPC and EC2 Classic, see Supported Platforms // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-supported-platforms.html). diff --git a/service/opsworks/api_op_CreateInstance.go b/service/opsworks/api_op_CreateInstance.go index 22e1de385b3..4df38eefb2a 100644 --- a/service/opsworks/api_op_CreateInstance.go +++ b/service/opsworks/api_op_CreateInstance.go @@ -57,21 +57,21 @@ type CreateInstanceInput struct { // This member is required. StackId *string - // The default AWS OpsWorks Stacks agent version. You have the following options: - // + // The default AWS OpsWorks Stacks agent version. You have the following + // options: // // * INHERIT - Use the stack's default agent version setting. // - // * version_number - // - Use the specified agent version. This value overrides the stack's default - // setting. To update the agent version, edit the instance configuration and - // specify a new version. AWS OpsWorks Stacks then automatically installs that - // version on the instance. + // * + // version_number - Use the specified agent version. This value overrides the + // stack's default setting. To update the agent version, edit the instance + // configuration and specify a new version. AWS OpsWorks Stacks then automatically + // installs that version on the instance. // - // The default setting is INHERIT. To specify an agent - // version, you must use the complete version number, not the abbreviated number - // shown on the console. For a list of available agent version numbers, call - // DescribeAgentVersions. AgentVersion cannot be set to Chef 12.2. + // The default setting is INHERIT. 
To + // specify an agent version, you must use the complete version number, not the + // abbreviated number shown on the console. For a list of available agent version + // numbers, call DescribeAgentVersions. AgentVersion cannot be set to Chef 12.2. AgentVersion *string // A custom AMI ID to be used to create the instance. The AMI should be based on @@ -118,26 +118,26 @@ type CreateInstanceInput struct { // The instance's operating system, which must be set to one of the following. // - // - // * A supported Linux operating system: An Amazon Linux version, such as Amazon - // Linux 2018.03, Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, + // * A + // supported Linux operating system: An Amazon Linux version, such as Amazon Linux + // 2018.03, Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, // Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03. // - // * A + // * A // supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, // or Ubuntu 12.04 LTS. // - // * CentOS Linux 7 - // - // * Red Hat Enterprise Linux 7 + // * CentOS Linux 7 // + // * Red Hat Enterprise Linux 7 // - // * A supported Windows operating system, such as Microsoft Windows Server 2012 R2 + // * A + // supported Windows operating system, such as Microsoft Windows Server 2012 R2 // Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft // Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server // 2012 R2 with SQL Server Web. // - // * A custom AMI: Custom. + // * A custom AMI: Custom. // // For more information // about the supported operating systems, see AWS OpsWorks Stacks Operating Systems diff --git a/service/opsworks/api_op_CreateStack.go b/service/opsworks/api_op_CreateStack.go index c5f5fca4121..86969cf571c 100644 --- a/service/opsworks/api_op_CreateStack.go +++ b/service/opsworks/api_op_CreateStack.go @@ -77,24 +77,25 @@ type CreateStackInput struct { // This member is required. ServiceRoleArn *string - // The default AWS OpsWorks Stacks agent version. You have the following options: - // - // - // * Auto-update - Set this parameter to LATEST. AWS OpsWorks Stacks automatically - // installs new agent versions on the stack's instances as soon as they are - // available. - // - // * Fixed version - Set this parameter to your preferred agent - // version. To update the agent version, you must edit the stack configuration and - // specify a new version. AWS OpsWorks Stacks then automatically installs that - // version on the stack's instances. - // - // The default setting is the most recent - // release of the agent. To specify an agent version, you must use the complete - // version number, not the abbreviated number shown on the console. For a list of - // available agent version numbers, call DescribeAgentVersions. AgentVersion cannot - // be set to Chef 12.2. You can also specify an agent version when you create or - // update an instance, which overrides the stack's default setting. + // The default AWS OpsWorks Stacks agent version. You have the following + // options: + // + // * Auto-update - Set this parameter to LATEST. AWS OpsWorks Stacks + // automatically installs new agent versions on the stack's instances as soon as + // they are available. + // + // * Fixed version - Set this parameter to your preferred + // agent version. To update the agent version, you must edit the stack + // configuration and specify a new version. 
AWS OpsWorks Stacks then automatically + // installs that version on the stack's instances. + // + // The default setting is the most + // recent release of the agent. To specify an agent version, you must use the + // complete version number, not the abbreviated number shown on the console. For a + // list of available agent version numbers, call DescribeAgentVersions. + // AgentVersion cannot be set to Chef 12.2. You can also specify an agent version + // when you create or update an instance, which overrides the stack's default + // setting. AgentVersion *string // One or more user-defined key-value pairs to be added to the stack attributes. @@ -138,27 +139,27 @@ type CreateStackInput struct { // unless you specify a different operating system when you create the instance. // You can specify one of the following. // - // * A supported Linux operating system: - // An Amazon Linux version, such as Amazon Linux 2018.03, Amazon Linux 2017.09, - // Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux - // 2015.09, or Amazon Linux 2015.03. + // * A supported Linux operating system: An + // Amazon Linux version, such as Amazon Linux 2018.03, Amazon Linux 2017.09, Amazon + // Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, Amazon Linux 2015.09, + // or Amazon Linux 2015.03. // - // * A supported Ubuntu operating system, - // such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS. + // * A supported Ubuntu operating system, such as Ubuntu + // 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS. // - // * CentOS - // Linux 7 + // * CentOS Linux 7 // - // * Red Hat Enterprise Linux 7 + // * Red Hat + // Enterprise Linux 7 // - // * A supported Windows operating - // system, such as Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server - // 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL - // Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web. + // * A supported Windows operating system, such as Microsoft + // Windows Server 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server + // Express, Microsoft Windows Server 2012 R2 with SQL Server Standard, or Microsoft + // Windows Server 2012 R2 with SQL Server Web. // - // * - // A custom AMI: Custom. You specify the custom AMI you want to use when you create - // instances. For more information, see Using Custom AMIs + // * A custom AMI: Custom. You specify + // the custom AMI you want to use when you create instances. For more information, + // see Using Custom AMIs // (https://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html). // // The @@ -198,33 +199,33 @@ type CreateStackInput struct { // is set to Layer_Dependent, which creates host names by appending integers to the // layer's short name. 
The other themes are: // - // * Baked_Goods - // - // * Clouds + // * Baked_Goods // + // * Clouds // - // * Europe_Cities + // * + // Europe_Cities // - // * Fruits + // * Fruits // - // * Greek_Deities_and_Titans + // * Greek_Deities_and_Titans // - // * + // * // Legendary_creatures_from_Japan // - // * Planets_and_Moons - // - // * Roman_Deities + // * Planets_and_Moons // + // * Roman_Deities // - // * Scottish_Islands + // * + // Scottish_Islands // - // * US_Cities + // * US_Cities // - // * Wild_Cats + // * Wild_Cats // - // To obtain a generated host - // name, call GetHostNameSuggestion, which returns a host name based on the current + // To obtain a generated host name, + // call GetHostNameSuggestion, which returns a host name based on the current // theme. HostnameTheme *string @@ -237,13 +238,13 @@ type CreateStackInput struct { // UseOpsworksSecurityGroups you can instead provide your own custom security // groups. UseOpsworksSecurityGroups has the following settings: // - // * True - AWS + // * True - AWS // OpsWorks Stacks automatically associates the appropriate built-in security group // with each layer (default setting). You can associate additional security groups // with a layer after you create it, but you cannot delete the built-in security // group. // - // * False - AWS OpsWorks Stacks does not associate built-in security + // * False - AWS OpsWorks Stacks does not associate built-in security // groups with layers. You must create appropriate EC2 security groups and // associate a security group with each layer that you create. However, you can // still manually associate a built-in security group with a layer on creation; @@ -258,24 +259,24 @@ type CreateStackInput struct { // stack's region. All instances are launched into this VPC. You cannot change the // ID later. // - // * If your account supports EC2-Classic, the default value is no + // * If your account supports EC2-Classic, the default value is no // VPC. // - // * If your account does not support EC2-Classic, the default value is - // the default VPC for the specified region. + // * If your account does not support EC2-Classic, the default value is the + // default VPC for the specified region. // - // If the VPC ID corresponds to a - // default VPC and you have specified either the DefaultAvailabilityZone or the + // If the VPC ID corresponds to a default + // VPC and you have specified either the DefaultAvailabilityZone or the // DefaultSubnetId parameter only, AWS OpsWorks Stacks infers the value of the // other parameter. If you specify neither parameter, AWS OpsWorks Stacks sets // these parameters to the first valid Availability Zone for the specified region // and the corresponding default VPC subnet ID, respectively. If you specify a // nondefault VPC ID, note the following: // - // * It must belong to a VPC in your + // * It must belong to a VPC in your // account that is in the specified region. // - // * You must specify a value for + // * You must specify a value for // DefaultSubnetId. // // For more information about how to use AWS OpsWorks Stacks with diff --git a/service/opsworks/api_op_DescribePermissions.go b/service/opsworks/api_op_DescribePermissions.go index b77d8922f67..ddcea280e82 100644 --- a/service/opsworks/api_op_DescribePermissions.go +++ b/service/opsworks/api_op_DescribePermissions.go @@ -47,16 +47,16 @@ type DescribePermissionsOutput struct { // An array of Permission objects that describe the stack permissions. 
// - // * If - // the request object contains only a stack ID, the array contains a Permission - // object with permissions for each of the stack IAM ARNs. + // * If the + // request object contains only a stack ID, the array contains a Permission object + // with permissions for each of the stack IAM ARNs. // - // * If the request - // object contains only an IAM ARN, the array contains a Permission object with + // * If the request object + // contains only an IAM ARN, the array contains a Permission object with // permissions for each of the user's stack IDs. // - // * If the request contains a - // stack ID and an IAM ARN, the array contains a single Permission object with + // * If the request contains a stack + // ID and an IAM ARN, the array contains a single Permission object with // permissions for the specified stack and IAM ARN. Permissions []*types.Permission diff --git a/service/opsworks/api_op_SetPermission.go b/service/opsworks/api_op_SetPermission.go index 7ac5c3c5398..72ba9d34b73 100644 --- a/service/opsworks/api_op_SetPermission.go +++ b/service/opsworks/api_op_SetPermission.go @@ -54,19 +54,19 @@ type SetPermissionInput struct { // The user's permission level, which must be set to one of the following strings. // You cannot set your own permissions level. // - // * deny + // * deny // - // * show + // * show // - // * - // deploy + // * deploy // - // * manage + // * + // manage // - // * iam_only + // * iam_only // - // For more information about the permissions - // associated with these levels, see Managing User Permissions + // For more information about the permissions associated with + // these levels, see Managing User Permissions // (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). Level *string } diff --git a/service/opsworks/api_op_TagResource.go b/service/opsworks/api_op_TagResource.go index d0a1835e48c..bbd2e06ef51 100644 --- a/service/opsworks/api_op_TagResource.go +++ b/service/opsworks/api_op_TagResource.go @@ -39,21 +39,21 @@ type TagResourceInput struct { // A map that contains tag keys and tag values that are attached to a stack or // layer. // - // * The key cannot be empty. + // * The key cannot be empty. // - // * The key can be a maximum of 127 + // * The key can be a maximum of 127 // characters, and can contain only Unicode letters, numbers, or separators, or the // following special characters: + - = . _ : / // - // * The value can be a maximum - // 255 characters, and contain only Unicode letters, numbers, or separators, or the + // * The value can be a maximum 255 + // characters, and contain only Unicode letters, numbers, or separators, or the // following special characters: + - = . _ : / // - // * Leading and trailing white - // spaces are trimmed from both the key and value. + // * Leading and trailing white spaces + // are trimmed from both the key and value. // - // * A maximum of 40 tags is - // allowed for any resource. + // * A maximum of 40 tags is allowed for + // any resource. // // This member is required. Tags map[string]*string diff --git a/service/opsworks/api_op_UpdateInstance.go b/service/opsworks/api_op_UpdateInstance.go index da2793c1db7..99cc85725e1 100644 --- a/service/opsworks/api_op_UpdateInstance.go +++ b/service/opsworks/api_op_UpdateInstance.go @@ -38,21 +38,21 @@ type UpdateInstanceInput struct { // This member is required. InstanceId *string - // The default AWS OpsWorks Stacks agent version. You have the following options: - // + // The default AWS OpsWorks Stacks agent version. 
You have the following + // options: // // * INHERIT - Use the stack's default agent version setting. // - // * version_number - // - Use the specified agent version. This value overrides the stack's default - // setting. To update the agent version, you must edit the instance configuration - // and specify a new version. AWS OpsWorks Stacks then automatically installs that - // version on the instance. + // * + // version_number - Use the specified agent version. This value overrides the + // stack's default setting. To update the agent version, you must edit the instance + // configuration and specify a new version. AWS OpsWorks Stacks then automatically + // installs that version on the instance. // - // The default setting is INHERIT. To specify an agent - // version, you must use the complete version number, not the abbreviated number - // shown on the console. For a list of available agent version numbers, call - // DescribeAgentVersions. AgentVersion cannot be set to Chef 12.2. + // The default setting is INHERIT. To + // specify an agent version, you must use the complete version number, not the + // abbreviated number shown on the console. For a list of available agent version + // numbers, call DescribeAgentVersions. AgentVersion cannot be set to Chef 12.2. AgentVersion *string // The ID of the AMI that was used to create the instance. The value of this @@ -101,27 +101,27 @@ type UpdateInstanceInput struct { // The instance's operating system, which must be set to one of the following. You // cannot update an instance that is using a custom AMI. // - // * A supported Linux + // * A supported Linux // operating system: An Amazon Linux version, such as Amazon Linux 2018.03, Amazon // Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, Amazon Linux 2016.03, // Amazon Linux 2015.09, or Amazon Linux 2015.03. // - // * A supported Ubuntu - // operating system, such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 - // LTS. + // * A supported Ubuntu operating + // system, such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, or Ubuntu 12.04 LTS. // - // * CentOS Linux 7 + // * + // CentOS Linux 7 // - // * Red Hat Enterprise Linux 7 + // * Red Hat Enterprise Linux 7 // - // * A supported - // Windows operating system, such as Microsoft Windows Server 2012 R2 Base, - // Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft Windows - // Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server 2012 R2 - // with SQL Server Web. + // * A supported Windows operating + // system, such as Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server + // 2012 R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL + // Server Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web. // - // For more information about supported operating systems, - // see AWS OpsWorks Stacks Operating Systems + // For + // more information about supported operating systems, see AWS OpsWorks Stacks + // Operating Systems // (https://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html). // The default option is the current Amazon Linux version. If you set this // parameter to Custom, you must use the AmiId parameter to specify the custom AMI diff --git a/service/opsworks/api_op_UpdateStack.go b/service/opsworks/api_op_UpdateStack.go index 60d98cb3c33..4de88e28187 100644 --- a/service/opsworks/api_op_UpdateStack.go +++ b/service/opsworks/api_op_UpdateStack.go @@ -38,24 +38,24 @@ type UpdateStackInput struct { // This member is required. 
StackId *string - // The default AWS OpsWorks Stacks agent version. You have the following options: + // The default AWS OpsWorks Stacks agent version. You have the following + // options: // + // * Auto-update - Set this parameter to LATEST. AWS OpsWorks Stacks + // automatically installs new agent versions on the stack's instances as soon as + // they are available. // - // * Auto-update - Set this parameter to LATEST. AWS OpsWorks Stacks automatically - // installs new agent versions on the stack's instances as soon as they are - // available. + // * Fixed version - Set this parameter to your preferred + // agent version. To update the agent version, you must edit the stack + // configuration and specify a new version. AWS OpsWorks Stacks then automatically + // installs that version on the stack's instances. // - // * Fixed version - Set this parameter to your preferred agent - // version. To update the agent version, you must edit the stack configuration and - // specify a new version. AWS OpsWorks Stacks then automatically installs that - // version on the stack's instances. - // - // The default setting is LATEST. To specify an - // agent version, you must use the complete version number, not the abbreviated - // number shown on the console. For a list of available agent version numbers, call - // DescribeAgentVersions. AgentVersion cannot be set to Chef 12.2. You can also - // specify an agent version when you create or update an instance, which overrides - // the stack's default setting. + // The default setting is LATEST. + // To specify an agent version, you must use the complete version number, not the + // abbreviated number shown on the console. For a list of available agent version + // numbers, call DescribeAgentVersions. AgentVersion cannot be set to Chef 12.2. + // You can also specify an agent version when you create or update an instance, + // which overrides the stack's default setting. AgentVersion *string // One or more user-defined key-value pairs to be added to the stack attributes. @@ -102,28 +102,28 @@ type UpdateStackInput struct { // The stack's operating system, which must be set to one of the following: // - // * - // A supported Linux operating system: An Amazon Linux version, such as Amazon - // Linux 2018.03, Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, + // * A + // supported Linux operating system: An Amazon Linux version, such as Amazon Linux + // 2018.03, Amazon Linux 2017.09, Amazon Linux 2017.03, Amazon Linux 2016.09, // Amazon Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03. // - // * A + // * A // supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu 14.04 LTS, // or Ubuntu 12.04 LTS. // - // * CentOS Linux 7 - // - // * Red Hat Enterprise Linux 7 + // * CentOS Linux 7 // + // * Red Hat Enterprise Linux 7 // - // * A supported Windows operating system, such as Microsoft Windows Server 2012 R2 + // * A + // supported Windows operating system, such as Microsoft Windows Server 2012 R2 // Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft // Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server // 2012 R2 with SQL Server Web. // - // * A custom AMI: Custom. You specify the custom - // AMI you want to use when you create instances. For more information about how to - // use custom AMIs with OpsWorks, see Using Custom AMIs + // * A custom AMI: Custom. You specify the custom AMI + // you want to use when you create instances. 
For more information about how to use + // custom AMIs with OpsWorks, see Using Custom AMIs // (https://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html). // // The @@ -163,33 +163,33 @@ type UpdateStackInput struct { // HostnameTheme is set to Layer_Dependent, which creates host names by appending // integers to the layer's short name. The other themes are: // - // * Baked_Goods - // - // - // * Clouds - // - // * Europe_Cities + // * Baked_Goods // - // * Fruits + // * + // Clouds // - // * Greek_Deities_and_Titans + // * Europe_Cities // + // * Fruits // - // * Legendary_creatures_from_Japan + // * Greek_Deities_and_Titans // - // * Planets_and_Moons + // * + // Legendary_creatures_from_Japan // - // * Roman_Deities + // * Planets_and_Moons // + // * Roman_Deities // - // * Scottish_Islands + // * + // Scottish_Islands // - // * US_Cities + // * US_Cities // - // * Wild_Cats + // * Wild_Cats // - // To obtain a generated host - // name, call GetHostNameSuggestion, which returns a host name based on the current + // To obtain a generated host name, + // call GetHostNameSuggestion, which returns a host name based on the current // theme. HostnameTheme *string @@ -209,17 +209,17 @@ type UpdateStackInput struct { // instead of using the built-in groups. UseOpsworksSecurityGroups has the // following settings: // - // * True - AWS OpsWorks Stacks automatically associates - // the appropriate built-in security group with each layer (default setting). You - // can associate additional security groups with a layer after you create it, but - // you cannot delete the built-in security group. + // * True - AWS OpsWorks Stacks automatically associates the + // appropriate built-in security group with each layer (default setting). You can + // associate additional security groups with a layer after you create it, but you + // cannot delete the built-in security group. // - // * False - AWS OpsWorks - // Stacks does not associate built-in security groups with layers. You must create - // appropriate EC2 security groups and associate a security group with each layer - // that you create. However, you can still manually associate a built-in security - // group with a layer on. Custom security groups are required only for those layers - // that need custom settings. + // * False - AWS OpsWorks Stacks does + // not associate built-in security groups with layers. You must create appropriate + // EC2 security groups and associate a security group with each layer that you + // create. However, you can still manually associate a built-in security group with + // a layer on. Custom security groups are required only for those layers that need + // custom settings. // // For more information, see Create a New Stack // (https://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-creating.html). diff --git a/service/opsworks/doc.go b/service/opsworks/doc.go index edf7e206baa..29ae0b8ccad 100644 --- a/service/opsworks/doc.go +++ b/service/opsworks/doc.go @@ -14,81 +14,79 @@ // SDKs to implement applications in your preferred language. 
For more information, // see: // -// * AWS CLI +// * AWS CLI // (https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-welcome.html) // -// * -// AWS SDK for Java +// * AWS +// SDK for Java // (https://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/opsworks/AWSOpsWorksClient.html) // -// -// * AWS SDK for .NET +// * +// AWS SDK for .NET // (https://docs.aws.amazon.com/sdkfornet/latest/apidocs/html/N_Amazon_OpsWorks.htm) // -// -// * AWS SDK for PHP 2 +// * +// AWS SDK for PHP 2 // (https://docs.aws.amazon.com/aws-sdk-php-2/latest/class-Aws.OpsWorks.OpsWorksClient.html) // +// * +// AWS SDK for Ruby (http://docs.aws.amazon.com/sdkforruby/api/) // -// * AWS SDK for Ruby (http://docs.aws.amazon.com/sdkforruby/api/) -// -// * AWS SDK -// for Node.js (http://aws.amazon.com/documentation/sdkforjavascript/) -// -// * AWS -// SDK for Python(Boto) -// (http://docs.pythonboto.org/en/latest/ref/opsworks.html) +// * AWS SDK for +// Node.js (http://aws.amazon.com/documentation/sdkforjavascript/) // -// Endpoints AWS OpsWorks -// Stacks supports the following endpoints, all HTTPS. You must connect to one of -// the following endpoints. Stacks can only be accessed or managed within the -// endpoint in which they are created. +// * AWS SDK for +// Python(Boto) (http://docs.pythonboto.org/en/latest/ref/opsworks.html) // -// * opsworks.us-east-1.amazonaws.com +// Endpoints +// AWS OpsWorks Stacks supports the following endpoints, all HTTPS. You must +// connect to one of the following endpoints. Stacks can only be accessed or +// managed within the endpoint in which they are created. // +// * +// opsworks.us-east-1.amazonaws.com // // * opsworks.us-east-2.amazonaws.com // -// * opsworks.us-west-1.amazonaws.com -// +// * +// opsworks.us-west-1.amazonaws.com // // * opsworks.us-west-2.amazonaws.com // -// * opsworks.ca-central-1.amazonaws.com -// (API only; not available in the AWS console) -// -// * -// opsworks.eu-west-1.amazonaws.com +// * +// opsworks.ca-central-1.amazonaws.com (API only; not available in the AWS +// console) // -// * opsworks.eu-west-2.amazonaws.com +// * opsworks.eu-west-1.amazonaws.com // -// * -// opsworks.eu-west-3.amazonaws.com +// * +// opsworks.eu-west-2.amazonaws.com // -// * opsworks.eu-central-1.amazonaws.com +// * opsworks.eu-west-3.amazonaws.com // +// * +// opsworks.eu-central-1.amazonaws.com // // * opsworks.ap-northeast-1.amazonaws.com // -// * +// * // opsworks.ap-northeast-2.amazonaws.com // -// * opsworks.ap-south-1.amazonaws.com -// +// * opsworks.ap-south-1.amazonaws.com // -// * opsworks.ap-southeast-1.amazonaws.com +// * +// opsworks.ap-southeast-1.amazonaws.com // -// * +// * // opsworks.ap-southeast-2.amazonaws.com // -// * -// opsworks.sa-east-1.amazonaws.com +// * opsworks.sa-east-1.amazonaws.com // -// Chef Versions When you call CreateStack, -// CloneStack, or UpdateStack we recommend you use the ConfigurationManager -// parameter to specify the Chef version. The recommended and default value for -// Linux stacks is currently 12. Windows stacks use Chef 12.2. For more -// information, see Chef Versions +// Chef +// Versions When you call CreateStack, CloneStack, or UpdateStack we recommend you +// use the ConfigurationManager parameter to specify the Chef version. The +// recommended and default value for Linux stacks is currently 12. Windows stacks +// use Chef 12.2. For more information, see Chef Versions // (https://docs.aws.amazon.com/opsworks/latest/userguide/workingcookbook-chef11.html). 
// You can specify Chef 12, 11.10, or 11.4 for your Linux stack. We recommend // migrating your existing Linux stacks to Chef 12 as soon as possible. diff --git a/service/opsworks/types/enums.go b/service/opsworks/types/enums.go index 592de5df4e6..c46c114b334 100644 --- a/service/opsworks/types/enums.go +++ b/service/opsworks/types/enums.go @@ -56,8 +56,8 @@ type Architecture string // Enum values for Architecture const ( - ArchitectureX86_64 Architecture = "x86_64" - ArchitectureI386 Architecture = "i386" + ArchitectureX8664 Architecture = "x86_64" + ArchitectureI386 Architecture = "i386" ) // Values returns all known values for Architecture. Note that this can be expanded @@ -92,98 +92,98 @@ type CloudWatchLogsEncoding string // Enum values for CloudWatchLogsEncoding const ( - CloudWatchLogsEncodingAscii CloudWatchLogsEncoding = "ascii" - CloudWatchLogsEncodingBig5 CloudWatchLogsEncoding = "big5" - CloudWatchLogsEncodingBig5hkscs CloudWatchLogsEncoding = "big5hkscs" - CloudWatchLogsEncodingCp037 CloudWatchLogsEncoding = "cp037" - CloudWatchLogsEncodingCp424 CloudWatchLogsEncoding = "cp424" - CloudWatchLogsEncodingCp437 CloudWatchLogsEncoding = "cp437" - CloudWatchLogsEncodingCp500 CloudWatchLogsEncoding = "cp500" - CloudWatchLogsEncodingCp720 CloudWatchLogsEncoding = "cp720" - CloudWatchLogsEncodingCp737 CloudWatchLogsEncoding = "cp737" - CloudWatchLogsEncodingCp775 CloudWatchLogsEncoding = "cp775" - CloudWatchLogsEncodingCp850 CloudWatchLogsEncoding = "cp850" - CloudWatchLogsEncodingCp852 CloudWatchLogsEncoding = "cp852" - CloudWatchLogsEncodingCp855 CloudWatchLogsEncoding = "cp855" - CloudWatchLogsEncodingCp856 CloudWatchLogsEncoding = "cp856" - CloudWatchLogsEncodingCp857 CloudWatchLogsEncoding = "cp857" - CloudWatchLogsEncodingCp858 CloudWatchLogsEncoding = "cp858" - CloudWatchLogsEncodingCp860 CloudWatchLogsEncoding = "cp860" - CloudWatchLogsEncodingCp861 CloudWatchLogsEncoding = "cp861" - CloudWatchLogsEncodingCp862 CloudWatchLogsEncoding = "cp862" - CloudWatchLogsEncodingCp863 CloudWatchLogsEncoding = "cp863" - CloudWatchLogsEncodingCp864 CloudWatchLogsEncoding = "cp864" - CloudWatchLogsEncodingCp865 CloudWatchLogsEncoding = "cp865" - CloudWatchLogsEncodingCp866 CloudWatchLogsEncoding = "cp866" - CloudWatchLogsEncodingCp869 CloudWatchLogsEncoding = "cp869" - CloudWatchLogsEncodingCp874 CloudWatchLogsEncoding = "cp874" - CloudWatchLogsEncodingCp875 CloudWatchLogsEncoding = "cp875" - CloudWatchLogsEncodingCp932 CloudWatchLogsEncoding = "cp932" - CloudWatchLogsEncodingCp949 CloudWatchLogsEncoding = "cp949" - CloudWatchLogsEncodingCp950 CloudWatchLogsEncoding = "cp950" - CloudWatchLogsEncodingCp1006 CloudWatchLogsEncoding = "cp1006" - CloudWatchLogsEncodingCp1026 CloudWatchLogsEncoding = "cp1026" - CloudWatchLogsEncodingCp1140 CloudWatchLogsEncoding = "cp1140" - CloudWatchLogsEncodingCp1250 CloudWatchLogsEncoding = "cp1250" - CloudWatchLogsEncodingCp1251 CloudWatchLogsEncoding = "cp1251" - CloudWatchLogsEncodingCp1252 CloudWatchLogsEncoding = "cp1252" - CloudWatchLogsEncodingCp1253 CloudWatchLogsEncoding = "cp1253" - CloudWatchLogsEncodingCp1254 CloudWatchLogsEncoding = "cp1254" - CloudWatchLogsEncodingCp1255 CloudWatchLogsEncoding = "cp1255" - CloudWatchLogsEncodingCp1256 CloudWatchLogsEncoding = "cp1256" - CloudWatchLogsEncodingCp1257 CloudWatchLogsEncoding = "cp1257" - CloudWatchLogsEncodingCp1258 CloudWatchLogsEncoding = "cp1258" - CloudWatchLogsEncodingEuc_jp CloudWatchLogsEncoding = "euc_jp" - CloudWatchLogsEncodingEuc_jis_2004 CloudWatchLogsEncoding = "euc_jis_2004" - 
CloudWatchLogsEncodingEuc_jisx0213 CloudWatchLogsEncoding = "euc_jisx0213" - CloudWatchLogsEncodingEuc_kr CloudWatchLogsEncoding = "euc_kr" - CloudWatchLogsEncodingGb2312 CloudWatchLogsEncoding = "gb2312" - CloudWatchLogsEncodingGbk CloudWatchLogsEncoding = "gbk" - CloudWatchLogsEncodingGb18030 CloudWatchLogsEncoding = "gb18030" - CloudWatchLogsEncodingHz CloudWatchLogsEncoding = "hz" - CloudWatchLogsEncodingIso2022_jp CloudWatchLogsEncoding = "iso2022_jp" - CloudWatchLogsEncodingIso2022_jp_1 CloudWatchLogsEncoding = "iso2022_jp_1" - CloudWatchLogsEncodingIso2022_jp_2 CloudWatchLogsEncoding = "iso2022_jp_2" - CloudWatchLogsEncodingIso2022_jp_2004 CloudWatchLogsEncoding = "iso2022_jp_2004" - CloudWatchLogsEncodingIso2022_jp_3 CloudWatchLogsEncoding = "iso2022_jp_3" - CloudWatchLogsEncodingIso2022_jp_ext CloudWatchLogsEncoding = "iso2022_jp_ext" - CloudWatchLogsEncodingIso2022_kr CloudWatchLogsEncoding = "iso2022_kr" - CloudWatchLogsEncodingLatin_1 CloudWatchLogsEncoding = "latin_1" - CloudWatchLogsEncodingIso8859_2 CloudWatchLogsEncoding = "iso8859_2" - CloudWatchLogsEncodingIso8859_3 CloudWatchLogsEncoding = "iso8859_3" - CloudWatchLogsEncodingIso8859_4 CloudWatchLogsEncoding = "iso8859_4" - CloudWatchLogsEncodingIso8859_5 CloudWatchLogsEncoding = "iso8859_5" - CloudWatchLogsEncodingIso8859_6 CloudWatchLogsEncoding = "iso8859_6" - CloudWatchLogsEncodingIso8859_7 CloudWatchLogsEncoding = "iso8859_7" - CloudWatchLogsEncodingIso8859_8 CloudWatchLogsEncoding = "iso8859_8" - CloudWatchLogsEncodingIso8859_9 CloudWatchLogsEncoding = "iso8859_9" - CloudWatchLogsEncodingIso8859_10 CloudWatchLogsEncoding = "iso8859_10" - CloudWatchLogsEncodingIso8859_13 CloudWatchLogsEncoding = "iso8859_13" - CloudWatchLogsEncodingIso8859_14 CloudWatchLogsEncoding = "iso8859_14" - CloudWatchLogsEncodingIso8859_15 CloudWatchLogsEncoding = "iso8859_15" - CloudWatchLogsEncodingIso8859_16 CloudWatchLogsEncoding = "iso8859_16" - CloudWatchLogsEncodingJohab CloudWatchLogsEncoding = "johab" - CloudWatchLogsEncodingKoi8_r CloudWatchLogsEncoding = "koi8_r" - CloudWatchLogsEncodingKoi8_u CloudWatchLogsEncoding = "koi8_u" - CloudWatchLogsEncodingMac_cyrillic CloudWatchLogsEncoding = "mac_cyrillic" - CloudWatchLogsEncodingMac_greek CloudWatchLogsEncoding = "mac_greek" - CloudWatchLogsEncodingMac_iceland CloudWatchLogsEncoding = "mac_iceland" - CloudWatchLogsEncodingMac_latin2 CloudWatchLogsEncoding = "mac_latin2" - CloudWatchLogsEncodingMac_roman CloudWatchLogsEncoding = "mac_roman" - CloudWatchLogsEncodingMac_turkish CloudWatchLogsEncoding = "mac_turkish" - CloudWatchLogsEncodingPtcp154 CloudWatchLogsEncoding = "ptcp154" - CloudWatchLogsEncodingShift_jis CloudWatchLogsEncoding = "shift_jis" - CloudWatchLogsEncodingShift_jis_2004 CloudWatchLogsEncoding = "shift_jis_2004" - CloudWatchLogsEncodingShift_jisx0213 CloudWatchLogsEncoding = "shift_jisx0213" - CloudWatchLogsEncodingUtf_32 CloudWatchLogsEncoding = "utf_32" - CloudWatchLogsEncodingUtf_32_be CloudWatchLogsEncoding = "utf_32_be" - CloudWatchLogsEncodingUtf_32_le CloudWatchLogsEncoding = "utf_32_le" - CloudWatchLogsEncodingUtf_16 CloudWatchLogsEncoding = "utf_16" - CloudWatchLogsEncodingUtf_16_be CloudWatchLogsEncoding = "utf_16_be" - CloudWatchLogsEncodingUtf_16_le CloudWatchLogsEncoding = "utf_16_le" - CloudWatchLogsEncodingUtf_7 CloudWatchLogsEncoding = "utf_7" - CloudWatchLogsEncodingUtf_8 CloudWatchLogsEncoding = "utf_8" - CloudWatchLogsEncodingUtf_8_sig CloudWatchLogsEncoding = "utf_8_sig" + CloudWatchLogsEncodingAscii CloudWatchLogsEncoding = "ascii" + 
CloudWatchLogsEncodingBig5 CloudWatchLogsEncoding = "big5" + CloudWatchLogsEncodingBig5hkscs CloudWatchLogsEncoding = "big5hkscs" + CloudWatchLogsEncodingCp037 CloudWatchLogsEncoding = "cp037" + CloudWatchLogsEncodingCp424 CloudWatchLogsEncoding = "cp424" + CloudWatchLogsEncodingCp437 CloudWatchLogsEncoding = "cp437" + CloudWatchLogsEncodingCp500 CloudWatchLogsEncoding = "cp500" + CloudWatchLogsEncodingCp720 CloudWatchLogsEncoding = "cp720" + CloudWatchLogsEncodingCp737 CloudWatchLogsEncoding = "cp737" + CloudWatchLogsEncodingCp775 CloudWatchLogsEncoding = "cp775" + CloudWatchLogsEncodingCp850 CloudWatchLogsEncoding = "cp850" + CloudWatchLogsEncodingCp852 CloudWatchLogsEncoding = "cp852" + CloudWatchLogsEncodingCp855 CloudWatchLogsEncoding = "cp855" + CloudWatchLogsEncodingCp856 CloudWatchLogsEncoding = "cp856" + CloudWatchLogsEncodingCp857 CloudWatchLogsEncoding = "cp857" + CloudWatchLogsEncodingCp858 CloudWatchLogsEncoding = "cp858" + CloudWatchLogsEncodingCp860 CloudWatchLogsEncoding = "cp860" + CloudWatchLogsEncodingCp861 CloudWatchLogsEncoding = "cp861" + CloudWatchLogsEncodingCp862 CloudWatchLogsEncoding = "cp862" + CloudWatchLogsEncodingCp863 CloudWatchLogsEncoding = "cp863" + CloudWatchLogsEncodingCp864 CloudWatchLogsEncoding = "cp864" + CloudWatchLogsEncodingCp865 CloudWatchLogsEncoding = "cp865" + CloudWatchLogsEncodingCp866 CloudWatchLogsEncoding = "cp866" + CloudWatchLogsEncodingCp869 CloudWatchLogsEncoding = "cp869" + CloudWatchLogsEncodingCp874 CloudWatchLogsEncoding = "cp874" + CloudWatchLogsEncodingCp875 CloudWatchLogsEncoding = "cp875" + CloudWatchLogsEncodingCp932 CloudWatchLogsEncoding = "cp932" + CloudWatchLogsEncodingCp949 CloudWatchLogsEncoding = "cp949" + CloudWatchLogsEncodingCp950 CloudWatchLogsEncoding = "cp950" + CloudWatchLogsEncodingCp1006 CloudWatchLogsEncoding = "cp1006" + CloudWatchLogsEncodingCp1026 CloudWatchLogsEncoding = "cp1026" + CloudWatchLogsEncodingCp1140 CloudWatchLogsEncoding = "cp1140" + CloudWatchLogsEncodingCp1250 CloudWatchLogsEncoding = "cp1250" + CloudWatchLogsEncodingCp1251 CloudWatchLogsEncoding = "cp1251" + CloudWatchLogsEncodingCp1252 CloudWatchLogsEncoding = "cp1252" + CloudWatchLogsEncodingCp1253 CloudWatchLogsEncoding = "cp1253" + CloudWatchLogsEncodingCp1254 CloudWatchLogsEncoding = "cp1254" + CloudWatchLogsEncodingCp1255 CloudWatchLogsEncoding = "cp1255" + CloudWatchLogsEncodingCp1256 CloudWatchLogsEncoding = "cp1256" + CloudWatchLogsEncodingCp1257 CloudWatchLogsEncoding = "cp1257" + CloudWatchLogsEncodingCp1258 CloudWatchLogsEncoding = "cp1258" + CloudWatchLogsEncodingEucJp CloudWatchLogsEncoding = "euc_jp" + CloudWatchLogsEncodingEucJis2004 CloudWatchLogsEncoding = "euc_jis_2004" + CloudWatchLogsEncodingEucJisx0213 CloudWatchLogsEncoding = "euc_jisx0213" + CloudWatchLogsEncodingEucKr CloudWatchLogsEncoding = "euc_kr" + CloudWatchLogsEncodingGb2312 CloudWatchLogsEncoding = "gb2312" + CloudWatchLogsEncodingGbk CloudWatchLogsEncoding = "gbk" + CloudWatchLogsEncodingGb18030 CloudWatchLogsEncoding = "gb18030" + CloudWatchLogsEncodingHz CloudWatchLogsEncoding = "hz" + CloudWatchLogsEncodingIso2022Jp CloudWatchLogsEncoding = "iso2022_jp" + CloudWatchLogsEncodingIso2022Jp1 CloudWatchLogsEncoding = "iso2022_jp_1" + CloudWatchLogsEncodingIso2022Jp2 CloudWatchLogsEncoding = "iso2022_jp_2" + CloudWatchLogsEncodingIso2022Jp2004 CloudWatchLogsEncoding = "iso2022_jp_2004" + CloudWatchLogsEncodingIso2022Jp3 CloudWatchLogsEncoding = "iso2022_jp_3" + CloudWatchLogsEncodingIso2022JpExt CloudWatchLogsEncoding = "iso2022_jp_ext" + 
CloudWatchLogsEncodingIso2022Kr CloudWatchLogsEncoding = "iso2022_kr" + CloudWatchLogsEncodingLatin1 CloudWatchLogsEncoding = "latin_1" + CloudWatchLogsEncodingIso88592 CloudWatchLogsEncoding = "iso8859_2" + CloudWatchLogsEncodingIso88593 CloudWatchLogsEncoding = "iso8859_3" + CloudWatchLogsEncodingIso88594 CloudWatchLogsEncoding = "iso8859_4" + CloudWatchLogsEncodingIso88595 CloudWatchLogsEncoding = "iso8859_5" + CloudWatchLogsEncodingIso88596 CloudWatchLogsEncoding = "iso8859_6" + CloudWatchLogsEncodingIso88597 CloudWatchLogsEncoding = "iso8859_7" + CloudWatchLogsEncodingIso88598 CloudWatchLogsEncoding = "iso8859_8" + CloudWatchLogsEncodingIso88599 CloudWatchLogsEncoding = "iso8859_9" + CloudWatchLogsEncodingIso885910 CloudWatchLogsEncoding = "iso8859_10" + CloudWatchLogsEncodingIso885913 CloudWatchLogsEncoding = "iso8859_13" + CloudWatchLogsEncodingIso885914 CloudWatchLogsEncoding = "iso8859_14" + CloudWatchLogsEncodingIso885915 CloudWatchLogsEncoding = "iso8859_15" + CloudWatchLogsEncodingIso885916 CloudWatchLogsEncoding = "iso8859_16" + CloudWatchLogsEncodingJohab CloudWatchLogsEncoding = "johab" + CloudWatchLogsEncodingKoi8R CloudWatchLogsEncoding = "koi8_r" + CloudWatchLogsEncodingKoi8U CloudWatchLogsEncoding = "koi8_u" + CloudWatchLogsEncodingMacCyrillic CloudWatchLogsEncoding = "mac_cyrillic" + CloudWatchLogsEncodingMacGreek CloudWatchLogsEncoding = "mac_greek" + CloudWatchLogsEncodingMacIceland CloudWatchLogsEncoding = "mac_iceland" + CloudWatchLogsEncodingMacLatin2 CloudWatchLogsEncoding = "mac_latin2" + CloudWatchLogsEncodingMacRoman CloudWatchLogsEncoding = "mac_roman" + CloudWatchLogsEncodingMacTurkish CloudWatchLogsEncoding = "mac_turkish" + CloudWatchLogsEncodingPtcp154 CloudWatchLogsEncoding = "ptcp154" + CloudWatchLogsEncodingShiftJis CloudWatchLogsEncoding = "shift_jis" + CloudWatchLogsEncodingShiftJis2004 CloudWatchLogsEncoding = "shift_jis_2004" + CloudWatchLogsEncodingShiftJisx0213 CloudWatchLogsEncoding = "shift_jisx0213" + CloudWatchLogsEncodingUtf32 CloudWatchLogsEncoding = "utf_32" + CloudWatchLogsEncodingUtf32Be CloudWatchLogsEncoding = "utf_32_be" + CloudWatchLogsEncodingUtf32Le CloudWatchLogsEncoding = "utf_32_le" + CloudWatchLogsEncodingUtf16 CloudWatchLogsEncoding = "utf_16" + CloudWatchLogsEncodingUtf16Be CloudWatchLogsEncoding = "utf_16_be" + CloudWatchLogsEncodingUtf16Le CloudWatchLogsEncoding = "utf_16_le" + CloudWatchLogsEncodingUtf7 CloudWatchLogsEncoding = "utf_7" + CloudWatchLogsEncodingUtf8 CloudWatchLogsEncoding = "utf_8" + CloudWatchLogsEncodingUtf8Sig CloudWatchLogsEncoding = "utf_8_sig" ) // Values returns all known values for CloudWatchLogsEncoding. Note that this can @@ -290,8 +290,8 @@ type CloudWatchLogsInitialPosition string // Enum values for CloudWatchLogsInitialPosition const ( - CloudWatchLogsInitialPositionStart_of_file CloudWatchLogsInitialPosition = "start_of_file" - CloudWatchLogsInitialPositionEnd_of_file CloudWatchLogsInitialPosition = "end_of_file" + CloudWatchLogsInitialPositionStartOfFile CloudWatchLogsInitialPosition = "start_of_file" + CloudWatchLogsInitialPositionEndOfFile CloudWatchLogsInitialPosition = "end_of_file" ) // Values returns all known values for CloudWatchLogsInitialPosition. 
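// Editor's note: the CloudWatchLogs enum constants above were renamed to drop
// underscores from their Go names while keeping the same wire values. A
// minimal usage sketch, assuming the CloudWatchLogsLogStream fields
// (Encoding, InitialPosition, File, LogGroupName) from the opsworks types
// package:
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/opsworks/types"
)

func main() {
	stream := types.CloudWatchLogsLogStream{
		Encoding:        types.CloudWatchLogsEncodingUtf8,               // was CloudWatchLogsEncodingUtf_8
		InitialPosition: types.CloudWatchLogsInitialPositionStartOfFile, // was ...Start_of_file
		File:            aws.String("/var/log/myapp.log"),
		LogGroupName:    aws.String("my-app-logs"),
	}
	fmt.Println(stream.Encoding, stream.InitialPosition)
}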
Note that @@ -327,18 +327,18 @@ type DeploymentCommandName string // Enum values for DeploymentCommandName const ( - DeploymentCommandNameInstall_dependencies DeploymentCommandName = "install_dependencies" - DeploymentCommandNameUpdate_dependencies DeploymentCommandName = "update_dependencies" - DeploymentCommandNameUpdate_custom_cookbooks DeploymentCommandName = "update_custom_cookbooks" - DeploymentCommandNameExecute_recipes DeploymentCommandName = "execute_recipes" - DeploymentCommandNameConfigure DeploymentCommandName = "configure" - DeploymentCommandNameSetup DeploymentCommandName = "setup" - DeploymentCommandNameDeploy DeploymentCommandName = "deploy" - DeploymentCommandNameRollback DeploymentCommandName = "rollback" - DeploymentCommandNameStart DeploymentCommandName = "start" - DeploymentCommandNameStop DeploymentCommandName = "stop" - DeploymentCommandNameRestart DeploymentCommandName = "restart" - DeploymentCommandNameUndeploy DeploymentCommandName = "undeploy" + DeploymentCommandNameInstallDependencies DeploymentCommandName = "install_dependencies" + DeploymentCommandNameUpdateDependencies DeploymentCommandName = "update_dependencies" + DeploymentCommandNameUpdateCustomCookbooks DeploymentCommandName = "update_custom_cookbooks" + DeploymentCommandNameExecuteRecipes DeploymentCommandName = "execute_recipes" + DeploymentCommandNameConfigure DeploymentCommandName = "configure" + DeploymentCommandNameSetup DeploymentCommandName = "setup" + DeploymentCommandNameDeploy DeploymentCommandName = "deploy" + DeploymentCommandNameRollback DeploymentCommandName = "rollback" + DeploymentCommandNameStart DeploymentCommandName = "start" + DeploymentCommandNameStop DeploymentCommandName = "stop" + DeploymentCommandNameRestart DeploymentCommandName = "restart" + DeploymentCommandNameUndeploy DeploymentCommandName = "undeploy" ) // Values returns all known values for DeploymentCommandName. Note that this can be diff --git a/service/opsworks/types/types.go b/service/opsworks/types/types.go index 3d2000545c7..d804ae21227 100644 --- a/service/opsworks/types/types.go +++ b/service/opsworks/types/types.go @@ -252,43 +252,42 @@ type Command struct { // The command status: // - // * failed + // * failed // - // * successful + // * successful // - // * skipped + // * skipped // - // * - // pending + // * pending Status *string // The command type: // - // * configure + // * configure // - // * deploy + // * deploy // - // * execute_recipes + // * execute_recipes // - // * + // * // install_dependencies // - // * restart + // * restart // - // * rollback + // * rollback // - // * setup - // - // * start + // * setup // + // * start // // * stop // - // * undeploy + // * + // undeploy // - // * update_custom_cookbooks + // * update_custom_cookbooks // - // * update_dependencies + // * update_dependencies Type *string } @@ -349,11 +348,11 @@ type Deployment struct { // The deployment status: // - // * running + // * running // - // * successful + // * successful // - // * failed + // * failed Status *string } @@ -363,44 +362,43 @@ type DeploymentCommand struct { // Specifies the operation. You can specify only one command. For stacks, the // following commands are available: // - // * execute_recipes: Execute one or more + // * execute_recipes: Execute one or more // recipes. To specify the recipes, set an Args parameter named recipes to the list // of recipes to be executed. For example, to execute phpapp::appsetup, set Args to // {"recipes":["phpapp::appsetup"]}. 
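// Editor's sketch of the execute_recipes command just described, using the
// renamed DeploymentCommandNameExecuteRecipes constant and the Args shape
// shown for DeploymentCommand (map[string][]*string):
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/opsworks/types"
)

func main() {
	cmd := types.DeploymentCommand{
		Name: types.DeploymentCommandNameExecuteRecipes,
		Args: map[string][]*string{
			"recipes": {aws.String("phpapp::appsetup")},
		},
	}
	fmt.Println(cmd.Name, len(cmd.Args))
}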
// - // * install_dependencies: Install the - // stack's dependencies. + // * install_dependencies: Install the stack's + // dependencies. // - // * update_custom_cookbooks: Update the stack's custom + // * update_custom_cookbooks: Update the stack's custom // cookbooks. // - // * update_dependencies: Update the stack's dependencies. + // * update_dependencies: Update the stack's dependencies. // // The // update_dependencies and install_dependencies commands are supported only for // Linux instances. You can run the commands successfully on Windows instances, but // they do nothing. For apps, the following commands are available: // - // * deploy: + // * deploy: // Deploy an app. Ruby on Rails apps have an optional Args parameter named migrate. // Set Args to {"migrate":["true"]} to migrate the database. The default setting is // {"migrate":["false"]}. // - // * rollback Roll the app back to the previous - // version. When you update an app, AWS OpsWorks Stacks stores the previous - // version, up to a maximum of five versions. You can use this command to roll an - // app back as many as four versions. - // - // * start: Start the app's web or - // application server. + // * rollback Roll the app back to the previous version. + // When you update an app, AWS OpsWorks Stacks stores the previous version, up to a + // maximum of five versions. You can use this command to roll an app back as many + // as four versions. // - // * stop: Stop the app's web or application server. + // * start: Start the app's web or application server. // + // * stop: + // Stop the app's web or application server. // - // * restart: Restart the app's web or application server. + // * restart: Restart the app's web or + // application server. // - // * undeploy: - // Undeploy the app. + // * undeploy: Undeploy the app. // // This member is required. Name DeploymentCommandName @@ -410,18 +408,18 @@ type DeploymentCommand struct { // "arg_name2" : ["value1", "value2", ...], ...} The update_dependencies command // takes two arguments: // - // * upgrade_os_to - Specifies the desired Amazon Linux + // * upgrade_os_to - Specifies the desired Amazon Linux // version for instances whose OS you want to upgrade, such as Amazon Linux // 2016.09. You must also set the allow_reboot argument to true. // - // * - // allow_reboot - Specifies whether to allow AWS OpsWorks Stacks to reboot the - // instances if necessary, after installing the updates. This argument can be set - // to either true or false. The default value is false. + // * allow_reboot - + // Specifies whether to allow AWS OpsWorks Stacks to reboot the instances if + // necessary, after installing the updates. This argument can be set to either true + // or false. The default value is false. // - // For example, to upgrade an - // instance to Amazon Linux 2016.09, set Args to the following. { - // "upgrade_os_to":["Amazon Linux 2016.09"], "allow_reboot":["true"] } + // For example, to upgrade an instance to + // Amazon Linux 2016.09, set Args to the following. 
{ "upgrade_os_to":["Amazon + // Linux 2016.09"], "allow_reboot":["true"] } Args map[string][]*string } @@ -681,38 +679,37 @@ type Instance struct { // The instance status: // - // * booting + // * booting // - // * connection_lost + // * connection_lost // - // * online + // * online // - // * - // pending + // * pending // - // * rebooting + // * + // rebooting // - // * requested + // * requested // - // * running_setup + // * running_setup // - // * - // setup_failed + // * setup_failed // - // * shutting_down + // * shutting_down // - // * start_failed + // * + // start_failed // - // * stop_failed + // * stop_failed // - // * - // stopped + // * stopped // - // * stopping + // * stopping // - // * terminated + // * terminated // - // * terminating + // * terminating Status *string // The instance's subnet ID; applicable only if the stack is running in a VPC. @@ -984,19 +981,19 @@ type Permission struct { // The user's permission level, which must be the following: // - // * deny + // * deny // - // * - // show + // * show // - // * deploy + // * + // deploy // - // * manage + // * manage // - // * iam_only + // * iam_only // - // For more information on the - // permissions associated with these levels, see Managing User Permissions + // For more information on the permissions associated + // with these levels, see Managing User Permissions // (https://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html) Level *string @@ -1183,14 +1180,14 @@ type Source struct { // When included in a request, the parameter depends on the repository type. // - // * - // For Amazon S3 bundles, set Password to the appropriate IAM secret access key. + // * For + // Amazon S3 bundles, set Password to the appropriate IAM secret access key. // + // * For + // HTTP bundles and Subversion repositories, set Password to the password. // - // * For HTTP bundles and Subversion repositories, set Password to the - // password. - // - // For more information on how to safely handle IAM credentials, see + // For + // more information on how to safely handle IAM credentials, see // https://docs.aws.amazon.com/general/latest/gr/aws-access-keys-best-practices.html // (https://docs.aws.amazon.com/general/latest/gr/aws-access-keys-best-practices.html). // In responses, AWS OpsWorks Stacks returns *****FILTERED***** instead of the @@ -1216,10 +1213,10 @@ type Source struct { // This parameter depends on the repository type. // - // * For Amazon S3 bundles, set + // * For Amazon S3 bundles, set // Username to the appropriate IAM access key ID. // - // * For HTTP bundles, Git + // * For HTTP bundles, Git // repositories, and Subversion repositories, set Username to the user name. Username *string } @@ -1470,23 +1467,23 @@ type Volume struct { // The volume type. For more information, see Amazon EBS Volume Types // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html). // - // - // * standard - Magnetic. Magnetic volumes must have a minimum size of 1 GiB and a + // * + // standard - Magnetic. Magnetic volumes must have a minimum size of 1 GiB and a // maximum size of 1024 GiB. // - // * io1 - Provisioned IOPS (SSD). PIOPS volumes - // must have a minimum size of 4 GiB and a maximum size of 16384 GiB. + // * io1 - Provisioned IOPS (SSD). PIOPS volumes must + // have a minimum size of 4 GiB and a maximum size of 16384 GiB. // - // * gp2 - - // General Purpose (SSD). General purpose volumes must have a minimum size of 1 GiB - // and a maximum size of 16384 GiB. 
+ // * gp2 - General + // Purpose (SSD). General purpose volumes must have a minimum size of 1 GiB and a + // maximum size of 16384 GiB. // - // * st1 - Throughput Optimized hard disk - // drive (HDD). Throughput optimized HDD volumes must have a minimum size of 500 - // GiB and a maximum size of 16384 GiB. + // * st1 - Throughput Optimized hard disk drive (HDD). + // Throughput optimized HDD volumes must have a minimum size of 500 GiB and a + // maximum size of 16384 GiB. // - // * sc1 - Cold HDD. Cold HDD volumes - // must have a minimum size of 500 GiB and a maximum size of 16384 GiB. + // * sc1 - Cold HDD. Cold HDD volumes must have a + // minimum size of 500 GiB and a maximum size of 16384 GiB. VolumeType *string } @@ -1522,34 +1519,34 @@ type VolumeConfiguration struct { // The volume type. For more information, see Amazon EBS Volume Types // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html). // - // - // * standard - Magnetic. Magnetic volumes must have a minimum size of 1 GiB and a + // * + // standard - Magnetic. Magnetic volumes must have a minimum size of 1 GiB and a // maximum size of 1024 GiB. // - // * io1 - Provisioned IOPS (SSD). PIOPS volumes - // must have a minimum size of 4 GiB and a maximum size of 16384 GiB. + // * io1 - Provisioned IOPS (SSD). PIOPS volumes must + // have a minimum size of 4 GiB and a maximum size of 16384 GiB. // - // * gp2 - - // General Purpose (SSD). General purpose volumes must have a minimum size of 1 GiB - // and a maximum size of 16384 GiB. + // * gp2 - General + // Purpose (SSD). General purpose volumes must have a minimum size of 1 GiB and a + // maximum size of 16384 GiB. // - // * st1 - Throughput Optimized hard disk - // drive (HDD). Throughput optimized HDD volumes must have a minimum size of 500 - // GiB and a maximum size of 16384 GiB. + // * st1 - Throughput Optimized hard disk drive (HDD). + // Throughput optimized HDD volumes must have a minimum size of 500 GiB and a + // maximum size of 16384 GiB. // - // * sc1 - Cold HDD. Cold HDD volumes - // must have a minimum size of 500 GiB and a maximum size of 16384 GiB. + // * sc1 - Cold HDD. Cold HDD volumes must have a + // minimum size of 500 GiB and a maximum size of 16384 GiB. VolumeType *string } // Describes a time-based instance's auto scaling schedule. The schedule consists // of a set of key-value pairs. // -// * The key is the time period (a UTC hour) and -// must be an integer from 0 - 23. +// * The key is the time period (a UTC hour) and must +// be an integer from 0 - 23. // -// * The value indicates whether the instance -// should be online or offline for the specified period, and must be set to "on" or +// * The value indicates whether the instance should be +// online or offline for the specified period, and must be set to "on" or // "off" // // The default setting for all time periods is off, so you use the following diff --git a/service/opsworkscm/api_op_AssociateNode.go b/service/opsworkscm/api_op_AssociateNode.go index c0f44e07b2a..0cf241abbf9 100644 --- a/service/opsworkscm/api_op_AssociateNode.go +++ b/service/opsworkscm/api_op_AssociateNode.go @@ -46,18 +46,18 @@ type AssociateNodeInput struct { // Engine attributes used for associating the node. Attributes accepted in a // AssociateNode request for Chef // - // * CHEF_ORGANIZATION: The Chef organization - // with which the node is associated. By default only one organization named - // default can exist. + // * CHEF_ORGANIZATION: The Chef organization with + // which the node is associated. 
By default only one organization named default can + // exist. // - // * CHEF_NODE_PUBLIC_KEY: A PEM-formatted public key. This - // key is required for the chef-client agent to access the Chef API. + // * CHEF_NODE_PUBLIC_KEY: A PEM-formatted public key. This key is required + // for the chef-client agent to access the Chef API. // - // Attributes - // accepted in a AssociateNode request for Puppet + // Attributes accepted in a + // AssociateNode request for Puppet // - // * PUPPET_NODE_CSR: A - // PEM-formatted certificate-signing request (CSR) that is created by the node. + // * PUPPET_NODE_CSR: A PEM-formatted + // certificate-signing request (CSR) that is created by the node. // // This member is required. EngineAttributes []*types.EngineAttribute diff --git a/service/opsworkscm/api_op_CreateBackup.go b/service/opsworkscm/api_op_CreateBackup.go index 98f705e7cb3..c6a22e37efb 100644 --- a/service/opsworkscm/api_op_CreateBackup.go +++ b/service/opsworkscm/api_op_CreateBackup.go @@ -48,22 +48,21 @@ type CreateBackupInput struct { // A map that contains tag keys and tag values to attach to an AWS OpsWorks-CM // server backup. // - // * The key cannot be empty. + // * The key cannot be empty. // - // * The key can be a maximum - // of 127 characters, and can contain only Unicode letters, numbers, or separators, - // or the following special characters: + - = . _ : / + // * The key can be a maximum of 127 + // characters, and can contain only Unicode letters, numbers, or separators, or the + // following special characters: + - = . _ : / // - // * The value can be a - // maximum 255 characters, and contain only Unicode letters, numbers, or - // separators, or the following special characters: + - = . _ : / + // * The value can be a maximum 255 + // characters, and contain only Unicode letters, numbers, or separators, or the + // following special characters: + - = . _ : / // - // * Leading - // and trailing white spaces are trimmed from both the key and value. + // * Leading and trailing white spaces + // are trimmed from both the key and value. // - // * A - // maximum of 50 user-applied tags is allowed for tag-supported AWS OpsWorks-CM - // resources. + // * A maximum of 50 user-applied tags is + // allowed for tag-supported AWS OpsWorks-CM resources. Tags []*types.Tag } diff --git a/service/opsworkscm/api_op_CreateServer.go b/service/opsworkscm/api_op_CreateServer.go index f367c8ba7ce..7842b1f8a63 100644 --- a/service/opsworkscm/api_op_CreateServer.go +++ b/service/opsworkscm/api_op_CreateServer.go @@ -107,23 +107,23 @@ type CreateServerInput struct { // must also specify values for CustomDomain and CustomPrivateKey. The following // are requirements for the CustomCertificate value: // - // * You can provide either - // a self-signed, custom certificate, or the full certificate chain. + // * You can provide either a + // self-signed, custom certificate, or the full certificate chain. // - // * The + // * The // certificate must be a valid X509 certificate, or a certificate chain in PEM // format. // - // * The certificate must be valid at the time of upload. A - // certificate can't be used before its validity period begins (the certificate's - // NotBefore date), or after it expires (the certificate's NotAfter date). + // * The certificate must be valid at the time of upload. A certificate + // can't be used before its validity period begins (the certificate's NotBefore + // date), or after it expires (the certificate's NotAfter date). 
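// Editor's sketch of the Chef AssociateNode attributes described above.
// EngineAttribute's Name/Value fields, plus the required ServerName and
// NodeName members, are assumed from the OpsWorks CM API (not shown in this
// hunk); the key value is a placeholder.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/opsworkscm"
	"github.com/aws/aws-sdk-go-v2/service/opsworkscm/types"
)

func main() {
	input := &opsworkscm.AssociateNodeInput{
		ServerName: aws.String("my-chef-server"), // placeholder
		NodeName:   aws.String("node-01"),        // placeholder
		EngineAttributes: []*types.EngineAttribute{
			{Name: aws.String("CHEF_ORGANIZATION"), Value: aws.String("default")},
			{Name: aws.String("CHEF_NODE_PUBLIC_KEY"), Value: aws.String("-----BEGIN PUBLIC KEY-----\n...\n-----END PUBLIC KEY-----")},
		},
	}
	fmt.Println(len(input.EngineAttributes))
}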
// - // * - // The certificate’s common name or subject alternative names (SANs), if present, - // must match the value of CustomDomain. + // * The + // certificate’s common name or subject alternative names (SANs), if present, must + // match the value of CustomDomain. // - // * The certificate must match the - // value of CustomPrivateKey. + // * The certificate must match the value of + // CustomPrivateKey. CustomCertificate *string // An optional public endpoint of a server, such as https://aws.my-company.com. To @@ -148,33 +148,33 @@ type CreateServerInput struct { // Optional engine attributes on a specified server. Attributes accepted in a Chef // createServer request: // - // * CHEF_AUTOMATE_PIVOTAL_KEY: A base64-encoded RSA - // public key. The corresponding private key is required to access the Chef API. - // When no CHEF_AUTOMATE_PIVOTAL_KEY is set, a private key is generated and - // returned in the response. + // * CHEF_AUTOMATE_PIVOTAL_KEY: A base64-encoded RSA public + // key. The corresponding private key is required to access the Chef API. When no + // CHEF_AUTOMATE_PIVOTAL_KEY is set, a private key is generated and returned in the + // response. // - // * CHEF_AUTOMATE_ADMIN_PASSWORD: The password for - // the administrative user in the Chef Automate web-based dashboard. The password - // length is a minimum of eight characters, and a maximum of 32. The password can - // contain letters, numbers, and special characters (!/@#$%^&+=_). The password - // must contain at least one lower case letter, one upper case letter, one number, - // and one special character. When no CHEF_AUTOMATE_ADMIN_PASSWORD is set, one is - // generated and returned in the response. + // * CHEF_AUTOMATE_ADMIN_PASSWORD: The password for the administrative + // user in the Chef Automate web-based dashboard. The password length is a minimum + // of eight characters, and a maximum of 32. The password can contain letters, + // numbers, and special characters (!/@#$%^&+=_). The password must contain at + // least one lower case letter, one upper case letter, one number, and one special + // character. When no CHEF_AUTOMATE_ADMIN_PASSWORD is set, one is generated and + // returned in the response. // - // Attributes accepted in a Puppet - // createServer request: + // Attributes accepted in a Puppet createServer + // request: // - // * PUPPET_ADMIN_PASSWORD: To work with the Puppet - // Enterprise console, a password must use ASCII characters. + // * PUPPET_ADMIN_PASSWORD: To work with the Puppet Enterprise console, a + // password must use ASCII characters. // - // * - // PUPPET_R10K_REMOTE: The r10k remote is the URL of your control repository (for - // example, ssh://git@your.git-repo.com:user/control-repo.git). Specifying an r10k - // remote opens TCP port 8170. + // * PUPPET_R10K_REMOTE: The r10k remote is + // the URL of your control repository (for example, + // ssh://git@your.git-repo.com:user/control-repo.git). Specifying an r10k remote + // opens TCP port 8170. // - // * PUPPET_R10K_PRIVATE_KEY: If you are using a - // private Git repository, add PUPPET_R10K_PRIVATE_KEY to specify a PEM-encoded - // private SSH key. + // * PUPPET_R10K_PRIVATE_KEY: If you are using a private Git + // repository, add PUPPET_R10K_PRIVATE_KEY to specify a PEM-encoded private SSH + // key. EngineAttributes []*types.EngineAttribute // The engine model of the server. 
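// Editor's sketch of the Chef createServer attributes listed above. Only
// EngineAttributes (shown in the hunk) is populated; a real CreateServer call
// also needs required members such as ServerName that lie outside this diff.
// EngineAttribute's Name/Value fields are assumed from the types package.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/opsworkscm"
	"github.com/aws/aws-sdk-go-v2/service/opsworkscm/types"
)

func main() {
	input := &opsworkscm.CreateServerInput{
		EngineAttributes: []*types.EngineAttribute{
			// Password rules per the docs above: 8-32 characters, at least one
			// lower case letter, one upper case letter, one number, and one of
			// the special characters !/@#$%^&+=_ .
			{Name: aws.String("CHEF_AUTOMATE_ADMIN_PASSWORD"), Value: aws.String("Ex4mple_Passw0rd!")},
		},
	}
	fmt.Println(len(input.EngineAttributes))
}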
Valid values in this release include Monolithic @@ -195,10 +195,10 @@ type CreateServerInput struct { // application-level data on your server if automated backups are enabled. Valid // values must be specified in one of the following formats: // - // * HH:MM for daily + // * HH:MM for daily // backups // - // * DDD:HH:MM for weekly backups + // * DDD:HH:MM for weekly backups // // MM must be specified as 00. The // specified time is in coordinated universal time (UTC). The default value is a @@ -236,22 +236,22 @@ type CreateServerInput struct { // A map that contains tag keys and tag values to attach to an AWS OpsWorks for // Chef Automate or AWS OpsWorks for Puppet Enterprise server. // - // * The key - // cannot be empty. + // * The key cannot be + // empty. // - // * The key can be a maximum of 127 characters, and can - // contain only Unicode letters, numbers, or separators, or the following special - // characters: + - = . _ : / @ + // * The key can be a maximum of 127 characters, and can contain only + // Unicode letters, numbers, or separators, or the following special characters: + + // - = . _ : / @ // - // * The value can be a maximum 255 characters, - // and contain only Unicode letters, numbers, or separators, or the following - // special characters: + - = . _ : / @ + // * The value can be a maximum 255 characters, and contain only + // Unicode letters, numbers, or separators, or the following special characters: + + // - = . _ : / @ // - // * Leading and trailing white spaces are - // trimmed from both the key and value. + // * Leading and trailing white spaces are trimmed from both the key + // and value. // - // * A maximum of 50 user-applied tags is - // allowed for any AWS OpsWorks-CM server. + // * A maximum of 50 user-applied tags is allowed for any AWS + // OpsWorks-CM server. Tags []*types.Tag } diff --git a/service/opsworkscm/api_op_DescribeNodeAssociationStatus.go b/service/opsworkscm/api_op_DescribeNodeAssociationStatus.go index e76b76afe0d..2b2235e66c8 100644 --- a/service/opsworkscm/api_op_DescribeNodeAssociationStatus.go +++ b/service/opsworkscm/api_op_DescribeNodeAssociationStatus.go @@ -53,14 +53,14 @@ type DescribeNodeAssociationStatusOutput struct { // The status of the association or disassociation request. Possible values: // - // * + // * // SUCCESS: The association or disassociation succeeded. // - // * FAILED: The - // association or disassociation failed. + // * FAILED: The association + // or disassociation failed. // - // * IN_PROGRESS: The association or - // disassociation is still in progress. + // * IN_PROGRESS: The association or disassociation is + // still in progress. NodeAssociationStatus types.NodeAssociationStatus // Metadata pertaining to the operation's result. diff --git a/service/opsworkscm/api_op_DisassociateNode.go b/service/opsworkscm/api_op_DisassociateNode.go index 39c23ad825a..f0f91ca4910 100644 --- a/service/opsworkscm/api_op_DisassociateNode.go +++ b/service/opsworkscm/api_op_DisassociateNode.go @@ -50,7 +50,7 @@ type DisassociateNodeInput struct { // required for Puppet. Attributes required in a DisassociateNode request for // Chef // - // * CHEF_ORGANIZATION: The Chef organization with which the node was + // * CHEF_ORGANIZATION: The Chef organization with which the node was // associated. By default only one organization named default can exist. 
EngineAttributes []*types.EngineAttribute } diff --git a/service/opsworkscm/api_op_ExportServerEngineAttribute.go b/service/opsworkscm/api_op_ExportServerEngineAttribute.go index dbc73f2866f..7ada933a4e6 100644 --- a/service/opsworkscm/api_op_ExportServerEngineAttribute.go +++ b/service/opsworkscm/api_op_ExportServerEngineAttribute.go @@ -52,22 +52,21 @@ type ExportServerEngineAttributeInput struct { // value. For the Userdata ExportAttributeName, the following are supported engine // attribute names. // - // * RunList In Chef, a list of roles or recipes that are run - // in the specified order. In Puppet, this parameter is ignored. + // * RunList In Chef, a list of roles or recipes that are run in + // the specified order. In Puppet, this parameter is ignored. // - // * - // OrganizationName In Chef, an organization name. AWS OpsWorks for Chef Automate - // always creates the organization default. In Puppet, this parameter is ignored. + // * OrganizationName + // In Chef, an organization name. AWS OpsWorks for Chef Automate always creates the + // organization default. In Puppet, this parameter is ignored. // + // * NodeEnvironment + // In Chef, a node environment (for example, development, staging, or one-box). In + // Puppet, this parameter is ignored. // - // * NodeEnvironment In Chef, a node environment (for example, development, - // staging, or one-box). In Puppet, this parameter is ignored. - // - // * - // NodeClientVersion In Chef, the version of the Chef engine (three numbers - // separated by dots, such as 13.8.5). If this attribute is empty, OpsWorks for - // Chef Automate uses the most current version. In Puppet, this parameter is - // ignored. + // * NodeClientVersion In Chef, the version of + // the Chef engine (three numbers separated by dots, such as 13.8.5). If this + // attribute is empty, OpsWorks for Chef Automate uses the most current version. In + // Puppet, this parameter is ignored. InputAttributes []*types.EngineAttribute } diff --git a/service/opsworkscm/api_op_StartMaintenance.go b/service/opsworkscm/api_op_StartMaintenance.go index 551029baebd..a627b718621 100644 --- a/service/opsworkscm/api_op_StartMaintenance.go +++ b/service/opsworkscm/api_op_StartMaintenance.go @@ -43,7 +43,7 @@ type StartMaintenanceInput struct { // Engine attributes that are specific to the server on which you want to run // maintenance. Attributes accepted in a StartMaintenance request for Chef // - // * + // * // CHEF_MAJOR_UPGRADE: If a Chef Automate server is eligible for upgrade to Chef // Automate 2, add this engine attribute to a StartMaintenance request and set the // value to true to upgrade the server to Chef Automate 2. For more information, diff --git a/service/opsworkscm/api_op_TagResource.go b/service/opsworkscm/api_op_TagResource.go index 95f91ea948d..612bc910b47 100644 --- a/service/opsworkscm/api_op_TagResource.go +++ b/service/opsworkscm/api_op_TagResource.go @@ -40,22 +40,21 @@ type TagResourceInput struct { // A map that contains tag keys and tag values to attach to AWS OpsWorks-CM servers // or backups. // - // * The key cannot be empty. + // * The key cannot be empty. // - // * The key can be a maximum of - // 127 characters, and can contain only Unicode letters, numbers, or separators, or - // the following special characters: + - = . _ : / + // * The key can be a maximum of 127 + // characters, and can contain only Unicode letters, numbers, or separators, or the + // following special characters: + - = . 
_ : / // - // * The value can be a - // maximum 255 characters, and contain only Unicode letters, numbers, or - // separators, or the following special characters: + - = . _ : / + // * The value can be a maximum 255 + // characters, and contain only Unicode letters, numbers, or separators, or the + // following special characters: + - = . _ : / // - // * Leading - // and trailing white spaces are trimmed from both the key and value. + // * Leading and trailing white spaces + // are trimmed from both the key and value. // - // * A - // maximum of 50 user-applied tags is allowed for any AWS OpsWorks-CM server or - // backup. + // * A maximum of 50 user-applied tags is + // allowed for any AWS OpsWorks-CM server or backup. // // This member is required. Tags []*types.Tag diff --git a/service/opsworkscm/doc.go b/service/opsworkscm/doc.go index c9ee030eb12..defe76d0fb7 100644 --- a/service/opsworkscm/doc.go +++ b/service/opsworkscm/doc.go @@ -9,7 +9,7 @@ // Enterprise servers, and add or remove nodes for the servers to manage. Glossary // of terms // -// * Server: A configuration management server that can be +// * Server: A configuration management server that can be // highly-available. The configuration management server runs on an Amazon Elastic // Compute Cloud (EC2) instance, and may use various other AWS services, such as // Amazon Relational Database Service (RDS) and Elastic Load Balancing. A server is @@ -17,22 +17,22 @@ // like Amazon RDS. In AWS OpsWorks CM, you do not start or stop servers. After you // create servers, they continue to run until they are deleted. // -// * Engine: The +// * Engine: The // engine is the specific configuration manager that you want to use. Valid values // in this release include ChefAutomate and Puppet. // -// * Backup: This is an +// * Backup: This is an // application-level backup of the data that the configuration manager stores. AWS // OpsWorks CM creates an S3 bucket for backups when you launch the first server. A // backup maintains a snapshot of a server's configuration-related attributes at // the time the backup starts. // -// * Events: Events are always related to a -// server. Events are written during server creation, when health checks run, when -// backups are created, when system maintenance is performed, etc. When you delete -// a server, the server's events are also deleted. +// * Events: Events are always related to a server. +// Events are written during server creation, when health checks run, when backups +// are created, when system maintenance is performed, etc. When you delete a +// server, the server's events are also deleted. // -// * Account attributes: Every +// * Account attributes: Every // account has attributes that are assigned in the AWS OpsWorks CM database. These // attributes store information about configuration limits (servers, backups, etc.) // and your customer account. @@ -42,29 +42,29 @@ // servers can only be accessed or managed within the endpoint in which they are // created. 
// -// * opsworks-cm.us-east-1.amazonaws.com +// * opsworks-cm.us-east-1.amazonaws.com // -// * +// * // opsworks-cm.us-east-2.amazonaws.com // -// * opsworks-cm.us-west-1.amazonaws.com +// * opsworks-cm.us-west-1.amazonaws.com // +// * +// opsworks-cm.us-west-2.amazonaws.com // -// * opsworks-cm.us-west-2.amazonaws.com -// -// * +// * // opsworks-cm.ap-northeast-1.amazonaws.com // -// * +// * // opsworks-cm.ap-southeast-1.amazonaws.com // -// * +// * // opsworks-cm.ap-southeast-2.amazonaws.com // -// * +// * // opsworks-cm.eu-central-1.amazonaws.com // -// * +// * // opsworks-cm.eu-west-1.amazonaws.com // // For more information, see AWS OpsWorks diff --git a/service/opsworkscm/types/enums.go b/service/opsworkscm/types/enums.go index d96cd9ec8ca..b00c05f0954 100644 --- a/service/opsworkscm/types/enums.go +++ b/service/opsworkscm/types/enums.go @@ -6,10 +6,10 @@ type BackupStatus string // Enum values for BackupStatus const ( - BackupStatusIn_progress BackupStatus = "IN_PROGRESS" - BackupStatusOk BackupStatus = "OK" - BackupStatusFailed BackupStatus = "FAILED" - BackupStatusDeleting BackupStatus = "DELETING" + BackupStatusInProgress BackupStatus = "IN_PROGRESS" + BackupStatusOk BackupStatus = "OK" + BackupStatusFailed BackupStatus = "FAILED" + BackupStatusDeleting BackupStatus = "DELETING" ) // Values returns all known values for BackupStatus. Note that this can be expanded @@ -64,9 +64,9 @@ type NodeAssociationStatus string // Enum values for NodeAssociationStatus const ( - NodeAssociationStatusSuccess NodeAssociationStatus = "SUCCESS" - NodeAssociationStatusFailed NodeAssociationStatus = "FAILED" - NodeAssociationStatusIn_progress NodeAssociationStatus = "IN_PROGRESS" + NodeAssociationStatusSuccess NodeAssociationStatus = "SUCCESS" + NodeAssociationStatusFailed NodeAssociationStatus = "FAILED" + NodeAssociationStatusInProgress NodeAssociationStatus = "IN_PROGRESS" ) // Values returns all known values for NodeAssociationStatus. Note that this can be @@ -84,19 +84,19 @@ type ServerStatus string // Enum values for ServerStatus const ( - ServerStatusBacking_up ServerStatus = "BACKING_UP" - ServerStatusConnection_lost ServerStatus = "CONNECTION_LOST" - ServerStatusCreating ServerStatus = "CREATING" - ServerStatusDeleting ServerStatus = "DELETING" - ServerStatusModifying ServerStatus = "MODIFYING" - ServerStatusFailed ServerStatus = "FAILED" - ServerStatusHealthy ServerStatus = "HEALTHY" - ServerStatusRunning ServerStatus = "RUNNING" - ServerStatusRestoring ServerStatus = "RESTORING" - ServerStatusSetup ServerStatus = "SETUP" - ServerStatusUnder_maintenance ServerStatus = "UNDER_MAINTENANCE" - ServerStatusUnhealthy ServerStatus = "UNHEALTHY" - ServerStatusTerminated ServerStatus = "TERMINATED" + ServerStatusBackingUp ServerStatus = "BACKING_UP" + ServerStatusConnectionLost ServerStatus = "CONNECTION_LOST" + ServerStatusCreating ServerStatus = "CREATING" + ServerStatusDeleting ServerStatus = "DELETING" + ServerStatusModifying ServerStatus = "MODIFYING" + ServerStatusFailed ServerStatus = "FAILED" + ServerStatusHealthy ServerStatus = "HEALTHY" + ServerStatusRunning ServerStatus = "RUNNING" + ServerStatusRestoring ServerStatus = "RESTORING" + ServerStatusSetup ServerStatus = "SETUP" + ServerStatusUnderMaintenance ServerStatus = "UNDER_MAINTENANCE" + ServerStatusUnhealthy ServerStatus = "UNHEALTHY" + ServerStatusTerminated ServerStatus = "TERMINATED" ) // Values returns all known values for ServerStatus. 
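// Editor's note: the opsworkscm status constants above were renamed to drop
// underscores (e.g. ServerStatusUnder_maintenance -> ServerStatusUnderMaintenance)
// while keeping the same string values. A small sketch of switching on them:
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/opsworkscm/types"
)

// isTransient reports whether the server is still changing state, using the
// renamed ServerStatus constants from this change.
func isTransient(s types.ServerStatus) bool {
	switch s {
	case types.ServerStatusCreating,
		types.ServerStatusBackingUp,
		types.ServerStatusModifying,
		types.ServerStatusRestoring,
		types.ServerStatusUnderMaintenance,
		types.ServerStatusDeleting:
		return true
	}
	return false
}

func main() {
	fmt.Println(isTransient(types.ServerStatusHealthy))          // false
	fmt.Println(isTransient(types.ServerStatusUnderMaintenance)) // true
}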
Note that this can be expanded diff --git a/service/opsworkscm/types/types.go b/service/opsworkscm/types/types.go index ec5ce25cba2..fc245c0ab1f 100644 --- a/service/opsworkscm/types/types.go +++ b/service/opsworkscm/types/types.go @@ -14,13 +14,13 @@ type AccountAttribute struct { // The attribute name. The following are supported attribute names. // - // * - // ServerLimit: The number of current servers/maximum number of servers allowed. By - // default, you can have a maximum of 10 servers. + // * ServerLimit: + // The number of current servers/maximum number of servers allowed. By default, you + // can have a maximum of 10 servers. // - // * ManualBackupLimit: The - // number of current manual backups/maximum number of backups allowed. By default, - // you can have a maximum of 50 manual backups saved. + // * ManualBackupLimit: The number of current + // manual backups/maximum number of backups allowed. By default, you can have a + // maximum of 50 manual backups saved. Name *string // The current usage, such as the current number of servers that are associated @@ -162,11 +162,11 @@ type Server struct { // CM; they are returned only as part of the result of createServer(). Attributes // returned in a createServer response for Chef // - // * CHEF_AUTOMATE_PIVOTAL_KEY: A + // * CHEF_AUTOMATE_PIVOTAL_KEY: A // base64-encoded RSA private key that is generated by AWS OpsWorks for Chef // Automate. This private key is required to access the Chef API. // - // * + // * // CHEF_STARTER_KIT: A base64-encoded ZIP file. The ZIP file contains a Chef // starter kit, which includes a README, a configuration file, and the required RSA // private key. Save this file, unzip it, and then change to the directory where @@ -175,14 +175,14 @@ type Server struct { // // Attributes returned in a createServer response for Puppet // - // * + // * // PUPPET_STARTER_KIT: A base64-encoded ZIP file. The ZIP file contains a Puppet // starter kit, including a README and a required private key. Save this file, // unzip it, and then change to the directory where you've unzipped the file // contents. // - // * PUPPET_ADMIN_PASSWORD: An administrator password that you can - // use to sign in to the Puppet Enterprise console after the server is online. + // * PUPPET_ADMIN_PASSWORD: An administrator password that you can use + // to sign in to the Puppet Enterprise console after the server is online. EngineAttributes []*EngineAttribute // The engine model of the server. Valid values in this release include Monolithic diff --git a/service/organizations/api_op_AcceptHandshake.go b/service/organizations/api_op_AcceptHandshake.go index 75c65e2276d..d380adf0fdc 100644 --- a/service/organizations/api_op_AcceptHandshake.go +++ b/service/organizations/api_op_AcceptHandshake.go @@ -15,7 +15,7 @@ import ( // proposed by the handshake request. This operation can be called only by the // following principals when they also have the relevant IAM permissions: // -// * +// * // Invitation to join or Approve all features request handshakes: only a principal // from the member account. The user who calls the API for an invitation to join // must have the organizations:AcceptHandshake permission. If you enabled all @@ -26,10 +26,9 @@ import ( // (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_integration_services.html#orgs_integration_service-linked-roles) // in the AWS Organizations User Guide. // -// * Enable all features final -// confirmation handshake: only a principal from the management account. 
For more -// information about invitations, see Inviting an AWS Account to Join Your -// Organization +// * Enable all features final confirmation +// handshake: only a principal from the management account. For more information +// about invitations, see Inviting an AWS Account to Join Your Organization // (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_invites.html) // in the AWS Organizations User Guide. For more information about requests to // enable all features in the organization, see Enabling All Features in Your diff --git a/service/organizations/api_op_AttachPolicy.go b/service/organizations/api_op_AttachPolicy.go index 5df04224e98..99937b7715f 100644 --- a/service/organizations/api_op_AttachPolicy.go +++ b/service/organizations/api_op_AttachPolicy.go @@ -14,20 +14,20 @@ import ( // account. How the policy affects accounts depends on the type of policy. Refer to // the AWS Organizations User Guide for information about each policy type: // -// * +// * // AISERVICES_OPT_OUT_POLICY // (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_ai-opt-out.html) // -// -// * BACKUP_POLICY +// * +// BACKUP_POLICY // (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_backup.html) // -// -// * SERVICE_CONTROL_POLICY +// * +// SERVICE_CONTROL_POLICY // (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_scp.html) // -// -// * TAG_POLICY +// * +// TAG_POLICY // (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_tag-policies.html) // // This @@ -64,16 +64,16 @@ type AttachPolicyInput struct { // (http://wikipedia.org/wiki/regex) for a target ID string requires one of the // following: // - // * Root - A string that begins with "r-" followed by from 4 to 32 + // * Root - A string that begins with "r-" followed by from 4 to 32 // lowercase letters or digits. // - // * Account - A string that consists of exactly - // 12 digits. + // * Account - A string that consists of exactly 12 + // digits. // - // * Organizational unit (OU) - A string that begins with "ou-" - // followed by from 4 to 32 lowercase letters or digits (the ID of the root that - // the OU is in). This string is followed by a second "-" dash and from 8 to 32 - // additional lowercase letters or digits. + // * Organizational unit (OU) - A string that begins with "ou-" followed + // by from 4 to 32 lowercase letters or digits (the ID of the root that the OU is + // in). This string is followed by a second "-" dash and from 8 to 32 additional + // lowercase letters or digits. // // This member is required. TargetId *string diff --git a/service/organizations/api_op_CreateAccount.go b/service/organizations/api_op_CreateAccount.go index 79de367347e..aa2398b9920 100644 --- a/service/organizations/api_op_CreateAccount.go +++ b/service/organizations/api_op_CreateAccount.go @@ -19,13 +19,13 @@ import ( // access the account. To check the status of the request, do one of the // following: // -// * Use the Id member of the CreateAccountStatus response element -// from this operation to provide as a parameter to the DescribeCreateAccountStatus +// * Use the Id member of the CreateAccountStatus response element from +// this operation to provide as a parameter to the DescribeCreateAccountStatus // operation. // -// * Check the AWS CloudTrail log for the CreateAccountResult -// event. 
For information on using AWS CloudTrail with AWS Organizations, see -// Monitoring the Activity in Your Organization +// * Check the AWS CloudTrail log for the CreateAccountResult event. +// For information on using AWS CloudTrail with AWS Organizations, see Monitoring +// the Activity in Your Organization // (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_monitoring.html) // in the AWS Organizations User Guide. // @@ -48,7 +48,7 @@ import ( // (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_create.html) // in the AWS Organizations User Guide. // -// * When you create an account in an +// * When you create an account in an // organization using the AWS Organizations console, API, or CLI commands, the // information required for the account to operate as a standalone account, such as // a payment method and signing the end user license agreement (EULA) is not @@ -58,20 +58,20 @@ import ( // (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) // in the AWS Organizations User Guide. // -// * If you get an exception that -// indicates that you exceeded your account limits for the organization, contact -// AWS Support (https://console.aws.amazon.com/support/home#/). +// * If you get an exception that indicates +// that you exceeded your account limits for the organization, contact AWS Support +// (https://console.aws.amazon.com/support/home#/). // -// * If you get -// an exception that indicates that the operation failed because your organization -// is still initializing, wait one hour and then try again. If the error persists, -// contact AWS Support (https://console.aws.amazon.com/support/home#/). +// * If you get an exception that +// indicates that the operation failed because your organization is still +// initializing, wait one hour and then try again. If the error persists, contact +// AWS Support (https://console.aws.amazon.com/support/home#/). // -// * -// Using CreateAccount to create multiple temporary accounts isn't recommended. You -// can only close an account from the Billing and Cost Management Console, and you -// must be signed in as the root user. For information on the requirements and -// process for closing an account, see Closing an AWS Account +// * Using +// CreateAccount to create multiple temporary accounts isn't recommended. You can +// only close an account from the Billing and Cost Management Console, and you must +// be signed in as the root user. For information on the requirements and process +// for closing an account, see Closing an AWS Account // (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_close.html) // in the AWS Organizations User Guide. // @@ -132,12 +132,12 @@ type CreateAccountInput struct { // role name defaults to OrganizationAccountAccessRole. 
For more information about // how to use this role to access the member account, see the following links: // - // - // * Accessing and Administering the Member Accounts in Your Organization + // * + // Accessing and Administering the Member Accounts in Your Organization // (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_access.html#orgs_manage_accounts_create-cross-account-role) // in the AWS Organizations User Guide // - // * Steps 2 and 3 in Tutorial: Delegate + // * Steps 2 and 3 in Tutorial: Delegate // Access Across AWS Accounts Using IAM Roles // (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_cross-account-with-roles.html) // in the IAM User Guide diff --git a/service/organizations/api_op_CreateGovCloudAccount.go b/service/organizations/api_op_CreateGovCloudAccount.go index 49170c25b09..6c900f31c24 100644 --- a/service/organizations/api_op_CreateGovCloudAccount.go +++ b/service/organizations/api_op_CreateGovCloudAccount.go @@ -13,35 +13,35 @@ import ( // This action is available if all of the following are true: // -// * You're -// authorized to create accounts in the AWS GovCloud (US) Region. For more -// information on the AWS GovCloud (US) Region, see the AWS GovCloud User Guide. +// * You're authorized +// to create accounts in the AWS GovCloud (US) Region. For more information on the +// AWS GovCloud (US) Region, see the AWS GovCloud User Guide. // (http://docs.aws.amazon.com/govcloud-us/latest/UserGuide/welcome.html) // -// * -// You already have an account in the AWS GovCloud (US) Region that is paired with -// a management account of an organization in the commercial Region. +// * You +// already have an account in the AWS GovCloud (US) Region that is paired with a +// management account of an organization in the commercial Region. // -// * You -// call this action from the management account of your organization in the -// commercial Region. +// * You call this +// action from the management account of your organization in the commercial +// Region. // -// * You have the organizations:CreateGovCloudAccount -// permission. +// * You have the organizations:CreateGovCloudAccount permission. // -// AWS Organizations automatically creates the required service-linked -// role named AWSServiceRoleForOrganizations. For more information, see AWS -// Organizations and Service-Linked Roles +// AWS +// Organizations automatically creates the required service-linked role named +// AWSServiceRoleForOrganizations. For more information, see AWS Organizations and +// Service-Linked Roles // (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_integrate_services.html#orgs_integrate_services-using_slrs) // in the AWS Organizations User Guide. AWS automatically enables AWS CloudTrail // for AWS GovCloud (US) accounts, but you should also do the following: // -// * -// Verify that AWS CloudTrail is enabled to store logs. +// * Verify +// that AWS CloudTrail is enabled to store logs. // -// * Create an S3 bucket -// for AWS CloudTrail log storage. For more information, see Verifying AWS -// CloudTrail Is Enabled +// * Create an S3 bucket for AWS +// CloudTrail log storage. For more information, see Verifying AWS CloudTrail Is +// Enabled // (http://docs.aws.amazon.com/govcloud-us/latest/UserGuide/verifying-cloudtrail.html) // in the AWS GovCloud User Guide. // @@ -64,12 +64,12 @@ import ( // a few minutes before you can successfully access the account. 
To check the // status of the request, do one of the following: // -// * Use the OperationId -// response element from this operation to provide as a parameter to the +// * Use the OperationId response +// element from this operation to provide as a parameter to the // DescribeCreateAccountStatus operation. // -// * Check the AWS CloudTrail log for -// the CreateAccountResult event. For information on using AWS CloudTrail with +// * Check the AWS CloudTrail log for the +// CreateAccountResult event. For information on using AWS CloudTrail with // Organizations, see Monitoring the Activity in Your Organization // (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_monitoring.html) // in the AWS Organizations User Guide. @@ -93,7 +93,7 @@ import ( // (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_create.html) // in the AWS Organizations User Guide. // -// * When you create an account in an +// * When you create an account in an // organization using the AWS Organizations console, API, or CLI commands, the // information required for the account to operate as a standalone account is not // automatically collected. This includes a payment method and signing the end user @@ -103,21 +103,20 @@ import ( // (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) // in the AWS Organizations User Guide. // -// * If you get an exception that -// indicates that you exceeded your account limits for the organization, contact +// * If you get an exception that indicates +// that you exceeded your account limits for the organization, contact AWS Support +// (https://console.aws.amazon.com/support/home#/). +// +// * If you get an exception that +// indicates that the operation failed because your organization is still +// initializing, wait one hour and then try again. If the error persists, contact // AWS Support (https://console.aws.amazon.com/support/home#/). // -// * If you get -// an exception that indicates that the operation failed because your organization -// is still initializing, wait one hour and then try again. If the error persists, -// contact AWS Support (https://console.aws.amazon.com/support/home#/). -// -// * -// Using CreateGovCloudAccount to create multiple temporary accounts isn't -// recommended. You can only close an account from the AWS Billing and Cost -// Management console, and you must be signed in as the root user. For information -// on the requirements and process for closing an account, see Closing an AWS -// Account +// * Using +// CreateGovCloudAccount to create multiple temporary accounts isn't recommended. +// You can only close an account from the AWS Billing and Cost Management console, +// and you must be signed in as the root user. For information on the requirements +// and process for closing an account, see Closing an AWS Account // (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_close.html) // in the AWS Organizations User Guide. // diff --git a/service/organizations/api_op_CreateOrganization.go b/service/organizations/api_op_CreateOrganization.go index 128371fc115..4a5dc10c86a 100644 --- a/service/organizations/api_op_CreateOrganization.go +++ b/service/organizations/api_op_CreateOrganization.go @@ -43,17 +43,17 @@ type CreateOrganizationInput struct { // Specifies the feature set supported by the new organization. Each feature set // supports different levels of functionality. 
// - // * CONSOLIDATED_BILLING: All - // member accounts have their bills consolidated to and paid by the management - // account. For more information, see Consolidated billing + // * CONSOLIDATED_BILLING: All member + // accounts have their bills consolidated to and paid by the management account. + // For more information, see Consolidated billing // (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_getting-started_concepts.html#feature-set-cb-only) // in the AWS Organizations User Guide. The consolidated billing feature subset // isn't available for organizations in the AWS GovCloud (US) Region. // - // * ALL: - // In addition to all the features supported by the consolidated billing feature - // set, the management account can also apply any policy type to any member account - // in the organization. For more information, see All features + // * ALL: In + // addition to all the features supported by the consolidated billing feature set, + // the management account can also apply any policy type to any member account in + // the organization. For more information, see All features // (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_getting-started_concepts.html#feature-set-all) // in the AWS Organizations User Guide. FeatureSet types.OrganizationFeatureSet diff --git a/service/organizations/api_op_CreateOrganizationalUnit.go b/service/organizations/api_op_CreateOrganizationalUnit.go index 8d15a808e51..5f56f6f5cce 100644 --- a/service/organizations/api_op_CreateOrganizationalUnit.go +++ b/service/organizations/api_op_CreateOrganizationalUnit.go @@ -47,11 +47,11 @@ type CreateOrganizationalUnitInput struct { // new OU in. The regex pattern (http://wikipedia.org/wiki/regex) for a parent ID // string requires one of the following: // - // * Root - A string that begins with - // "r-" followed by from 4 to 32 lowercase letters or digits. + // * Root - A string that begins with "r-" + // followed by from 4 to 32 lowercase letters or digits. // - // * Organizational - // unit (OU) - A string that begins with "ou-" followed by from 4 to 32 lowercase + // * Organizational unit + // (OU) - A string that begins with "ou-" followed by from 4 to 32 lowercase // letters or digits (the ID of the root that the OU is in). This string is // followed by a second "-" dash and from 8 to 32 additional lowercase letters or // digits. diff --git a/service/organizations/api_op_CreatePolicy.go b/service/organizations/api_op_CreatePolicy.go index 9a087c69a0f..a23135c89a0 100644 --- a/service/organizations/api_op_CreatePolicy.go +++ b/service/organizations/api_op_CreatePolicy.go @@ -55,20 +55,20 @@ type CreatePolicyInput struct { // The type of policy to create. You can specify one of the following values: // - // - // * AISERVICES_OPT_OUT_POLICY + // * + // AISERVICES_OPT_OUT_POLICY // (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_ai-opt-out.html) // - // - // * BACKUP_POLICY + // * + // BACKUP_POLICY // (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_backup.html) // - // - // * SERVICE_CONTROL_POLICY + // * + // SERVICE_CONTROL_POLICY // (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_scp.html) // - // - // * TAG_POLICY + // * + // TAG_POLICY // (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_tag-policies.html) // // This member is required. 
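The CreateOrganizationalUnit documentation above spells out the two accepted ParentId shapes: a root ID is "r-" plus 4 to 32 lowercase letters or digits, and an OU ID is "ou-" plus that root fragment, a second dash, and 8 to 32 more lowercase letters or digits. A minimal client-side sanity check along those lines is sketched below; the IDs in main are invented examples, and the SDK does not require any such pre-validation.

package main

import (
	"fmt"
	"regexp"
)

// Patterns taken from the ParentId documentation: "r-" plus 4-32 lowercase
// letters or digits for a root, or "ou-" plus the root fragment, a dash, and
// 8-32 more lowercase letters or digits for an organizational unit.
var (
	rootIDPattern = regexp.MustCompile(`^r-[0-9a-z]{4,32}$`)
	ouIDPattern   = regexp.MustCompile(`^ou-[0-9a-z]{4,32}-[0-9a-z]{8,32}$`)
)

// validParentID reports whether id matches either documented ParentId form.
func validParentID(id string) bool {
	return rootIDPattern.MatchString(id) || ouIDPattern.MatchString(id)
}

func main() {
	for _, id := range []string{"r-examplerootid1", "ou-examplerootid1-exampleouid11", "r-UPPER"} {
		fmt.Printf("%-35s %v\n", id, validParentID(id))
	}
}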
diff --git a/service/organizations/api_op_DescribeEffectivePolicy.go b/service/organizations/api_op_DescribeEffectivePolicy.go index 6a003695368..626a8dfeffe 100644 --- a/service/organizations/api_op_DescribeEffectivePolicy.go +++ b/service/organizations/api_op_DescribeEffectivePolicy.go @@ -41,15 +41,15 @@ type DescribeEffectivePolicyInput struct { // The type of policy that you want information about. You can specify one of the // following values: // - // * AISERVICES_OPT_OUT_POLICY + // * AISERVICES_OPT_OUT_POLICY // (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_ai-opt-out.html) // - // - // * BACKUP_POLICY + // * + // BACKUP_POLICY // (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_backup.html) // - // - // * TAG_POLICY + // * + // TAG_POLICY // (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_tag-policies.html) // // This member is required. diff --git a/service/organizations/api_op_DetachPolicy.go b/service/organizations/api_op_DetachPolicy.go index 0d19fc4cf0a..3e17d033981 100644 --- a/service/organizations/api_op_DetachPolicy.go +++ b/service/organizations/api_op_DetachPolicy.go @@ -57,16 +57,16 @@ type DetachPolicyInput struct { // (http://wikipedia.org/wiki/regex) for a target ID string requires one of the // following: // - // * Root - A string that begins with "r-" followed by from 4 to 32 + // * Root - A string that begins with "r-" followed by from 4 to 32 // lowercase letters or digits. // - // * Account - A string that consists of exactly - // 12 digits. + // * Account - A string that consists of exactly 12 + // digits. // - // * Organizational unit (OU) - A string that begins with "ou-" - // followed by from 4 to 32 lowercase letters or digits (the ID of the root that - // the OU is in). This string is followed by a second "-" dash and from 8 to 32 - // additional lowercase letters or digits. + // * Organizational unit (OU) - A string that begins with "ou-" followed + // by from 4 to 32 lowercase letters or digits (the ID of the root that the OU is + // in). This string is followed by a second "-" dash and from 8 to 32 additional + // lowercase letters or digits. // // This member is required. TargetId *string diff --git a/service/organizations/api_op_DisablePolicyType.go b/service/organizations/api_op_DisablePolicyType.go index a0c5dc7371a..6ee7ad4b6d6 100644 --- a/service/organizations/api_op_DisablePolicyType.go +++ b/service/organizations/api_op_DisablePolicyType.go @@ -45,19 +45,19 @@ type DisablePolicyTypeInput struct { // The policy type that you want to disable in this root. You can specify one of // the following values: // - // * AISERVICES_OPT_OUT_POLICY + // * AISERVICES_OPT_OUT_POLICY // (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_ai-opt-out.html) // - // - // * BACKUP_POLICY + // * + // BACKUP_POLICY // (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_backup.html) // - // - // * SERVICE_CONTROL_POLICY + // * + // SERVICE_CONTROL_POLICY // (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_scp.html) // - // - // * TAG_POLICY + // * + // TAG_POLICY // (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_tag-policies.html) // // This member is required. 
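The DisablePolicyType documentation above lists the four policy-type strings the API accepts (AISERVICES_OPT_OUT_POLICY, BACKUP_POLICY, SERVICE_CONTROL_POLICY, TAG_POLICY), while DescribeEffectivePolicy accepts the same set minus SERVICE_CONTROL_POLICY. A small lookup like the sketch below can catch a typo before a request is sent; it is an illustrative helper built from the documented values, not part of the generated client.

package main

import "fmt"

// policyTypes holds the values listed in the DisablePolicyType documentation.
// DescribeEffectivePolicy accepts the same set except SERVICE_CONTROL_POLICY.
var policyTypes = map[string]bool{
	"AISERVICES_OPT_OUT_POLICY": true,
	"BACKUP_POLICY":             true,
	"SERVICE_CONTROL_POLICY":    true,
	"TAG_POLICY":                true,
}

// checkPolicyType returns an error when v is not a documented policy type.
func checkPolicyType(v string) error {
	if !policyTypes[v] {
		return fmt.Errorf("unsupported policy type %q", v)
	}
	return nil
}

func main() {
	fmt.Println(checkPolicyType("TAG_POLICY"))     // <nil>
	fmt.Println(checkPolicyType("TAGGING_POLICY")) // error
}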
diff --git a/service/organizations/api_op_EnablePolicyType.go b/service/organizations/api_op_EnablePolicyType.go index b3ce6b7b090..2514b897915 100644 --- a/service/organizations/api_op_EnablePolicyType.go +++ b/service/organizations/api_op_EnablePolicyType.go @@ -40,19 +40,19 @@ type EnablePolicyTypeInput struct { // The policy type that you want to enable. You can specify one of the following // values: // - // * AISERVICES_OPT_OUT_POLICY + // * AISERVICES_OPT_OUT_POLICY // (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_ai-opt-out.html) // - // - // * BACKUP_POLICY + // * + // BACKUP_POLICY // (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_backup.html) // - // - // * SERVICE_CONTROL_POLICY + // * + // SERVICE_CONTROL_POLICY // (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_scp.html) // - // - // * TAG_POLICY + // * + // TAG_POLICY // (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_tag-policies.html) // // This member is required. diff --git a/service/organizations/api_op_InviteAccountToOrganization.go b/service/organizations/api_op_InviteAccountToOrganization.go index 65907f90796..173a7a1e3fe 100644 --- a/service/organizations/api_op_InviteAccountToOrganization.go +++ b/service/organizations/api_op_InviteAccountToOrganization.go @@ -16,19 +16,19 @@ import ( // is associated with the other account's owner. The invitation is implemented as a // Handshake whose details are in the response. // -// * You can invite AWS accounts -// only from the same seller as the management account. For example, if your +// * You can invite AWS accounts only +// from the same seller as the management account. For example, if your // organization's management account was created by Amazon Internet Services Pvt. // Ltd (AISPL), an AWS seller in India, you can invite only other AISPL accounts to // your organization. You can't combine accounts from AISPL and AWS or from any // other AWS seller. For more information, see Consolidated Billing in India // (http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/useconsolidatedbilliing-India.html). // -// -// * If you receive an exception that indicates that you exceeded your account -// limits for the organization or that the operation failed because your -// organization is still initializing, wait one hour and then try again. If the -// error persists after an hour, contact AWS Support +// * +// If you receive an exception that indicates that you exceeded your account limits +// for the organization or that the operation failed because your organization is +// still initializing, wait one hour and then try again. If the error persists +// after an hour, contact AWS Support // (https://console.aws.amazon.com/support/home#/). // // If the request includes tags, diff --git a/service/organizations/api_op_LeaveOrganization.go b/service/organizations/api_op_LeaveOrganization.go index 36b2109a459..3cf6812cbd2 100644 --- a/service/organizations/api_op_LeaveOrganization.go +++ b/service/organizations/api_op_LeaveOrganization.go @@ -16,45 +16,45 @@ import ( // instead. This operation can be called only from a member account in the // organization. // -// * The management account in an organization with all features +// * The management account in an organization with all features // enabled can set service control policies (SCPs) that can restrict what // administrators of member accounts can do. 
This includes preventing them from // successfully calling LeaveOrganization and leaving the organization. // -// * You -// can leave an organization as a member account only if the account is configured -// with the information required to operate as a standalone account. When you -// create an account in an organization using the AWS Organizations console, API, -// or CLI commands, the information required of standalone accounts is not -// automatically collected. For each account that you want to make standalone, you -// must perform the following steps. If any of the steps are already completed for -// this account, that step doesn't appear. +// * You can +// leave an organization as a member account only if the account is configured with +// the information required to operate as a standalone account. When you create an +// account in an organization using the AWS Organizations console, API, or CLI +// commands, the information required of standalone accounts is not automatically +// collected. For each account that you want to make standalone, you must perform +// the following steps. If any of the steps are already completed for this account, +// that step doesn't appear. // -// * Choose a support plan +// * Choose a support plan // +// * Provide and verify the +// required contact information // -// * Provide and verify the required contact information +// * Provide a current payment method // -// * Provide a -// current payment method -// -// AWS uses the payment method to charge for any -// billable (not free tier) AWS activity that occurs while the account isn't -// attached to an organization. Follow the steps at To leave an organization when -// all required account information has not yet been provided +// AWS uses the +// payment method to charge for any billable (not free tier) AWS activity that +// occurs while the account isn't attached to an organization. Follow the steps at +// To leave an organization when all required account information has not yet been +// provided // (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) // in the AWS Organizations User Guide. // -// * You can leave an organization only -// after you enable IAM user access to billing in your account. For more -// information, see Activating Access to the Billing and Cost Management Console +// * You can leave an organization only after +// you enable IAM user access to billing in your account. For more information, see +// Activating Access to the Billing and Cost Management Console // (http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/grantaccess.html#ControllingAccessWebsite-Activate) // in the AWS Billing and Cost Management User Guide. // -// * After the account -// leaves the organization, all tags that were attached to the account object in -// the organization are deleted. AWS accounts outside of an organization do not -// support tags. +// * After the account leaves +// the organization, all tags that were attached to the account object in the +// organization are deleted. AWS accounts outside of an organization do not support +// tags. 
func (c *Client) LeaveOrganization(ctx context.Context, params *LeaveOrganizationInput, optFns ...func(*Options)) (*LeaveOrganizationOutput, error) { if params == nil { params = &LeaveOrganizationInput{} diff --git a/service/organizations/api_op_ListChildren.go b/service/organizations/api_op_ListChildren.go index 691675e34a8..f31247f7c1f 100644 --- a/service/organizations/api_op_ListChildren.go +++ b/service/organizations/api_op_ListChildren.go @@ -46,14 +46,13 @@ type ListChildrenInput struct { // list. The regex pattern (http://wikipedia.org/wiki/regex) for a parent ID string // requires one of the following: // - // * Root - A string that begins with "r-" - // followed by from 4 to 32 lowercase letters or digits. + // * Root - A string that begins with "r-" followed + // by from 4 to 32 lowercase letters or digits. // - // * Organizational unit - // (OU) - A string that begins with "ou-" followed by from 4 to 32 lowercase - // letters or digits (the ID of the root that the OU is in). This string is - // followed by a second "-" dash and from 8 to 32 additional lowercase letters or - // digits. + // * Organizational unit (OU) - A + // string that begins with "ou-" followed by from 4 to 32 lowercase letters or + // digits (the ID of the root that the OU is in). This string is followed by a + // second "-" dash and from 8 to 32 additional lowercase letters or digits. // // This member is required. ParentId *string diff --git a/service/organizations/api_op_ListOrganizationalUnitsForParent.go b/service/organizations/api_op_ListOrganizationalUnitsForParent.go index 98355cdabfa..b5add2ed504 100644 --- a/service/organizations/api_op_ListOrganizationalUnitsForParent.go +++ b/service/organizations/api_op_ListOrganizationalUnitsForParent.go @@ -39,14 +39,13 @@ type ListOrganizationalUnitsForParentInput struct { // The regex pattern (http://wikipedia.org/wiki/regex) for a parent ID string // requires one of the following: // - // * Root - A string that begins with "r-" - // followed by from 4 to 32 lowercase letters or digits. + // * Root - A string that begins with "r-" followed + // by from 4 to 32 lowercase letters or digits. // - // * Organizational unit - // (OU) - A string that begins with "ou-" followed by from 4 to 32 lowercase - // letters or digits (the ID of the root that the OU is in). This string is - // followed by a second "-" dash and from 8 to 32 additional lowercase letters or - // digits. + // * Organizational unit (OU) - A + // string that begins with "ou-" followed by from 4 to 32 lowercase letters or + // digits (the ID of the root that the OU is in). This string is followed by a + // second "-" dash and from 8 to 32 additional lowercase letters or digits. // // This member is required. ParentId *string diff --git a/service/organizations/api_op_ListParents.go b/service/organizations/api_op_ListParents.go index a292d10ac9d..941876e9736 100644 --- a/service/organizations/api_op_ListParents.go +++ b/service/organizations/api_op_ListParents.go @@ -43,9 +43,9 @@ type ListParentsInput struct { // (http://wikipedia.org/wiki/regex) for a child ID string requires one of the // following: // - // * Account - A string that consists of exactly 12 digits. + // * Account - A string that consists of exactly 12 digits. // - // * + // * // Organizational unit (OU) - A string that begins with "ou-" followed by from 4 to // 32 lowercase letters or digits (the ID of the root that contains the OU). 
This // string is followed by a second "-" dash and from 8 to 32 additional lowercase diff --git a/service/organizations/api_op_ListPolicies.go b/service/organizations/api_op_ListPolicies.go index 447b030f2b2..0a3c6c85b52 100644 --- a/service/organizations/api_op_ListPolicies.go +++ b/service/organizations/api_op_ListPolicies.go @@ -38,19 +38,19 @@ type ListPoliciesInput struct { // Specifies the type of policy that you want to include in the response. You must // specify one of the following values: // - // * AISERVICES_OPT_OUT_POLICY + // * AISERVICES_OPT_OUT_POLICY // (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_ai-opt-out.html) // - // - // * BACKUP_POLICY + // * + // BACKUP_POLICY // (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_backup.html) // - // - // * SERVICE_CONTROL_POLICY + // * + // SERVICE_CONTROL_POLICY // (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_scp.html) // - // - // * TAG_POLICY + // * + // TAG_POLICY // (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_tag-policies.html) // // This member is required. diff --git a/service/organizations/api_op_ListPoliciesForTarget.go b/service/organizations/api_op_ListPoliciesForTarget.go index 8696bf2ad31..2bdeae1c3f9 100644 --- a/service/organizations/api_op_ListPoliciesForTarget.go +++ b/service/organizations/api_op_ListPoliciesForTarget.go @@ -40,19 +40,19 @@ type ListPoliciesForTargetInput struct { // The type of policy that you want to include in the returned list. You must // specify one of the following values: // - // * AISERVICES_OPT_OUT_POLICY + // * AISERVICES_OPT_OUT_POLICY // (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_ai-opt-out.html) // - // - // * BACKUP_POLICY + // * + // BACKUP_POLICY // (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_backup.html) // - // - // * SERVICE_CONTROL_POLICY + // * + // SERVICE_CONTROL_POLICY // (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_scp.html) // - // - // * TAG_POLICY + // * + // TAG_POLICY // (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_tag-policies.html) // // This member is required. @@ -62,14 +62,14 @@ type ListPoliciesForTargetInput struct { // policies you want to list. The regex pattern (http://wikipedia.org/wiki/regex) // for a target ID string requires one of the following: // - // * Root - A string - // that begins with "r-" followed by from 4 to 32 lowercase letters or digits. - // + // * Root - A string that + // begins with "r-" followed by from 4 to 32 lowercase letters or digits. // - // * Account - A string that consists of exactly 12 digits. + // * + // Account - A string that consists of exactly 12 digits. // - // * Organizational - // unit (OU) - A string that begins with "ou-" followed by from 4 to 32 lowercase + // * Organizational unit + // (OU) - A string that begins with "ou-" followed by from 4 to 32 lowercase // letters or digits (the ID of the root that the OU is in). This string is // followed by a second "-" dash and from 8 to 32 additional lowercase letters or // digits. 
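The ListPoliciesForTarget documentation above describes three accepted TargetId forms: a root ("r-" plus 4-32 lowercase letters or digits), an account (exactly 12 digits), or an OU ("ou-" plus the root fragment, a dash, and 8-32 more lowercase letters or digits). The sketch below classifies an ID against those three forms; the IDs in main are invented examples, and the check is purely illustrative.

package main

import (
	"fmt"
	"regexp"
)

// One regular expression per documented TargetId form.
var (
	rootForm    = regexp.MustCompile(`^r-[0-9a-z]{4,32}$`)
	accountForm = regexp.MustCompile(`^\d{12}$`)
	ouForm      = regexp.MustCompile(`^ou-[0-9a-z]{4,32}-[0-9a-z]{8,32}$`)
)

// classifyTargetID names the documented form that id matches, or returns an
// error when it matches none of them.
func classifyTargetID(id string) (string, error) {
	switch {
	case rootForm.MatchString(id):
		return "root", nil
	case accountForm.MatchString(id):
		return "account", nil
	case ouForm.MatchString(id):
		return "organizational unit", nil
	}
	return "", fmt.Errorf("%q is not a valid root, account, or OU ID", id)
}

func main() {
	for _, id := range []string{"r-examplerootid1", "123456789012", "ou-examplerootid1-exampleouid11", "p-12abcdefg3"} {
		kind, err := classifyTargetID(id)
		fmt.Println(id, kind, err)
	}
}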
diff --git a/service/organizations/api_op_ListTagsForResource.go b/service/organizations/api_op_ListTagsForResource.go index c4ec6a0e98d..307d274064d 100644 --- a/service/organizations/api_op_ListTagsForResource.go +++ b/service/organizations/api_op_ListTagsForResource.go @@ -14,18 +14,18 @@ import ( // Lists tags that are attached to the specified resource. You can attach tags to // the following resources in AWS Organizations. // -// * AWS account +// * AWS account // -// * -// Organization root +// * Organization +// root // -// * Organizational unit (OU) +// * Organizational unit (OU) // -// * Policy (any type) +// * Policy (any type) // -// This -// operation can be called only from the organization's management account or by a -// member account that is a delegated administrator for an AWS service. +// This operation can be +// called only from the organization's management account or by a member account +// that is a delegated administrator for an AWS service. func (c *Client) ListTagsForResource(ctx context.Context, params *ListTagsForResourceInput, optFns ...func(*Options)) (*ListTagsForResourceOutput, error) { if params == nil { params = &ListTagsForResourceInput{} @@ -46,17 +46,17 @@ type ListTagsForResourceInput struct { // The ID of the resource with the tags to list. You can specify any of the // following taggable resources. // - // * AWS account – specify the account ID - // number. + // * AWS account – specify the account ID number. // - // * Organizational unit – specify the OU ID that begins with ou- and - // looks similar to: ou-1a2b-34uvwxyz + // * + // Organizational unit – specify the OU ID that begins with ou- and looks similar + // to: ou-1a2b-34uvwxyz // - // * Root – specify the root ID that begins - // with r- and looks similar to: r-1a2b + // * Root – specify the root ID that begins with r- and looks + // similar to: r-1a2b // - // * Policy – specify the policy ID that - // begins with p- andlooks similar to: p-12abcdefg3 + // * Policy – specify the policy ID that begins with p- + // andlooks similar to: p-12abcdefg3 // // This member is required. ResourceId *string diff --git a/service/organizations/api_op_MoveAccount.go b/service/organizations/api_op_MoveAccount.go index 8d0516b1175..09bd87ae4dd 100644 --- a/service/organizations/api_op_MoveAccount.go +++ b/service/organizations/api_op_MoveAccount.go @@ -41,10 +41,10 @@ type MoveAccountInput struct { // move the account to. The regex pattern (http://wikipedia.org/wiki/regex) for a // parent ID string requires one of the following: // - // * Root - A string that - // begins with "r-" followed by from 4 to 32 lowercase letters or digits. + // * Root - A string that begins + // with "r-" followed by from 4 to 32 lowercase letters or digits. // - // * + // * // Organizational unit (OU) - A string that begins with "ou-" followed by from 4 to // 32 lowercase letters or digits (the ID of the root that the OU is in). This // string is followed by a second "-" dash and from 8 to 32 additional lowercase @@ -57,10 +57,10 @@ type MoveAccountInput struct { // move the account from. The regex pattern (http://wikipedia.org/wiki/regex) for a // parent ID string requires one of the following: // - // * Root - A string that - // begins with "r-" followed by from 4 to 32 lowercase letters or digits. + // * Root - A string that begins + // with "r-" followed by from 4 to 32 lowercase letters or digits. 
// - // * + // * // Organizational unit (OU) - A string that begins with "ou-" followed by from 4 to // 32 lowercase letters or digits (the ID of the root that the OU is in). This // string is followed by a second "-" dash and from 8 to 32 additional lowercase diff --git a/service/organizations/api_op_RemoveAccountFromOrganization.go b/service/organizations/api_op_RemoveAccountFromOrganization.go index 827e8452272..4769c8adbd8 100644 --- a/service/organizations/api_op_RemoveAccountFromOrganization.go +++ b/service/organizations/api_op_RemoveAccountFromOrganization.go @@ -18,7 +18,7 @@ import ( // can be called only from the organization's management account. Member accounts // can remove themselves with LeaveOrganization instead. // -// * You can remove an +// * You can remove an // account from your organization only if the account is configured with the // information required to operate as a standalone account. When you create an // account in an organization using the AWS Organizations console, API, or CLI @@ -33,7 +33,7 @@ import ( // (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) // in the AWS Organizations User Guide. // -// * After the account leaves the +// * After the account leaves the // organization, all tags that were attached to the account object in the // organization are deleted. AWS accounts outside of an organization do not support // tags. diff --git a/service/organizations/api_op_TagResource.go b/service/organizations/api_op_TagResource.go index 23d993738e6..b298e13448a 100644 --- a/service/organizations/api_op_TagResource.go +++ b/service/organizations/api_op_TagResource.go @@ -14,17 +14,17 @@ import ( // Adds one or more tags to the specified resource. Currently, you can attach tags // to the following resources in AWS Organizations. // -// * AWS account +// * AWS account // -// * -// Organization root +// * Organization +// root // -// * Organizational unit (OU) +// * Organizational unit (OU) // -// * Policy (any type) +// * Policy (any type) // -// This -// operation can be called only from the organization's management account. +// This operation can be +// called only from the organization's management account. func (c *Client) TagResource(ctx context.Context, params *TagResourceInput, optFns ...func(*Options)) (*TagResourceOutput, error) { if params == nil { params = &TagResourceInput{} @@ -50,23 +50,23 @@ type TagResourceInput struct { // A list of tags to add to the specified resource. You can specify any of the // following taggable resources. // - // * AWS account – specify the account ID - // number. + // * AWS account – specify the account ID number. // - // * Organizational unit – specify the OU ID that begins with ou- and - // looks similar to: ou-1a2b-34uvwxyz + // * + // Organizational unit – specify the OU ID that begins with ou- and looks similar + // to: ou-1a2b-34uvwxyz // - // * Root – specify the root ID that begins - // with r- and looks similar to: r-1a2b + // * Root – specify the root ID that begins with r- and looks + // similar to: r-1a2b // - // * Policy – specify the policy ID that - // begins with p- andlooks similar to: p-12abcdefg3 + // * Policy – specify the policy ID that begins with p- + // andlooks similar to: p-12abcdefg3 // - // For each tag in the list, you - // must specify both a tag key and a value. You can set the value to an empty - // string, but you can't set it to null. 
If any one of the tags is invalid or if - // you exceed the allowed number of tags for an account user, then the entire - // request fails and the account is not created. + // For each tag in the list, you must specify + // both a tag key and a value. You can set the value to an empty string, but you + // can't set it to null. If any one of the tags is invalid or if you exceed the + // allowed number of tags for an account user, then the entire request fails and + // the account is not created. // // This member is required. Tags []*types.Tag diff --git a/service/organizations/api_op_UntagResource.go b/service/organizations/api_op_UntagResource.go index 89647a5442b..77c702c8807 100644 --- a/service/organizations/api_op_UntagResource.go +++ b/service/organizations/api_op_UntagResource.go @@ -13,18 +13,17 @@ import ( // Removes any tags with the specified keys from the specified resource. You can // attach tags to the following resources in AWS Organizations. // -// * AWS -// account +// * AWS account // -// * Organization root +// * +// Organization root // -// * Organizational unit (OU) +// * Organizational unit (OU) // -// * Policy -// (any type) +// * Policy (any type) // -// This operation can be called only from the organization's management -// account. +// This +// operation can be called only from the organization's management account. func (c *Client) UntagResource(ctx context.Context, params *UntagResourceInput, optFns ...func(*Options)) (*UntagResourceOutput, error) { if params == nil { params = &UntagResourceInput{} @@ -45,17 +44,17 @@ type UntagResourceInput struct { // The ID of the resource to remove a tag from. You can specify any of the // following taggable resources. // - // * AWS account – specify the account ID - // number. + // * AWS account – specify the account ID number. // - // * Organizational unit – specify the OU ID that begins with ou- and - // looks similar to: ou-1a2b-34uvwxyz + // * + // Organizational unit – specify the OU ID that begins with ou- and looks similar + // to: ou-1a2b-34uvwxyz // - // * Root – specify the root ID that begins - // with r- and looks similar to: r-1a2b + // * Root – specify the root ID that begins with r- and looks + // similar to: r-1a2b // - // * Policy – specify the policy ID that - // begins with p- andlooks similar to: p-12abcdefg3 + // * Policy – specify the policy ID that begins with p- + // andlooks similar to: p-12abcdefg3 // // This member is required. ResourceId *string diff --git a/service/organizations/types/enums.go b/service/organizations/types/enums.go index 66f8a831576..9aef68ec02d 100644 --- a/service/organizations/types/enums.go +++ b/service/organizations/types/enums.go @@ -6,7 +6,7 @@ type AccessDeniedForDependencyExceptionReason string // Enum values for AccessDeniedForDependencyExceptionReason const ( - AccessDeniedForDependencyExceptionReasonAccess_denied_during_create_service_linked_role AccessDeniedForDependencyExceptionReason = "ACCESS_DENIED_DURING_CREATE_SERVICE_LINKED_ROLE" + AccessDeniedForDependencyExceptionReasonAccessDeniedDuringCreateServiceLinkedRole AccessDeniedForDependencyExceptionReason = "ACCESS_DENIED_DURING_CREATE_SERVICE_LINKED_ROLE" ) // Values returns all known values for AccessDeniedForDependencyExceptionReason. 
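The TagResource hunk above shows the client method shape (ctx, a *TagResourceInput, optional functional options) and a required Tags []*types.Tag field whose entries each need a key and a value. The sketch below is a hedged illustration only: the ResourceId, Key, and Value field names are assumptions inferred from the surrounding documentation rather than fields shown in this diff, the import paths are the usual module paths for this SDK, and client construction is omitted.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/service/organizations"
	"github.com/aws/aws-sdk-go-v2/service/organizations/types"
)

// strptr is a tiny helper so the sketch does not depend on any pointer
// helpers from the SDK.
func strptr(s string) *string { return &s }

// tagAccount attaches one tag to an account. TagResource and the
// Tags []*types.Tag field appear in the diff above; ResourceId, Key, and
// Value are assumed field names based on the surrounding documentation.
func tagAccount(ctx context.Context, client *organizations.Client, accountID string) error {
	_, err := client.TagResource(ctx, &organizations.TagResourceInput{
		ResourceId: strptr(accountID), // assumed: the ID of the taggable resource
		Tags: []*types.Tag{
			{
				Key:   strptr("team"),     // assumed field name; a key is required
				Value: strptr("platform"), // assumed field name; value may be "" but not nil
			},
		},
	})
	return err
}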
@@ -59,10 +59,10 @@ type ActionType string // Enum values for ActionType const ( - ActionTypeInvite_account_to_organization ActionType = "INVITE" - ActionTypeEnable_all_features ActionType = "ENABLE_ALL_FEATURES" - ActionTypeApprove_all_features ActionType = "APPROVE_ALL_FEATURES" - ActionTypeAdd_organizations_service_linked_role ActionType = "ADD_ORGANIZATIONS_SERVICE_LINKED_ROLE" + ActionTypeInviteAccountToOrganization ActionType = "INVITE" + ActionTypeEnableAllFeatures ActionType = "ENABLE_ALL_FEATURES" + ActionTypeApproveAllFeatures ActionType = "APPROVE_ALL_FEATURES" + ActionTypeAddOrganizationsServiceLinkedRole ActionType = "ADD_ORGANIZATIONS_SERVICE_LINKED_ROLE" ) // Values returns all known values for ActionType. Note that this can be expanded @@ -81,8 +81,8 @@ type ChildType string // Enum values for ChildType const ( - ChildTypeAccount ChildType = "ACCOUNT" - ChildTypeOrganizational_unit ChildType = "ORGANIZATIONAL_UNIT" + ChildTypeAccount ChildType = "ACCOUNT" + ChildTypeOrganizationalUnit ChildType = "ORGANIZATIONAL_UNIT" ) // Values returns all known values for ChildType. Note that this can be expanded in @@ -99,34 +99,34 @@ type ConstraintViolationExceptionReason string // Enum values for ConstraintViolationExceptionReason const ( - ConstraintViolationExceptionReasonAccount_number_limit_exceeded ConstraintViolationExceptionReason = "ACCOUNT_NUMBER_LIMIT_EXCEEDED" - ConstraintViolationExceptionReasonHandshake_rate_limit_exceeded ConstraintViolationExceptionReason = "HANDSHAKE_RATE_LIMIT_EXCEEDED" - ConstraintViolationExceptionReasonOu_number_limit_exceeded ConstraintViolationExceptionReason = "OU_NUMBER_LIMIT_EXCEEDED" - ConstraintViolationExceptionReasonOu_depth_limit_exceeded ConstraintViolationExceptionReason = "OU_DEPTH_LIMIT_EXCEEDED" - ConstraintViolationExceptionReasonPolicy_number_limit_exceeded ConstraintViolationExceptionReason = "POLICY_NUMBER_LIMIT_EXCEEDED" - ConstraintViolationExceptionReasonPolicy_content_limit_exceeded ConstraintViolationExceptionReason = "POLICY_CONTENT_LIMIT_EXCEEDED" - ConstraintViolationExceptionReasonMax_policy_type_attachment_limit_exceeded ConstraintViolationExceptionReason = "MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED" - ConstraintViolationExceptionReasonMin_policy_type_attachment_limit_exceeded ConstraintViolationExceptionReason = "MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED" - ConstraintViolationExceptionReasonAccount_cannot_leave_organization ConstraintViolationExceptionReason = "ACCOUNT_CANNOT_LEAVE_ORGANIZATION" - ConstraintViolationExceptionReasonAccount_cannot_leave_without_eula ConstraintViolationExceptionReason = "ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA" - ConstraintViolationExceptionReasonAccount_cannot_leave_without_phone_verification ConstraintViolationExceptionReason = "ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION" - ConstraintViolationExceptionReasonMaster_account_payment_instrument_required ConstraintViolationExceptionReason = "MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED" - ConstraintViolationExceptionReasonMember_account_payment_instrument_required ConstraintViolationExceptionReason = "MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED" - ConstraintViolationExceptionReasonAccount_creation_rate_limit_exceeded ConstraintViolationExceptionReason = "ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED" - ConstraintViolationExceptionReasonMaster_account_address_does_not_match_marketplace ConstraintViolationExceptionReason = "MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE" - ConstraintViolationExceptionReasonMaster_account_missing_contact_info 
ConstraintViolationExceptionReason = "MASTER_ACCOUNT_MISSING_CONTACT_INFO" - ConstraintViolationExceptionReasonMaster_account_not_govcloud_enabled ConstraintViolationExceptionReason = "MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED" - ConstraintViolationExceptionReasonOrganization_not_in_all_features_mode ConstraintViolationExceptionReason = "ORGANIZATION_NOT_IN_ALL_FEATURES_MODE" - ConstraintViolationExceptionReasonCreate_organization_in_billing_mode_unsupported_region ConstraintViolationExceptionReason = "CREATE_ORGANIZATION_IN_BILLING_MODE_UNSUPPORTED_REGION" - ConstraintViolationExceptionReasonEmail_verification_code_expired ConstraintViolationExceptionReason = "EMAIL_VERIFICATION_CODE_EXPIRED" - ConstraintViolationExceptionReasonWait_period_active ConstraintViolationExceptionReason = "WAIT_PERIOD_ACTIVE" - ConstraintViolationExceptionReasonMax_tag_limit_exceeded ConstraintViolationExceptionReason = "MAX_TAG_LIMIT_EXCEEDED" - ConstraintViolationExceptionReasonTag_policy_violation ConstraintViolationExceptionReason = "TAG_POLICY_VIOLATION" - ConstraintViolationExceptionReasonMax_delegated_administrators_for_service_limit_exceeded ConstraintViolationExceptionReason = "MAX_DELEGATED_ADMINISTRATORS_FOR_SERVICE_LIMIT_EXCEEDED" - ConstraintViolationExceptionReasonCannot_register_master_as_delegated_administrator ConstraintViolationExceptionReason = "CANNOT_REGISTER_MASTER_AS_DELEGATED_ADMINISTRATOR" - ConstraintViolationExceptionReasonCannot_remove_delegated_administrator_from_org ConstraintViolationExceptionReason = "CANNOT_REMOVE_DELEGATED_ADMINISTRATOR_FROM_ORG" - ConstraintViolationExceptionReasonDelegated_administrator_exists_for_this_service ConstraintViolationExceptionReason = "DELEGATED_ADMINISTRATOR_EXISTS_FOR_THIS_SERVICE" - ConstraintViolationExceptionReasonMaster_account_missing_business_license ConstraintViolationExceptionReason = "MASTER_ACCOUNT_MISSING_BUSINESS_LICENSE" + ConstraintViolationExceptionReasonAccountNumberLimitExceeded ConstraintViolationExceptionReason = "ACCOUNT_NUMBER_LIMIT_EXCEEDED" + ConstraintViolationExceptionReasonHandshakeRateLimitExceeded ConstraintViolationExceptionReason = "HANDSHAKE_RATE_LIMIT_EXCEEDED" + ConstraintViolationExceptionReasonOuNumberLimitExceeded ConstraintViolationExceptionReason = "OU_NUMBER_LIMIT_EXCEEDED" + ConstraintViolationExceptionReasonOuDepthLimitExceeded ConstraintViolationExceptionReason = "OU_DEPTH_LIMIT_EXCEEDED" + ConstraintViolationExceptionReasonPolicyNumberLimitExceeded ConstraintViolationExceptionReason = "POLICY_NUMBER_LIMIT_EXCEEDED" + ConstraintViolationExceptionReasonPolicyContentLimitExceeded ConstraintViolationExceptionReason = "POLICY_CONTENT_LIMIT_EXCEEDED" + ConstraintViolationExceptionReasonMaxPolicyTypeAttachmentLimitExceeded ConstraintViolationExceptionReason = "MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED" + ConstraintViolationExceptionReasonMinPolicyTypeAttachmentLimitExceeded ConstraintViolationExceptionReason = "MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED" + ConstraintViolationExceptionReasonAccountCannotLeaveOrganization ConstraintViolationExceptionReason = "ACCOUNT_CANNOT_LEAVE_ORGANIZATION" + ConstraintViolationExceptionReasonAccountCannotLeaveWithoutEula ConstraintViolationExceptionReason = "ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA" + ConstraintViolationExceptionReasonAccountCannotLeaveWithoutPhoneVerification ConstraintViolationExceptionReason = "ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION" + ConstraintViolationExceptionReasonMasterAccountPaymentInstrumentRequired ConstraintViolationExceptionReason = 
"MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED" + ConstraintViolationExceptionReasonMemberAccountPaymentInstrumentRequired ConstraintViolationExceptionReason = "MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED" + ConstraintViolationExceptionReasonAccountCreationRateLimitExceeded ConstraintViolationExceptionReason = "ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED" + ConstraintViolationExceptionReasonMasterAccountAddressDoesNotMatchMarketplace ConstraintViolationExceptionReason = "MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE" + ConstraintViolationExceptionReasonMasterAccountMissingContactInfo ConstraintViolationExceptionReason = "MASTER_ACCOUNT_MISSING_CONTACT_INFO" + ConstraintViolationExceptionReasonMasterAccountNotGovcloudEnabled ConstraintViolationExceptionReason = "MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED" + ConstraintViolationExceptionReasonOrganizationNotInAllFeaturesMode ConstraintViolationExceptionReason = "ORGANIZATION_NOT_IN_ALL_FEATURES_MODE" + ConstraintViolationExceptionReasonCreateOrganizationInBillingModeUnsupportedRegion ConstraintViolationExceptionReason = "CREATE_ORGANIZATION_IN_BILLING_MODE_UNSUPPORTED_REGION" + ConstraintViolationExceptionReasonEmailVerificationCodeExpired ConstraintViolationExceptionReason = "EMAIL_VERIFICATION_CODE_EXPIRED" + ConstraintViolationExceptionReasonWaitPeriodActive ConstraintViolationExceptionReason = "WAIT_PERIOD_ACTIVE" + ConstraintViolationExceptionReasonMaxTagLimitExceeded ConstraintViolationExceptionReason = "MAX_TAG_LIMIT_EXCEEDED" + ConstraintViolationExceptionReasonTagPolicyViolation ConstraintViolationExceptionReason = "TAG_POLICY_VIOLATION" + ConstraintViolationExceptionReasonMaxDelegatedAdministratorsForServiceLimitExceeded ConstraintViolationExceptionReason = "MAX_DELEGATED_ADMINISTRATORS_FOR_SERVICE_LIMIT_EXCEEDED" + ConstraintViolationExceptionReasonCannotRegisterMasterAsDelegatedAdministrator ConstraintViolationExceptionReason = "CANNOT_REGISTER_MASTER_AS_DELEGATED_ADMINISTRATOR" + ConstraintViolationExceptionReasonCannotRemoveDelegatedAdministratorFromOrg ConstraintViolationExceptionReason = "CANNOT_REMOVE_DELEGATED_ADMINISTRATOR_FROM_ORG" + ConstraintViolationExceptionReasonDelegatedAdministratorExistsForThisService ConstraintViolationExceptionReason = "DELEGATED_ADMINISTRATOR_EXISTS_FOR_THIS_SERVICE" + ConstraintViolationExceptionReasonMasterAccountMissingBusinessLicense ConstraintViolationExceptionReason = "MASTER_ACCOUNT_MISSING_BUSINESS_LICENSE" ) // Values returns all known values for ConstraintViolationExceptionReason. 
Note @@ -170,15 +170,15 @@ type CreateAccountFailureReason string // Enum values for CreateAccountFailureReason const ( - CreateAccountFailureReasonAccount_limit_exceeded CreateAccountFailureReason = "ACCOUNT_LIMIT_EXCEEDED" - CreateAccountFailureReasonEmail_already_exists CreateAccountFailureReason = "EMAIL_ALREADY_EXISTS" - CreateAccountFailureReasonInvalid_address CreateAccountFailureReason = "INVALID_ADDRESS" - CreateAccountFailureReasonInvalid_email CreateAccountFailureReason = "INVALID_EMAIL" - CreateAccountFailureReasonConcurrent_account_modification CreateAccountFailureReason = "CONCURRENT_ACCOUNT_MODIFICATION" - CreateAccountFailureReasonInternal_failure CreateAccountFailureReason = "INTERNAL_FAILURE" - CreateAccountFailureReasonGovcloud_account_already_exists CreateAccountFailureReason = "GOVCLOUD_ACCOUNT_ALREADY_EXISTS" - CreateAccountFailureReasonMissing_business_validation CreateAccountFailureReason = "MISSING_BUSINESS_VALIDATION" - CreateAccountFailureReasonMissing_payment_instrument CreateAccountFailureReason = "MISSING_PAYMENT_INSTRUMENT" + CreateAccountFailureReasonAccountLimitExceeded CreateAccountFailureReason = "ACCOUNT_LIMIT_EXCEEDED" + CreateAccountFailureReasonEmailAlreadyExists CreateAccountFailureReason = "EMAIL_ALREADY_EXISTS" + CreateAccountFailureReasonInvalidAddress CreateAccountFailureReason = "INVALID_ADDRESS" + CreateAccountFailureReasonInvalidEmail CreateAccountFailureReason = "INVALID_EMAIL" + CreateAccountFailureReasonConcurrentAccountModification CreateAccountFailureReason = "CONCURRENT_ACCOUNT_MODIFICATION" + CreateAccountFailureReasonInternalFailure CreateAccountFailureReason = "INTERNAL_FAILURE" + CreateAccountFailureReasonGovcloudAccountAlreadyExists CreateAccountFailureReason = "GOVCLOUD_ACCOUNT_ALREADY_EXISTS" + CreateAccountFailureReasonMissingBusinessValidation CreateAccountFailureReason = "MISSING_BUSINESS_VALIDATION" + CreateAccountFailureReasonMissingPaymentInstrument CreateAccountFailureReason = "MISSING_PAYMENT_INSTRUMENT" ) // Values returns all known values for CreateAccountFailureReason. Note that this @@ -202,9 +202,9 @@ type CreateAccountState string // Enum values for CreateAccountState const ( - CreateAccountStateIn_progress CreateAccountState = "IN_PROGRESS" - CreateAccountStateSucceeded CreateAccountState = "SUCCEEDED" - CreateAccountStateFailed CreateAccountState = "FAILED" + CreateAccountStateInProgress CreateAccountState = "IN_PROGRESS" + CreateAccountStateSucceeded CreateAccountState = "SUCCEEDED" + CreateAccountStateFailed CreateAccountState = "FAILED" ) // Values returns all known values for CreateAccountState. Note that this can be @@ -222,9 +222,9 @@ type EffectivePolicyType string // Enum values for EffectivePolicyType const ( - EffectivePolicyTypeTag_policy EffectivePolicyType = "TAG_POLICY" - EffectivePolicyTypeBackup_policy EffectivePolicyType = "BACKUP_POLICY" - EffectivePolicyTypeAiservices_opt_out_policy EffectivePolicyType = "AISERVICES_OPT_OUT_POLICY" + EffectivePolicyTypeTagPolicy EffectivePolicyType = "TAG_POLICY" + EffectivePolicyTypeBackupPolicy EffectivePolicyType = "BACKUP_POLICY" + EffectivePolicyTypeAiservicesOptOutPolicy EffectivePolicyType = "AISERVICES_OPT_OUT_POLICY" ) // Values returns all known values for EffectivePolicyType. 
Note that this can be @@ -242,14 +242,14 @@ type HandshakeConstraintViolationExceptionReason string // Enum values for HandshakeConstraintViolationExceptionReason const ( - HandshakeConstraintViolationExceptionReasonAccount_number_limit_exceeded HandshakeConstraintViolationExceptionReason = "ACCOUNT_NUMBER_LIMIT_EXCEEDED" - HandshakeConstraintViolationExceptionReasonHandshake_rate_limit_exceeded HandshakeConstraintViolationExceptionReason = "HANDSHAKE_RATE_LIMIT_EXCEEDED" - HandshakeConstraintViolationExceptionReasonAlready_in_an_organization HandshakeConstraintViolationExceptionReason = "ALREADY_IN_AN_ORGANIZATION" - HandshakeConstraintViolationExceptionReasonOrganization_already_has_all_features HandshakeConstraintViolationExceptionReason = "ORGANIZATION_ALREADY_HAS_ALL_FEATURES" - HandshakeConstraintViolationExceptionReasonInvite_disabled_during_enable_all_features HandshakeConstraintViolationExceptionReason = "INVITE_DISABLED_DURING_ENABLE_ALL_FEATURES" - HandshakeConstraintViolationExceptionReasonPayment_instrument_required HandshakeConstraintViolationExceptionReason = "PAYMENT_INSTRUMENT_REQUIRED" - HandshakeConstraintViolationExceptionReasonOrganization_from_different_seller_of_record HandshakeConstraintViolationExceptionReason = "ORGANIZATION_FROM_DIFFERENT_SELLER_OF_RECORD" - HandshakeConstraintViolationExceptionReasonOrganization_membership_change_rate_limit_exceeded HandshakeConstraintViolationExceptionReason = "ORGANIZATION_MEMBERSHIP_CHANGE_RATE_LIMIT_EXCEEDED" + HandshakeConstraintViolationExceptionReasonAccountNumberLimitExceeded HandshakeConstraintViolationExceptionReason = "ACCOUNT_NUMBER_LIMIT_EXCEEDED" + HandshakeConstraintViolationExceptionReasonHandshakeRateLimitExceeded HandshakeConstraintViolationExceptionReason = "HANDSHAKE_RATE_LIMIT_EXCEEDED" + HandshakeConstraintViolationExceptionReasonAlreadyInAnOrganization HandshakeConstraintViolationExceptionReason = "ALREADY_IN_AN_ORGANIZATION" + HandshakeConstraintViolationExceptionReasonOrganizationAlreadyHasAllFeatures HandshakeConstraintViolationExceptionReason = "ORGANIZATION_ALREADY_HAS_ALL_FEATURES" + HandshakeConstraintViolationExceptionReasonInviteDisabledDuringEnableAllFeatures HandshakeConstraintViolationExceptionReason = "INVITE_DISABLED_DURING_ENABLE_ALL_FEATURES" + HandshakeConstraintViolationExceptionReasonPaymentInstrumentRequired HandshakeConstraintViolationExceptionReason = "PAYMENT_INSTRUMENT_REQUIRED" + HandshakeConstraintViolationExceptionReasonOrganizationFromDifferentSellerOfRecord HandshakeConstraintViolationExceptionReason = "ORGANIZATION_FROM_DIFFERENT_SELLER_OF_RECORD" + HandshakeConstraintViolationExceptionReasonOrganizationMembershipChangeRateLimitExceeded HandshakeConstraintViolationExceptionReason = "ORGANIZATION_MEMBERSHIP_CHANGE_RATE_LIMIT_EXCEEDED" ) // Values returns all known values for HandshakeConstraintViolationExceptionReason. 
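These enum renames change exported identifiers, so downstream code that matched on the old mixed-case constants (for example CreateAccountFailureReasonEmail_already_exists) must switch to the new CamelCase names shown in this hunk. Below is a minimal sketch of consuming code written against the renamed constants; the import path is inferred from the repository layout and the helper function is illustrative, not part of the generated code in this patch.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/organizations/types"
)

// describeFailure maps a CreateAccountFailureReason to a short operator hint,
// using the renamed CamelCase constants introduced by this change.
func describeFailure(reason types.CreateAccountFailureReason) string {
	switch reason {
	case types.CreateAccountFailureReasonAccountLimitExceeded:
		return "the organization is at its account limit"
	case types.CreateAccountFailureReasonEmailAlreadyExists:
		return "another AWS account already uses that email address"
	case types.CreateAccountFailureReasonMissingPaymentInstrument:
		return "the management account has no valid payment method"
	default:
		// CreateAccountFailureReason is a string type, so unknown or newly
		// added values print as their raw API value.
		return fmt.Sprintf("account creation failed: %s", reason)
	}
}

func main() {
	fmt.Println(describeFailure(types.CreateAccountFailureReasonEmailAlreadyExists))
}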
@@ -293,14 +293,14 @@ type HandshakeResourceType string // Enum values for HandshakeResourceType const ( - HandshakeResourceTypeAccount HandshakeResourceType = "ACCOUNT" - HandshakeResourceTypeOrganization HandshakeResourceType = "ORGANIZATION" - HandshakeResourceTypeOrganization_feature_set HandshakeResourceType = "ORGANIZATION_FEATURE_SET" - HandshakeResourceTypeEmail HandshakeResourceType = "EMAIL" - HandshakeResourceTypeMaster_email HandshakeResourceType = "MASTER_EMAIL" - HandshakeResourceTypeMaster_name HandshakeResourceType = "MASTER_NAME" - HandshakeResourceTypeNotes HandshakeResourceType = "NOTES" - HandshakeResourceTypeParent_handshake HandshakeResourceType = "PARENT_HANDSHAKE" + HandshakeResourceTypeAccount HandshakeResourceType = "ACCOUNT" + HandshakeResourceTypeOrganization HandshakeResourceType = "ORGANIZATION" + HandshakeResourceTypeOrganizationFeatureSet HandshakeResourceType = "ORGANIZATION_FEATURE_SET" + HandshakeResourceTypeEmail HandshakeResourceType = "EMAIL" + HandshakeResourceTypeMasterEmail HandshakeResourceType = "MASTER_EMAIL" + HandshakeResourceTypeMasterName HandshakeResourceType = "MASTER_NAME" + HandshakeResourceTypeNotes HandshakeResourceType = "NOTES" + HandshakeResourceTypeParentHandshake HandshakeResourceType = "PARENT_HANDSHAKE" ) // Values returns all known values for HandshakeResourceType. Note that this can be @@ -367,29 +367,29 @@ type InvalidInputExceptionReason string // Enum values for InvalidInputExceptionReason const ( - InvalidInputExceptionReasonInvalid_party_type_target InvalidInputExceptionReason = "INVALID_PARTY_TYPE_TARGET" - InvalidInputExceptionReasonInvalid_syntax_organization InvalidInputExceptionReason = "INVALID_SYNTAX_ORGANIZATION_ARN" - InvalidInputExceptionReasonInvalid_syntax_policy InvalidInputExceptionReason = "INVALID_SYNTAX_POLICY_ID" - InvalidInputExceptionReasonInvalid_enum InvalidInputExceptionReason = "INVALID_ENUM" - InvalidInputExceptionReasonInvalid_enum_policy_type InvalidInputExceptionReason = "INVALID_ENUM_POLICY_TYPE" - InvalidInputExceptionReasonInvalid_list_member InvalidInputExceptionReason = "INVALID_LIST_MEMBER" - InvalidInputExceptionReasonMax_length_exceeded InvalidInputExceptionReason = "MAX_LENGTH_EXCEEDED" - InvalidInputExceptionReasonMax_value_exceeded InvalidInputExceptionReason = "MAX_VALUE_EXCEEDED" - InvalidInputExceptionReasonMin_length_exceeded InvalidInputExceptionReason = "MIN_LENGTH_EXCEEDED" - InvalidInputExceptionReasonMin_value_exceeded InvalidInputExceptionReason = "MIN_VALUE_EXCEEDED" - InvalidInputExceptionReasonImmutable_policy InvalidInputExceptionReason = "IMMUTABLE_POLICY" - InvalidInputExceptionReasonInvalid_pattern InvalidInputExceptionReason = "INVALID_PATTERN" - InvalidInputExceptionReasonInvalid_pattern_target_id InvalidInputExceptionReason = "INVALID_PATTERN_TARGET_ID" - InvalidInputExceptionReasonInput_required InvalidInputExceptionReason = "INPUT_REQUIRED" - InvalidInputExceptionReasonInvalid_pagination_token InvalidInputExceptionReason = "INVALID_NEXT_TOKEN" - InvalidInputExceptionReasonMax_filter_limit_exceeded InvalidInputExceptionReason = "MAX_LIMIT_EXCEEDED_FILTER" - InvalidInputExceptionReasonMoving_account_between_different_roots InvalidInputExceptionReason = "MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS" - InvalidInputExceptionReasonInvalid_full_name_target InvalidInputExceptionReason = "INVALID_FULL_NAME_TARGET" - InvalidInputExceptionReasonUnrecognized_service_principal InvalidInputExceptionReason = "UNRECOGNIZED_SERVICE_PRINCIPAL" - 
InvalidInputExceptionReasonInvalid_role_name InvalidInputExceptionReason = "INVALID_ROLE_NAME" - InvalidInputExceptionReasonInvalid_system_tags_parameter InvalidInputExceptionReason = "INVALID_SYSTEM_TAGS_PARAMETER" - InvalidInputExceptionReasonDuplicate_tag_key InvalidInputExceptionReason = "DUPLICATE_TAG_KEY" - InvalidInputExceptionReasonTarget_not_supported InvalidInputExceptionReason = "TARGET_NOT_SUPPORTED" + InvalidInputExceptionReasonInvalidPartyTypeTarget InvalidInputExceptionReason = "INVALID_PARTY_TYPE_TARGET" + InvalidInputExceptionReasonInvalidSyntaxOrganization InvalidInputExceptionReason = "INVALID_SYNTAX_ORGANIZATION_ARN" + InvalidInputExceptionReasonInvalidSyntaxPolicy InvalidInputExceptionReason = "INVALID_SYNTAX_POLICY_ID" + InvalidInputExceptionReasonInvalidEnum InvalidInputExceptionReason = "INVALID_ENUM" + InvalidInputExceptionReasonInvalidEnumPolicyType InvalidInputExceptionReason = "INVALID_ENUM_POLICY_TYPE" + InvalidInputExceptionReasonInvalidListMember InvalidInputExceptionReason = "INVALID_LIST_MEMBER" + InvalidInputExceptionReasonMaxLengthExceeded InvalidInputExceptionReason = "MAX_LENGTH_EXCEEDED" + InvalidInputExceptionReasonMaxValueExceeded InvalidInputExceptionReason = "MAX_VALUE_EXCEEDED" + InvalidInputExceptionReasonMinLengthExceeded InvalidInputExceptionReason = "MIN_LENGTH_EXCEEDED" + InvalidInputExceptionReasonMinValueExceeded InvalidInputExceptionReason = "MIN_VALUE_EXCEEDED" + InvalidInputExceptionReasonImmutablePolicy InvalidInputExceptionReason = "IMMUTABLE_POLICY" + InvalidInputExceptionReasonInvalidPattern InvalidInputExceptionReason = "INVALID_PATTERN" + InvalidInputExceptionReasonInvalidPatternTargetId InvalidInputExceptionReason = "INVALID_PATTERN_TARGET_ID" + InvalidInputExceptionReasonInputRequired InvalidInputExceptionReason = "INPUT_REQUIRED" + InvalidInputExceptionReasonInvalidPaginationToken InvalidInputExceptionReason = "INVALID_NEXT_TOKEN" + InvalidInputExceptionReasonMaxFilterLimitExceeded InvalidInputExceptionReason = "MAX_LIMIT_EXCEEDED_FILTER" + InvalidInputExceptionReasonMovingAccountBetweenDifferentRoots InvalidInputExceptionReason = "MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS" + InvalidInputExceptionReasonInvalidFullNameTarget InvalidInputExceptionReason = "INVALID_FULL_NAME_TARGET" + InvalidInputExceptionReasonUnrecognizedServicePrincipal InvalidInputExceptionReason = "UNRECOGNIZED_SERVICE_PRINCIPAL" + InvalidInputExceptionReasonInvalidRoleName InvalidInputExceptionReason = "INVALID_ROLE_NAME" + InvalidInputExceptionReasonInvalidSystemTagsParameter InvalidInputExceptionReason = "INVALID_SYSTEM_TAGS_PARAMETER" + InvalidInputExceptionReasonDuplicateTagKey InvalidInputExceptionReason = "DUPLICATE_TAG_KEY" + InvalidInputExceptionReasonTargetNotSupported InvalidInputExceptionReason = "TARGET_NOT_SUPPORTED" ) // Values returns all known values for InvalidInputExceptionReason. Note that this @@ -427,8 +427,8 @@ type OrganizationFeatureSet string // Enum values for OrganizationFeatureSet const ( - OrganizationFeatureSetAll OrganizationFeatureSet = "ALL" - OrganizationFeatureSetConsolidated_billing OrganizationFeatureSet = "CONSOLIDATED_BILLING" + OrganizationFeatureSetAll OrganizationFeatureSet = "ALL" + OrganizationFeatureSetConsolidatedBilling OrganizationFeatureSet = "CONSOLIDATED_BILLING" ) // Values returns all known values for OrganizationFeatureSet. 
Note that this can @@ -445,8 +445,8 @@ type ParentType string // Enum values for ParentType const ( - ParentTypeRoot ParentType = "ROOT" - ParentTypeOrganizational_unit ParentType = "ORGANIZATIONAL_UNIT" + ParentTypeRoot ParentType = "ROOT" + ParentTypeOrganizationalUnit ParentType = "ORGANIZATIONAL_UNIT" ) // Values returns all known values for ParentType. Note that this can be expanded @@ -463,10 +463,10 @@ type PolicyType string // Enum values for PolicyType const ( - PolicyTypeService_control_policy PolicyType = "SERVICE_CONTROL_POLICY" - PolicyTypeTag_policy PolicyType = "TAG_POLICY" - PolicyTypeBackup_policy PolicyType = "BACKUP_POLICY" - PolicyTypeAiservices_opt_out_policy PolicyType = "AISERVICES_OPT_OUT_POLICY" + PolicyTypeServiceControlPolicy PolicyType = "SERVICE_CONTROL_POLICY" + PolicyTypeTagPolicy PolicyType = "TAG_POLICY" + PolicyTypeBackupPolicy PolicyType = "BACKUP_POLICY" + PolicyTypeAiservicesOptOutPolicy PolicyType = "AISERVICES_OPT_OUT_POLICY" ) // Values returns all known values for PolicyType. Note that this can be expanded @@ -485,9 +485,9 @@ type PolicyTypeStatus string // Enum values for PolicyTypeStatus const ( - PolicyTypeStatusEnabled PolicyTypeStatus = "ENABLED" - PolicyTypeStatusPending_enable PolicyTypeStatus = "PENDING_ENABLE" - PolicyTypeStatusPending_disable PolicyTypeStatus = "PENDING_DISABLE" + PolicyTypeStatusEnabled PolicyTypeStatus = "ENABLED" + PolicyTypeStatusPendingEnable PolicyTypeStatus = "PENDING_ENABLE" + PolicyTypeStatusPendingDisable PolicyTypeStatus = "PENDING_DISABLE" ) // Values returns all known values for PolicyTypeStatus. Note that this can be @@ -505,9 +505,9 @@ type TargetType string // Enum values for TargetType const ( - TargetTypeAccount TargetType = "ACCOUNT" - TargetTypeOrganizational_unit TargetType = "ORGANIZATIONAL_UNIT" - TargetTypeRoot TargetType = "ROOT" + TargetTypeAccount TargetType = "ACCOUNT" + TargetTypeOrganizationalUnit TargetType = "ORGANIZATIONAL_UNIT" + TargetTypeRoot TargetType = "ROOT" ) // Values returns all known values for TargetType. Note that this can be expanded diff --git a/service/organizations/types/errors.go b/service/organizations/types/errors.go index 2ba005ac7f9..7683d5b0472 100644 --- a/service/organizations/types/errors.go +++ b/service/organizations/types/errors.go @@ -216,20 +216,19 @@ func (e *ConcurrentModificationException) ErrorFault() smithy.ErrorFault { retur // the reasons in the following list might not be applicable to this specific API // or operation. // -// * ACCOUNT_CANNOT_LEAVE_ORGANIZAION: You attempted to remove -// the management account from the organization. You can't remove the management +// * ACCOUNT_CANNOT_LEAVE_ORGANIZAION: You attempted to remove the +// management account from the organization. You can't remove the management // account. Instead, after you remove all member accounts, delete the organization // itself. // -// * ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an -// account from the organization that doesn't yet have enough information to exist -// as a standalone account. This account requires you to first agree to the AWS -// Customer Agreement. Follow the steps at Removing a member account from your -// organization +// * ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account +// from the organization that doesn't yet have enough information to exist as a +// standalone account. This account requires you to first agree to the AWS Customer +// Agreement. 
Follow the steps at Removing a member account from your organization // (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#orgs_manage_accounts_remove-from-master)in // the AWS Organizations User Guide. // -// * +// * // ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove an // account from the organization that doesn't yet have enough information to exist // as a standalone account. This account requires you to first complete phone @@ -238,53 +237,51 @@ func (e *ConcurrentModificationException) ErrorFault() smithy.ErrorFault { retur // (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#orgs_manage_accounts_remove-from-master) // in the AWS Organizations User Guide. // -// * -// ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of -// accounts that you can create in one day. +// * ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: +// You attempted to exceed the number of accounts that you can create in one +// day. // -// * ACCOUNT_NUMBER_LIMIT_EXCEEDED: -// You attempted to exceed the limit on the number of accounts in an organization. -// If you need more accounts, contact AWS Support -// (https://console.aws.amazon.com/support/home#/) to request an increase in your -// limit. Or the number of invitations that you tried to send would cause you to -// exceed the limit of accounts in your organization. Send fewer invitations or -// contact AWS Support to request an increase in the number of accounts. Deleted +// * ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on the +// number of accounts in an organization. If you need more accounts, contact AWS +// Support (https://console.aws.amazon.com/support/home#/) to request an increase +// in your limit. Or the number of invitations that you tried to send would cause +// you to exceed the limit of accounts in your organization. Send fewer invitations +// or contact AWS Support to request an increase in the number of accounts. Deleted // and closed accounts still count toward your limit. If you get this exception // when running a command immediately after creating the organization, wait one // hour and try again. After an hour, if the command continues to fail with this // error, contact AWS Support (https://console.aws.amazon.com/support/home#/). // +// * +// CANNOT_REGISTER_MASTER_AS_DELEGATED_ADMINISTRATOR: You attempted to register the +// management account of the organization as a delegated administrator for an AWS +// service integrated with Organizations. You can designate only a member account +// as a delegated administrator. // -// * CANNOT_REGISTER_MASTER_AS_DELEGATED_ADMINISTRATOR: You attempted to register -// the management account of the organization as a delegated administrator for an -// AWS service integrated with Organizations. You can designate only a member -// account as a delegated administrator. -// -// * -// CANNOT_REMOVE_DELEGATED_ADMINISTRATOR_FROM_ORG: You attempted to remove an -// account that is registered as a delegated administrator for a service integrated -// with your organization. To complete this operation, you must first deregister -// this account as a delegated administrator. +// * CANNOT_REMOVE_DELEGATED_ADMINISTRATOR_FROM_ORG: +// You attempted to remove an account that is registered as a delegated +// administrator for a service integrated with your organization. To complete this +// operation, you must first deregister this account as a delegated +// administrator. 
// -// * -// CREATE_ORGANIZATION_IN_BILLING_MODE_UNSUPPORTED_REGION: To create an -// organization in the specified region, you must enable all features mode. +// * CREATE_ORGANIZATION_IN_BILLING_MODE_UNSUPPORTED_REGION: To +// create an organization in the specified region, you must enable all features +// mode. // -// * -// DELEGATED_ADMINISTRATOR_EXISTS_FOR_THIS_SERVICE: You attempted to register an -// AWS account as a delegated administrator for an AWS service that already has a -// delegated administrator. To complete this operation, you must first deregister -// any existing delegated administrators for this service. +// * DELEGATED_ADMINISTRATOR_EXISTS_FOR_THIS_SERVICE: You attempted to +// register an AWS account as a delegated administrator for an AWS service that +// already has a delegated administrator. To complete this operation, you must +// first deregister any existing delegated administrators for this service. // -// * +// * // EMAIL_VERIFICATION_CODE_EXPIRED: The email verification code is only valid for a // limited period of time. You must resubmit the request and generate a new // verfication code. // -// * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed -// the number of handshakes that you can send in one day. +// * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the +// number of handshakes that you can send in one day. // -// * +// * // MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account in this // organization, you first must migrate the organization's management account to // the marketplace that corresponds to the management account's address. For @@ -292,77 +289,77 @@ func (e *ConcurrentModificationException) ErrorFault() smithy.ErrorFault { retur // marketplace. All accounts in an organization must be associated with the same // marketplace. // -// * MASTER_ACCOUNT_MISSING_BUSINESS_LICENSE: Applies only to the -// AWS Regions in China. To create an organization, the master must have an valid +// * MASTER_ACCOUNT_MISSING_BUSINESS_LICENSE: Applies only to the AWS +// Regions in China. To create an organization, the master must have an valid // business license. For more information, contact customer support. // -// * +// * // MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you must first // provide a valid contact address and phone number for the management account. // Then try the operation again. // -// * MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To +// * MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To // complete this operation, the management account must have an associated account // in the AWS GovCloud (US-West) Region. For more information, see AWS // Organizations // (http://docs.aws.amazon.com/govcloud-us/latest/UserGuide/govcloud-organizations.html) // in the AWS GovCloud User Guide. // -// * -// MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization with this -// management account, you first must associate a valid payment instrument, such as -// a credit card, with the account. Follow the steps at To leave an organization -// when all required account information has not yet been provided +// * MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: +// To create an organization with this management account, you first must associate +// a valid payment instrument, such as a credit card, with the account. 
Follow the +// steps at To leave an organization when all required account information has not +// yet been provided // (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) // in the AWS Organizations User Guide. // -// * +// * // MAX_DELEGATED_ADMINISTRATORS_FOR_SERVICE_LIMIT_EXCEEDED: You attempted to -// register more delegated administrators than allowed for the service principal. -// -// -// * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the number -// of policies of a certain type that can be attached to an entity at one time. +// register more delegated administrators than allowed for the service +// principal. // +// * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed +// the number of policies of a certain type that can be attached to an entity at +// one time. // -// * MAX_TAG_LIMIT_EXCEEDED: You have exceeded the number of tags allowed on this -// resource. +// * MAX_TAG_LIMIT_EXCEEDED: You have exceeded the number of tags +// allowed on this resource. // -// * MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this -// operation with this member account, you first must associate a valid payment -// instrument, such as a credit card, with the account. Follow the steps at To -// leave an organization when all required account information has not yet been -// provided +// * MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To +// complete this operation with this member account, you first must associate a +// valid payment instrument, such as a credit card, with the account. Follow the +// steps at To leave an organization when all required account information has not +// yet been provided // (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) // in the AWS Organizations User Guide. // -// * +// * // MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a policy from // an entity that would cause the entity to have fewer than the minimum number of // policies of a certain type required. // -// * -// ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation -// that requires the organization to be configured to support all features. An -// organization that supports only consolidated billing features can't perform this -// operation. +// * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: +// You attempted to perform an operation that requires the organization to be +// configured to support all features. An organization that supports only +// consolidated billing features can't perform this operation. // -// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree -// that is too many levels deep. +// * +// OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is too many +// levels deep. // -// * OU_NUMBER_LIMIT_EXCEEDED: You attempted to -// exceed the number of OUs that you can have in an organization. +// * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of +// OUs that you can have in an organization. // -// * -// POLICY_CONTENT_LIMIT_EXCEEDED: You attempted to create a policy that is larger -// than the maximum size. +// * POLICY_CONTENT_LIMIT_EXCEEDED: You +// attempted to create a policy that is larger than the maximum size. // -// * POLICY_NUMBER_LIMIT_EXCEEDED: You attempted to -// exceed the number of policies that you can have in an organization. 
+// * +// POLICY_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of policies +// that you can have in an organization. // -// * -// TAG_POLICY_VIOLATION: You attempted to create or update a resource with tags -// that are not compliant with the tag policy requirements for this account. +// * TAG_POLICY_VIOLATION: You attempted to +// create or update a resource with tags that are not compliant with the tag policy +// requirements for this account. type ConstraintViolationException struct { Message *string @@ -590,43 +587,42 @@ func (e *HandshakeAlreadyInStateException) ErrorFault() smithy.ErrorFault { retu // code. Some of the reasons in the following list might not be applicable to this // specific API or operation: // -// * ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted -// to exceed the limit on the number of accounts in an organization. Note that -// deleted and closed accounts still count toward your limit. If you get this -// exception immediately after creating the organization, wait one hour and try -// again. If after an hour it continues to fail with this error, contact AWS -// Support (https://console.aws.amazon.com/support/home#/). +// * ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to +// exceed the limit on the number of accounts in an organization. Note that deleted +// and closed accounts still count toward your limit. If you get this exception +// immediately after creating the organization, wait one hour and try again. If +// after an hour it continues to fail with this error, contact AWS Support +// (https://console.aws.amazon.com/support/home#/). // -// * -// ALREADY_IN_AN_ORGANIZATION: The handshake request is invalid because the invited -// account is already a member of an organization. +// * ALREADY_IN_AN_ORGANIZATION: +// The handshake request is invalid because the invited account is already a member +// of an organization. // -// * -// HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of handshakes -// that you can send in one day. +// * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed +// the number of handshakes that you can send in one day. // -// * INVITE_DISABLED_DURING_ENABLE_ALL_FEATURES: -// You can't issue new invitations to join an organization while it's in the -// process of enabling all features. You can resume inviting accounts after you -// finalize the process when all accounts have agreed to the change. +// * +// INVITE_DISABLED_DURING_ENABLE_ALL_FEATURES: You can't issue new invitations to +// join an organization while it's in the process of enabling all features. You can +// resume inviting accounts after you finalize the process when all accounts have +// agreed to the change. // -// * -// ORGANIZATION_ALREADY_HAS_ALL_FEATURES: The handshake request is invalid because -// the organization has already enabled all features. +// * ORGANIZATION_ALREADY_HAS_ALL_FEATURES: The handshake +// request is invalid because the organization has already enabled all features. // -// * +// * // ORGANIZATION_FROM_DIFFERENT_SELLER_OF_RECORD: The request failed because the // account is from a different marketplace than the accounts in the organization. // For example, accounts with India addresses must be associated with the AISPL // marketplace. All accounts in an organization must be from the same // marketplace. // -// * ORGANIZATION_MEMBERSHIP_CHANGE_RATE_LIMIT_EXCEEDED: You +// * ORGANIZATION_MEMBERSHIP_CHANGE_RATE_LIMIT_EXCEEDED: You // attempted to change the membership of an account too quickly after its previous // change. 
// -// * PAYMENT_INSTRUMENT_REQUIRED: You can't complete the operation -// with an account that doesn't have a payment instrument, such as a credit card, +// * PAYMENT_INSTRUMENT_REQUIRED: You can't complete the operation with an +// account that doesn't have a payment instrument, such as a credit card, // associated with it. type HandshakeConstraintViolationException struct { Message *string @@ -695,81 +691,80 @@ func (e *InvalidHandshakeTransitionException) ErrorFault() smithy.ErrorFault { // additional information about the violated limit: Some of the reasons in the // following list might not be applicable to this specific API or operation. // -// * +// * // DUPLICATE_TAG_KEY: Tag keys must be unique among the tags attached to the same // entity. // -// * IMMUTABLE_POLICY: You specified a policy that is managed by AWS -// and can't be modified. +// * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and +// can't be modified. // -// * INPUT_REQUIRED: You must include a value for all -// required parameters. +// * INPUT_REQUIRED: You must include a value for all required +// parameters. // -// * INVALID_ENUM: You specified an invalid value. +// * INVALID_ENUM: You specified an invalid value. // -// * +// * // INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type string. // -// * +// * // INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. // -// * INVALID_LIST_MEMBER: You provided a list to a parameter that +// * INVALID_LIST_MEMBER: You provided a list to a parameter that // contains at least one invalid value. // -// * INVALID_PAGINATION_TOKEN: Get the -// value for the NextToken parameter from the response to a previous call of the +// * INVALID_PAGINATION_TOKEN: Get the value +// for the NextToken parameter from the response to a previous call of the // operation. // -// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of -// entity (account, organization, or email) as a party. +// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity +// (account, organization, or email) as a party. // -// * INVALID_PATTERN: You -// provided a value that doesn't match the required pattern. +// * INVALID_PATTERN: You provided a +// value that doesn't match the required pattern. // -// * -// INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't match -// the required pattern. +// * INVALID_PATTERN_TARGET_ID: You +// specified a policy target ID that doesn't match the required pattern. // -// * INVALID_ROLE_NAME: You provided a role name that -// isn't valid. A role name can't begin with the reserved prefix -// AWSServiceRoleFor. +// * +// INVALID_ROLE_NAME: You provided a role name that isn't valid. A role name can't +// begin with the reserved prefix AWSServiceRoleFor. // -// * INVALID_SYNTAX_ORGANIZATION_ARN: You specified an -// invalid Amazon Resource Name (ARN) for the organization. +// * +// INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource Name +// (ARN) for the organization. // -// * -// INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID. +// * INVALID_SYNTAX_POLICY_ID: You specified an +// invalid policy ID. // -// * -// INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system tag. You -// can’t add, edit, or delete system tag keys because they're reserved for AWS use. -// System tags don’t count against your tags per resource limit. +// * INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key +// that is a system tag. 
You can’t add, edit, or delete system tag keys because +// they're reserved for AWS use. System tags don’t count against your tags per +// resource limit. // -// * -// MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter for the -// operation. +// * MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter +// parameter for the operation. // -// * MAX_LENGTH_EXCEEDED: You provided a string parameter that is -// longer than allowed. +// * MAX_LENGTH_EXCEEDED: You provided a string +// parameter that is longer than allowed. // -// * MAX_VALUE_EXCEEDED: You provided a numeric parameter -// that has a larger value than allowed. +// * MAX_VALUE_EXCEEDED: You provided a +// numeric parameter that has a larger value than allowed. // -// * MIN_LENGTH_EXCEEDED: You provided a -// string parameter that is shorter than allowed. +// * MIN_LENGTH_EXCEEDED: +// You provided a string parameter that is shorter than allowed. // -// * MIN_VALUE_EXCEEDED: You -// provided a numeric parameter that has a smaller value than allowed. +// * +// MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller value +// than allowed. // -// * -// MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only between -// entities in the same root. +// * MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account +// only between entities in the same root. // -// * TARGET_NOT_SUPPORTED: You can't perform the -// specified operation on that target entity. +// * TARGET_NOT_SUPPORTED: You can't +// perform the specified operation on that target entity. // -// * +// * // UNRECOGNIZED_SERVICE_PRINCIPAL: You specified a service principal that isn't // recognized. type InvalidInputException struct { diff --git a/service/organizations/types/types.go b/service/organizations/types/types.go index a75a398cde0..9d5fac2c947 100644 --- a/service/organizations/types/types.go +++ b/service/organizations/types/types.go @@ -47,9 +47,9 @@ type Child struct { // (http://wikipedia.org/wiki/regex) for a child ID string requires one of the // following: // - // * Account - A string that consists of exactly 12 digits. + // * Account - A string that consists of exactly 12 digits. // - // * + // * // Organizational unit (OU) - A string that begins with "ou-" followed by from 4 to // 32 lowercase letters or digits (the ID of the root that contains the OU). This // string is followed by a second "-" dash and from 8 to 32 additional lowercase @@ -77,39 +77,38 @@ type CreateAccountStatus struct { // If the request failed, a description of the reason for the failure. // - // * + // * // ACCOUNT_LIMIT_EXCEEDED: The account could not be created because you have // reached the limit on the number of accounts in your organization. // - // * + // * // CONCURRENT_ACCOUNT_MODIFICATION: You already submitted a request with the same // information. // - // * EMAIL_ALREADY_EXISTS: The account could not be created - // because another AWS account with that email address already exists. + // * EMAIL_ALREADY_EXISTS: The account could not be created because + // another AWS account with that email address already exists. // - // * + // * // GOVCLOUD_ACCOUNT_ALREADY_EXISTS: The account in the AWS GovCloud (US) Region // could not be created because this Region already includes an account with that // email address. // - // * INVALID_ADDRESS: The account could not be created because - // the address you provided is not valid. 
+ // * INVALID_ADDRESS: The account could not be created because the + // address you provided is not valid. // - // * INVALID_EMAIL: The account could - // not be created because the email address you provided is not valid. + // * INVALID_EMAIL: The account could not be + // created because the email address you provided is not valid. // - // * + // * // INTERNAL_FAILURE: The account could not be created because of an internal // failure. Try again later. If the problem persists, contact Customer Support. // + // * + // MISSING_BUSINESS_VALIDATION: The AWS account that owns your organization has not + // received Business Validation. // - // * MISSING_BUSINESS_VALIDATION: The AWS account that owns your organization has - // not received Business Validation. - // - // * MISSING_PAYMENT_INSTRUMENT: You must - // configure the management account with a valid payment method, such as a credit - // card. + // * MISSING_PAYMENT_INSTRUMENT: You must configure + // the management account with a valid payment method, such as a credit card. FailureReason CreateAccountFailureReason // If the account was created successfully, the unique identifier (ID) of the new @@ -215,18 +214,18 @@ type Handshake struct { // The type of handshake, indicating what action occurs when the recipient accepts // the handshake. The following handshake types are supported: // - // * INVITE: This - // type of handshake represents a request to join an organization. It is always - // sent from the management account to only non-member accounts. + // * INVITE: This type + // of handshake represents a request to join an organization. It is always sent + // from the management account to only non-member accounts. // - // * - // ENABLE_ALL_FEATURES: This type of handshake represents a request to enable all - // features in an organization. It is always sent from the management account to - // only invited member accounts. Created accounts do not receive this because those - // accounts were created by the organization's management account and approval is + // * ENABLE_ALL_FEATURES: + // This type of handshake represents a request to enable all features in an + // organization. It is always sent from the management account to only invited + // member accounts. Created accounts do not receive this because those accounts + // were created by the organization's management account and approval is // inferred. // - // * APPROVE_ALL_FEATURES: This type of handshake is sent from the + // * APPROVE_ALL_FEATURES: This type of handshake is sent from the // Organizations service when all member accounts have approved the // ENABLE_ALL_FEATURES invitation. It is sent only to the management account and // signals the master that it can finalize the process to enable all features. @@ -262,28 +261,28 @@ type Handshake struct { // handshake through the process from its creation to its acceptance. The meaning // of each of the valid values is as follows: // - // * REQUESTED: This handshake was - // sent to multiple recipients (applicable to only some handshake types) and not - // all recipients have responded yet. The request stays in this state until all + // * REQUESTED: This handshake was sent + // to multiple recipients (applicable to only some handshake types) and not all + // recipients have responded yet. The request stays in this state until all // recipients respond. 
// - // * OPEN: This handshake was sent to multiple recipients + // * OPEN: This handshake was sent to multiple recipients // (applicable to only some policy types) and all recipients have responded, // allowing the originator to complete the handshake action. // - // * CANCELED: This + // * CANCELED: This // handshake is no longer active because it was canceled by the originating // account. // - // * ACCEPTED: This handshake is complete because it has been - // accepted by the recipient. + // * ACCEPTED: This handshake is complete because it has been accepted by + // the recipient. // - // * DECLINED: This handshake is no longer active - // because it was declined by the recipient account. + // * DECLINED: This handshake is no longer active because it was + // declined by the recipient account. // - // * EXPIRED: This handshake - // is no longer active because the originator did not receive a response of any - // kind from the recipient before the expiration time (15 days). + // * EXPIRED: This handshake is no longer + // active because the originator did not receive a response of any kind from the + // recipient before the expiration time (15 days). State HandshakeState } @@ -326,24 +325,24 @@ type HandshakeResource struct { // The type of information being passed, specifying how the value is to be // interpreted by the other party: // - // * ACCOUNT - Specifies an AWS account ID + // * ACCOUNT - Specifies an AWS account ID // number. // - // * ORGANIZATION - Specifies an organization ID number. + // * ORGANIZATION - Specifies an organization ID number. // - // * EMAIL - // - Specifies the email address that is associated with the account that receives + // * EMAIL - + // Specifies the email address that is associated with the account that receives // the handshake. // - // * OWNER_EMAIL - Specifies the email address associated with - // the management account. Included as information about an organization. + // * OWNER_EMAIL - Specifies the email address associated with the + // management account. Included as information about an organization. // - // * - // OWNER_NAME - Specifies the name associated with the management account. Included - // as information about an organization. + // * OWNER_NAME + // - Specifies the name associated with the management account. Included as + // information about an organization. // - // * NOTES - Additional text provided by - // the handshake initiator and intended for the recipient to read. + // * NOTES - Additional text provided by the + // handshake initiator and intended for the recipient to read. Type HandshakeResourceType // The information that is passed to the other party in the handshake. The format @@ -432,13 +431,13 @@ type Parent struct { // (http://wikipedia.org/wiki/regex) for a parent ID string requires one of the // following: // - // * Root - A string that begins with "r-" followed by from 4 to 32 + // * Root - A string that begins with "r-" followed by from 4 to 32 // lowercase letters or digits. // - // * Organizational unit (OU) - A string that - // begins with "ou-" followed by from 4 to 32 lowercase letters or digits (the ID - // of the root that the OU is in). This string is followed by a second "-" dash and - // from 8 to 32 additional lowercase letters or digits. + // * Organizational unit (OU) - A string that begins + // with "ou-" followed by from 4 to 32 lowercase letters or digits (the ID of the + // root that the OU is in). 
This string is followed by a second "-" dash and from 8 + // to 32 additional lowercase letters or digits. Id *string // The type of the parent entity. @@ -508,16 +507,16 @@ type PolicyTargetSummary struct { // (http://wikipedia.org/wiki/regex) for a target ID string requires one of the // following: // - // * Root - A string that begins with "r-" followed by from 4 to 32 + // * Root - A string that begins with "r-" followed by from 4 to 32 // lowercase letters or digits. // - // * Account - A string that consists of exactly - // 12 digits. + // * Account - A string that consists of exactly 12 + // digits. // - // * Organizational unit (OU) - A string that begins with "ou-" - // followed by from 4 to 32 lowercase letters or digits (the ID of the root that - // the OU is in). This string is followed by a second "-" dash and from 8 to 32 - // additional lowercase letters or digits. + // * Organizational unit (OU) - A string that begins with "ou-" followed + // by from 4 to 32 lowercase letters or digits (the ID of the root that the OU is + // in). This string is followed by a second "-" dash and from 8 to 32 additional + // lowercase letters or digits. TargetId *string // The type of the policy target. @@ -569,14 +568,14 @@ type Root struct { // A custom key-value pair associated with a resource within your organization. You // can attach tags to any of the following organization resources. // -// * AWS +// * AWS // account // -// * Organizational unit (OU) +// * Organizational unit (OU) // -// * Organization root +// * Organization root // -// * Policy +// * Policy type Tag struct { // The key identifier, or name, of the tag. diff --git a/service/personalize/api_op_CreateCampaign.go b/service/personalize/api_op_CreateCampaign.go index 5dc4618389d..a58b491d868 100644 --- a/service/personalize/api_op_CreateCampaign.go +++ b/service/personalize/api_op_CreateCampaign.go @@ -30,23 +30,23 @@ import ( // minProvisionedTPS as necessary. Status A campaign can be in one of the following // states: // -// * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED +// * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED // -// -// * DELETE PENDING > DELETE IN_PROGRESS +// * +// DELETE PENDING > DELETE IN_PROGRESS // // To get the campaign status, call // DescribeCampaign. Wait until the status of the campaign is ACTIVE before asking // the campaign for recommendations. Related APIs // -// * ListCampaigns +// * ListCampaigns // -// * +// * // DescribeCampaign // -// * UpdateCampaign +// * UpdateCampaign // -// * DeleteCampaign +// * DeleteCampaign func (c *Client) CreateCampaign(ctx context.Context, params *CreateCampaignInput, optFns ...func(*Options)) (*CreateCampaignOutput, error) { if params == nil { params = &CreateCampaignInput{} diff --git a/service/personalize/api_op_CreateDataset.go b/service/personalize/api_op_CreateDataset.go index fcfe6aab14d..d073a92a619 100644 --- a/service/personalize/api_op_CreateDataset.go +++ b/service/personalize/api_op_CreateDataset.go @@ -14,34 +14,33 @@ import ( // CreateDatasetImportJob to import your training data to a dataset. There are // three types of datasets: // -// * Interactions +// * Interactions // -// * Items +// * Items // -// * Users +// * Users // -// Each -// dataset type has an associated schema with required field types. Only the -// Interactions dataset is required in order to train a model (also referred to as -// creating a solution). 
A dataset can be in one of the following states: +// Each dataset type +// has an associated schema with required field types. Only the Interactions +// dataset is required in order to train a model (also referred to as creating a +// solution). A dataset can be in one of the following states: // -// * -// CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED +// * CREATE PENDING > +// CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED // -// * DELETE -// PENDING > DELETE IN_PROGRESS +// * DELETE PENDING > DELETE +// IN_PROGRESS // -// To get the status of the dataset, call -// DescribeDataset. Related APIs +// To get the status of the dataset, call DescribeDataset. Related +// APIs // -// * CreateDatasetGroup -// -// * ListDatasets +// * CreateDatasetGroup // +// * ListDatasets // // * DescribeDataset // -// * DeleteDataset +// * DeleteDataset func (c *Client) CreateDataset(ctx context.Context, params *CreateDatasetInput, optFns ...func(*Options)) (*CreateDatasetOutput, error) { if params == nil { params = &CreateDatasetInput{} @@ -66,12 +65,12 @@ type CreateDatasetInput struct { // The type of dataset. One of the following (case insensitive) values: // - // * + // * // Interactions // - // * Items + // * Items // - // * Users + // * Users // // This member is required. DatasetType *string diff --git a/service/personalize/api_op_CreateDatasetGroup.go b/service/personalize/api_op_CreateDatasetGroup.go index fdd27d72a1a..0cfa454ff3b 100644 --- a/service/personalize/api_op_CreateDatasetGroup.go +++ b/service/personalize/api_op_CreateDatasetGroup.go @@ -14,47 +14,46 @@ import ( // supply data for training a model. A dataset group can contain at most three // datasets, one for each type of dataset: // -// * Interactions +// * Interactions // -// * Items +// * Items // -// * -// Users +// * Users // -// To train a model (create a solution), a dataset group that contains an -// Interactions dataset is required. Call CreateDataset to add a dataset to the -// group. A dataset group can be in one of the following states: +// To +// train a model (create a solution), a dataset group that contains an Interactions +// dataset is required. Call CreateDataset to add a dataset to the group. A dataset +// group can be in one of the following states: // -// * CREATE -// PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED +// * CREATE PENDING > CREATE +// IN_PROGRESS > ACTIVE -or- CREATE FAILED // -// * DELETE -// PENDING +// * DELETE PENDING // -// To get the status of the dataset group, call DescribeDatasetGroup. If -// the status shows as CREATE FAILED, the response includes a failureReason key, -// which describes why the creation failed. You must wait until the status of the -// dataset group is ACTIVE before adding a dataset to the group. You can specify an -// AWS Key Management Service (KMS) key to encrypt the datasets in the group. If -// you specify a KMS key, you must also include an AWS Identity and Access -// Management (IAM) role that has permission to access the key. APIs that require a -// dataset group ARN in the request +// To get the status of +// the dataset group, call DescribeDatasetGroup. If the status shows as CREATE +// FAILED, the response includes a failureReason key, which describes why the +// creation failed. You must wait until the status of the dataset group is ACTIVE +// before adding a dataset to the group. You can specify an AWS Key Management +// Service (KMS) key to encrypt the datasets in the group. 
If you specify a KMS +// key, you must also include an AWS Identity and Access Management (IAM) role that +// has permission to access the key. APIs that require a dataset group ARN in the +// request // -// * CreateDataset +// * CreateDataset // -// * -// CreateEventTracker +// * CreateEventTracker // -// * CreateSolution +// * CreateSolution // -// Related APIs +// Related +// APIs // -// * -// ListDatasetGroups +// * ListDatasetGroups // -// * DescribeDatasetGroup +// * DescribeDatasetGroup // -// * DeleteDatasetGroup +// * DeleteDatasetGroup func (c *Client) CreateDatasetGroup(ctx context.Context, params *CreateDatasetGroupInput, optFns ...func(*Options)) (*CreateDatasetGroupOutput, error) { if params == nil { params = &CreateDatasetGroupInput{} diff --git a/service/personalize/api_op_CreateDatasetImportJob.go b/service/personalize/api_op_CreateDatasetImportJob.go index 07585fe4387..562e052f8e9 100644 --- a/service/personalize/api_op_CreateDatasetImportJob.go +++ b/service/personalize/api_op_CreateDatasetImportJob.go @@ -19,7 +19,7 @@ import ( // dataset import job replaces any previous data in the dataset. Status A dataset // import job can be in one of the following states: // -// * CREATE PENDING > CREATE +// * CREATE PENDING > CREATE // IN_PROGRESS > ACTIVE -or- CREATE FAILED // // To get the status of the import job, @@ -30,10 +30,9 @@ import ( // must wait until the status shows as ACTIVE before training a model using the // dataset. Related APIs // -// * ListDatasetImportJobs +// * ListDatasetImportJobs // -// * -// DescribeDatasetImportJob +// * DescribeDatasetImportJob func (c *Client) CreateDatasetImportJob(ctx context.Context, params *CreateDatasetImportJobInput, optFns ...func(*Options)) (*CreateDatasetImportJobOutput, error) { if params == nil { params = &CreateDatasetImportJobInput{} diff --git a/service/personalize/api_op_CreateEventTracker.go b/service/personalize/api_op_CreateEventTracker.go index 4a1afc769f9..45d2f5daad7 100644 --- a/service/personalize/api_op_CreateEventTracker.go +++ b/service/personalize/api_op_CreateEventTracker.go @@ -23,22 +23,21 @@ import ( // identifies the customer and authorizes the customer to send the data. The event // tracker can be in one of the following states: // -// * CREATE PENDING > CREATE +// * CREATE PENDING > CREATE // IN_PROGRESS > ACTIVE -or- CREATE FAILED // -// * DELETE PENDING > DELETE +// * DELETE PENDING > DELETE // IN_PROGRESS // // To get the status of the event tracker, call DescribeEventTracker. // The event tracker must be in the ACTIVE state before using the tracking ID. // Related APIs // -// * ListEventTrackers +// * ListEventTrackers // -// * DescribeEventTracker +// * DescribeEventTracker // -// * -// DeleteEventTracker +// * DeleteEventTracker func (c *Client) CreateEventTracker(ctx context.Context, params *CreateEventTrackerInput, optFns ...func(*Options)) (*CreateEventTrackerOutput, error) { if params == nil { params = &CreateEventTrackerInput{} diff --git a/service/personalize/api_op_CreateSchema.go b/service/personalize/api_op_CreateSchema.go index a937b27d9ff..9100a3ad5fc 100644 --- a/service/personalize/api_op_CreateSchema.go +++ b/service/personalize/api_op_CreateSchema.go @@ -16,12 +16,11 @@ import ( // set of required field and keywords. You specify a schema when you call // CreateDataset. 
Related APIs // -// * ListSchemas +// * ListSchemas // -// * DescribeSchema +// * DescribeSchema // -// * -// DeleteSchema +// * DeleteSchema func (c *Client) CreateSchema(ctx context.Context, params *CreateSchemaInput, optFns ...func(*Options)) (*CreateSchemaOutput, error) { if params == nil { params = &CreateSchemaInput{} diff --git a/service/personalize/api_op_CreateSolution.go b/service/personalize/api_op_CreateSolution.go index 5816ba07613..69a6805d076 100644 --- a/service/personalize/api_op_CreateSolution.go +++ b/service/personalize/api_op_CreateSolution.go @@ -27,28 +27,28 @@ import ( // your data and select the optimum USER_PERSONALIZATION recipe for you. Status A // solution can be in one of the following states: // -// * CREATE PENDING > CREATE +// * CREATE PENDING > CREATE // IN_PROGRESS > ACTIVE -or- CREATE FAILED // -// * DELETE PENDING > DELETE +// * DELETE PENDING > DELETE // IN_PROGRESS // // To get the status of the solution, call DescribeSolution. Wait // until the status shows as ACTIVE before calling CreateSolutionVersion. Related // APIs // -// * ListSolutions +// * ListSolutions // -// * CreateSolutionVersion +// * CreateSolutionVersion // -// * DescribeSolution +// * DescribeSolution // +// * +// DeleteSolution // -// * DeleteSolution +// * ListSolutionVersions // -// * ListSolutionVersions -// -// * DescribeSolutionVersion +// * DescribeSolutionVersion func (c *Client) CreateSolution(ctx context.Context, params *CreateSolutionInput, optFns ...func(*Options)) (*CreateSolutionOutput, error) { if params == nil { params = &CreateSolutionInput{} diff --git a/service/personalize/api_op_CreateSolutionVersion.go b/service/personalize/api_op_CreateSolutionVersion.go index c6ed7d0d928..9e285aedf95 100644 --- a/service/personalize/api_op_CreateSolutionVersion.go +++ b/service/personalize/api_op_CreateSolutionVersion.go @@ -17,27 +17,27 @@ import ( // call this operation. Status A solution version can be in one of the following // states: // -// * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE -// FAILED +// * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED // -// To get the status of the version, call DescribeSolutionVersion. Wait -// until the status shows as ACTIVE before calling CreateCampaign. If the status -// shows as CREATE FAILED, the response includes a failureReason key, which -// describes why the job failed. Related APIs +// To +// get the status of the version, call DescribeSolutionVersion. Wait until the +// status shows as ACTIVE before calling CreateCampaign. If the status shows as +// CREATE FAILED, the response includes a failureReason key, which describes why +// the job failed. Related APIs // -// * ListSolutionVersions +// * ListSolutionVersions // -// * +// * // DescribeSolutionVersion // -// * ListSolutions +// * ListSolutions // -// * CreateSolution +// * CreateSolution // -// * +// * // DescribeSolution // -// * DeleteSolution +// * DeleteSolution func (c *Client) CreateSolutionVersion(ctx context.Context, params *CreateSolutionVersionInput, optFns ...func(*Options)) (*CreateSolutionVersionOutput, error) { if params == nil { params = &CreateSolutionVersionInput{} diff --git a/service/personalize/api_op_DeleteDatasetGroup.go b/service/personalize/api_op_DeleteDatasetGroup.go index 545109fcab6..d11e309c05f 100644 --- a/service/personalize/api_op_DeleteDatasetGroup.go +++ b/service/personalize/api_op_DeleteDatasetGroup.go @@ -13,12 +13,12 @@ import ( // Deletes a dataset group. 
Before you delete a dataset group, you must delete the // following: // -// * All associated event trackers. +// * All associated event trackers. // -// * All associated -// solutions. +// * All associated solutions. // -// * All datasets in the dataset group. +// * All +// datasets in the dataset group. func (c *Client) DeleteDatasetGroup(ctx context.Context, params *DeleteDatasetGroupInput, optFns ...func(*Options)) (*DeleteDatasetGroupOutput, error) { if params == nil { params = &DeleteDatasetGroupInput{} diff --git a/service/personalize/api_op_DescribeCampaign.go b/service/personalize/api_op_DescribeCampaign.go index ba04b504656..1297dc663cd 100644 --- a/service/personalize/api_op_DescribeCampaign.go +++ b/service/personalize/api_op_DescribeCampaign.go @@ -14,14 +14,14 @@ import ( // Describes the given campaign, including its status. A campaign can be in one of // the following states: // -// * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- +// * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- // CREATE FAILED // -// * DELETE PENDING > DELETE IN_PROGRESS +// * DELETE PENDING > DELETE IN_PROGRESS // -// When the status is -// CREATE FAILED, the response includes the failureReason key, which describes why. -// For more information on campaigns, see CreateCampaign. +// When the status is CREATE +// FAILED, the response includes the failureReason key, which describes why. For +// more information on campaigns, see CreateCampaign. func (c *Client) DescribeCampaign(ctx context.Context, params *DescribeCampaignInput, optFns ...func(*Options)) (*DescribeCampaignOutput, error) { if params == nil { params = &DescribeCampaignInput{} diff --git a/service/personalize/api_op_DescribeDatasetImportJob.go b/service/personalize/api_op_DescribeDatasetImportJob.go index cb8967f4a39..84c2899b31b 100644 --- a/service/personalize/api_op_DescribeDatasetImportJob.go +++ b/service/personalize/api_op_DescribeDatasetImportJob.go @@ -41,14 +41,14 @@ type DescribeDatasetImportJobOutput struct { // Information about the dataset import job, including the status. The status is // one of the following values: // - // * CREATE PENDING + // * CREATE PENDING // - // * CREATE IN_PROGRESS + // * CREATE IN_PROGRESS // + // * + // ACTIVE // - // * ACTIVE - // - // * CREATE FAILED + // * CREATE FAILED DatasetImportJob *types.DatasetImportJob // Metadata pertaining to the operation's result. diff --git a/service/personalize/api_op_DescribeRecipe.go b/service/personalize/api_op_DescribeRecipe.go index 23e05e629b1..357d744621c 100644 --- a/service/personalize/api_op_DescribeRecipe.go +++ b/service/personalize/api_op_DescribeRecipe.go @@ -13,20 +13,19 @@ import ( // Describes a recipe. A recipe contains three items: // -// * An algorithm that -// trains a model. +// * An algorithm that trains a +// model. // -// * Hyperparameters that govern the training. +// * Hyperparameters that govern the training. // -// * Feature -// transformation information for modifying the input data before training. +// * Feature transformation +// information for modifying the input data before training. // -// Amazon -// Personalize provides a set of predefined recipes. You specify a recipe when you -// create a solution with the CreateSolution API. CreateSolution trains a model by -// using the algorithm in the specified recipe and a training dataset. The -// solution, when deployed as a campaign, can provide recommendations using the -// GetRecommendations +// Amazon Personalize +// provides a set of predefined recipes. 
You specify a recipe when you create a +// solution with the CreateSolution API. CreateSolution trains a model by using the +// algorithm in the specified recipe and a training dataset. The solution, when +// deployed as a campaign, can provide recommendations using the GetRecommendations // (https://docs.aws.amazon.com/personalize/latest/dg/API_RS_GetRecommendations.html) // API. func (c *Client) DescribeRecipe(ctx context.Context, params *DescribeRecipeInput, optFns ...func(*Options)) (*DescribeRecipeOutput, error) { diff --git a/service/personalize/types/types.go b/service/personalize/types/types.go index 0436692cfeb..2fe0a3db9ba 100644 --- a/service/personalize/types/types.go +++ b/service/personalize/types/types.go @@ -122,13 +122,13 @@ type BatchInferenceJob struct { // The status of the batch inference job. The status is one of the following // values: // - // * PENDING + // * PENDING // - // * IN PROGRESS + // * IN PROGRESS // - // * ACTIVE + // * ACTIVE // - // * CREATE FAILED + // * CREATE FAILED Status *string } @@ -186,13 +186,13 @@ type BatchInferenceJobSummary struct { // The status of the batch inference job. The status is one of the following // values: // - // * PENDING + // * PENDING // - // * IN PROGRESS + // * IN PROGRESS // - // * ACTIVE + // * ACTIVE // - // * CREATE FAILED + // * CREATE FAILED Status *string } @@ -231,10 +231,10 @@ type Campaign struct { // The status of the campaign. A campaign can be in one of the following states: // + // * + // CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED // - // * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED - // - // * DELETE + // * DELETE // PENDING > DELETE IN_PROGRESS Status *string } @@ -268,10 +268,10 @@ type CampaignSummary struct { // The status of the campaign. A campaign can be in one of the following states: // + // * + // CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED // - // * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED - // - // * DELETE + // * DELETE // PENDING > DELETE IN_PROGRESS Status *string } @@ -302,10 +302,10 @@ type CampaignUpdateSummary struct { // The status of the campaign update. A campaign update can be in one of the // following states: // - // * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- - // CREATE FAILED + // * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE + // FAILED // - // * DELETE PENDING > DELETE IN_PROGRESS + // * DELETE PENDING > DELETE IN_PROGRESS Status *string } @@ -346,11 +346,11 @@ type Dataset struct { // One of the following values: // - // * Interactions + // * Interactions // - // * Items + // * Items // - // * Users + // * Users DatasetType *string // A time stamp that shows when the dataset was updated. @@ -364,10 +364,10 @@ type Dataset struct { // The status of the dataset. A dataset can be in one of the following states: // + // * + // CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED // - // * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED - // - // * DELETE + // * DELETE // PENDING > DELETE IN_PROGRESS Status *string } @@ -404,10 +404,10 @@ type DatasetGroup struct { // The current status of the dataset group. 
A dataset group can be in one of the // following states: // - // * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- - // CREATE FAILED + // * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE + // FAILED // - // * DELETE PENDING + // * DELETE PENDING Status *string } @@ -433,10 +433,10 @@ type DatasetGroupSummary struct { // The status of the dataset group. A dataset group can be in one of the following // states: // - // * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED - // + // * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED // - // * DELETE PENDING + // * + // DELETE PENDING Status *string } @@ -445,7 +445,7 @@ type DatasetGroupSummary struct { // CreateDatasetImportJob. A dataset import job can be in one of the following // states: // -// * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED +// * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED type DatasetImportJob struct { // The creation date and time (in Unix time) of the dataset import job. @@ -476,8 +476,8 @@ type DatasetImportJob struct { // The status of the dataset import job. A dataset import job can be in one of the // following states: // - // * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- - // CREATE FAILED + // * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE + // FAILED Status *string } @@ -503,8 +503,8 @@ type DatasetImportJobSummary struct { // The status of the dataset import job. A dataset import job can be in one of the // following states: // - // * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- - // CREATE FAILED + // * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE + // FAILED Status *string } @@ -557,14 +557,14 @@ type DatasetSummary struct { // The dataset type. One of the following values: // - // * Interactions + // * Interactions // - // * - // Items + // * Items // - // * Users + // * + // Users // - // * Event-Interactions + // * Event-Interactions DatasetType *string // The date and time (in Unix time) that the dataset was last updated. @@ -575,10 +575,10 @@ type DatasetSummary struct { // The status of the dataset. A dataset can be in one of the following states: // + // * + // CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED // - // * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED - // - // * DELETE + // * DELETE // PENDING > DELETE IN_PROGRESS Status *string } @@ -681,10 +681,10 @@ type EventTracker struct { // The status of the event tracker. An event tracker can be in one of the following // states: // - // * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED - // + // * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED // - // * DELETE PENDING > DELETE IN_PROGRESS + // * + // DELETE PENDING > DELETE IN_PROGRESS Status *string // The ID of the event tracker. Include this ID in requests to the PutEvents @@ -711,10 +711,10 @@ type EventTrackerSummary struct { // The status of the event tracker. An event tracker can be in one of the following // states: // - // * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED - // + // * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED // - // * DELETE PENDING > DELETE IN_PROGRESS + // * + // DELETE PENDING > DELETE IN_PROGRESS Status *string } @@ -741,8 +741,8 @@ type FeatureTransformation struct { // The status of the feature transformation. 
A feature transformation can be in one // of the following states: // - // * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE - // -or- CREATE FAILED + // * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- + // CREATE FAILED Status *string } @@ -899,12 +899,12 @@ type Recipe struct { // One of the following values: // - // * PERSONALIZED_RANKING - // - // * RELATED_ITEMS + // * PERSONALIZED_RANKING // + // * RELATED_ITEMS // - // * USER_PERSONALIZATION + // * + // USER_PERSONALIZATION RecipeType *string // The status of the recipe. @@ -993,10 +993,10 @@ type Solution struct { // The status of the solution. A solution can be in one of the following states: // + // * + // CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED // - // * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED - // - // * DELETE + // * DELETE // PENDING > DELETE IN_PROGRESS Status *string } @@ -1040,10 +1040,10 @@ type SolutionSummary struct { // The status of the solution. A solution can be in one of the following states: // + // * + // CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED // - // * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED - // - // * DELETE + // * DELETE // PENDING > DELETE IN_PROGRESS Status *string } @@ -1091,14 +1091,14 @@ type SolutionVersion struct { // The status of the solution version. A solution version can be in one of the // following states: // - // * CREATE PENDING + // * CREATE PENDING // - // * CREATE IN_PROGRESS + // * CREATE IN_PROGRESS // - // * - // ACTIVE + // * ACTIVE // - // * CREATE FAILED + // * CREATE + // FAILED Status *string // The time used to train the model. You are billed for the time it takes to train @@ -1140,8 +1140,8 @@ type SolutionVersionSummary struct { // The status of the solution version. A solution version can be in one of the // following states: // - // * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- - // CREATE FAILED + // * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE + // FAILED Status *string } diff --git a/service/personalizeruntime/api_op_GetRecommendations.go b/service/personalizeruntime/api_op_GetRecommendations.go index ead86b01456..70cc3c3f9e8 100644 --- a/service/personalizeruntime/api_op_GetRecommendations.go +++ b/service/personalizeruntime/api_op_GetRecommendations.go @@ -14,10 +14,10 @@ import ( // Returns a list of recommended items. The required input depends on the recipe // type used to create the solution backing the campaign, as follows: // -// * +// * // RELATED_ITEMS - itemId required, userId not used // -// * USER_PERSONALIZATION - +// * USER_PERSONALIZATION - // itemId optional, userId required // // Campaigns that are backed by a solution diff --git a/service/pi/api_op_DescribeDimensionKeys.go b/service/pi/api_op_DescribeDimensionKeys.go index bac4d509607..8cdf180048e 100644 --- a/service/pi/api_op_DescribeDimensionKeys.go +++ b/service/pi/api_op_DescribeDimensionKeys.go @@ -58,10 +58,10 @@ type DescribeDimensionKeysInput struct { // The name of a Performance Insights metric to be measured. Valid values for // Metric are: // - // * db.load.avg - a scaled representation of the number of active + // * db.load.avg - a scaled representation of the number of active // sessions for the database engine. // - // * db.sampledload.avg - the raw number of + // * db.sampledload.avg - the raw number of // active sessions for the database engine. // // This member is required. 
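A minimal caller sketch for the DescribeDimensionKeys parameters documented in the surrounding hunks of this file, assuming an already-constructed pi.Client and the pointer-based field shapes shown in this revision; the package name, resource identifier, time range, and db.wait_event grouping below are placeholder choices, not values taken from the patch.

package piexample

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/pi"
	"github.com/aws/aws-sdk-go-v2/service/pi/types"
)

// topWaitEvents asks Performance Insights for the top wait events over the last
// hour, using the db.load.avg metric and the five-minute resolution
// (PeriodInSeconds = 300) described in the doc comments above and below.
func topWaitEvents(ctx context.Context, client *pi.Client, dbResourceID string) (*pi.DescribeDimensionKeysOutput, error) {
	now := time.Now()
	return client.DescribeDimensionKeys(ctx, &pi.DescribeDimensionKeysInput{
		ServiceType: types.ServiceTypeRds,
		Identifier:  aws.String(dbResourceID),
		// db.load.avg - a scaled representation of the number of active sessions.
		Metric:    aws.String("db.load.avg"),
		StartTime: aws.Time(now.Add(-1 * time.Hour)),
		EndTime:   aws.Time(now),
		// Omitting PeriodInSeconds would let Performance Insights pick a
		// resolution that yields roughly 100-200 data points.
		PeriodInSeconds: aws.Int32(300),
		GroupBy: &types.DimensionGroup{
			Group: aws.String("db.wait_event"),
			Limit: aws.Int32(10),
		},
	})
}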
@@ -83,12 +83,11 @@ type DescribeDimensionKeysInput struct { // One or more filters to apply in the request. Restrictions: // - // * Any number of + // * Any number of // filters by the same dimension, as specified in the GroupBy or Partition // parameters. // - // * A single filter for any other dimension in this dimension - // group. + // * A single filter for any other dimension in this dimension group. Filter map[string]*string // The maximum number of items to return in the response. If more items exist than @@ -109,20 +108,20 @@ type DescribeDimensionKeysInput struct { // Insights. A period can be as short as one second, or as long as one day (86400 // seconds). Valid values are: // - // * 1 (one second) + // * 1 (one second) // - // * 60 (one minute) + // * 60 (one minute) // - // * - // 300 (five minutes) + // * 300 (five + // minutes) // - // * 3600 (one hour) + // * 3600 (one hour) // - // * 86400 (twenty-four hours) + // * 86400 (twenty-four hours) // - // If - // you don't specify PeriodInSeconds, then Performance Insights will choose a value - // for you, with a goal of returning roughly 100-200 data points in the response. + // If you don't specify + // PeriodInSeconds, then Performance Insights will choose a value for you, with a + // goal of returning roughly 100-200 data points in the response. PeriodInSeconds *int32 } diff --git a/service/pi/api_op_GetResourceMetrics.go b/service/pi/api_op_GetResourceMetrics.go index d56bdf3d351..69847c998dd 100644 --- a/service/pi/api_op_GetResourceMetrics.go +++ b/service/pi/api_op_GetResourceMetrics.go @@ -83,20 +83,20 @@ type GetResourceMetricsInput struct { // Insights. A period can be as short as one second, or as long as one day (86400 // seconds). Valid values are: // - // * 1 (one second) + // * 1 (one second) // - // * 60 (one minute) + // * 60 (one minute) // - // * - // 300 (five minutes) + // * 300 (five + // minutes) // - // * 3600 (one hour) + // * 3600 (one hour) // - // * 86400 (twenty-four hours) + // * 86400 (twenty-four hours) // - // If - // you don't specify PeriodInSeconds, then Performance Insights will choose a value - // for you, with a goal of returning roughly 100-200 data points in the response. + // If you don't specify + // PeriodInSeconds, then Performance Insights will choose a value for you, with a + // goal of returning roughly 100-200 data points in the response. PeriodInSeconds *int32 } diff --git a/service/pi/types/types.go b/service/pi/types/types.go index 5415edd4916..cb260a2916d 100644 --- a/service/pi/types/types.go +++ b/service/pi/types/types.go @@ -28,19 +28,18 @@ type DimensionGroup struct { // The name of the dimension group. Valid values are: // - // * db.user + // * db.user // - // * - // db.host + // * db.host // - // * db.sql + // * + // db.sql // - // * db.sql_tokenized + // * db.sql_tokenized // - // * db.wait_event + // * db.wait_event // - // * - // db.wait_event_type + // * db.wait_event_type // // This member is required. Group *string @@ -50,37 +49,37 @@ type DimensionGroup struct { // requested, or are present in the response. 
Valid values for elements in the // Dimensions array are: // - // * db.user.id + // * db.user.id // - // * db.user.name + // * db.user.name // - // * db.host.id + // * db.host.id // + // * + // db.host.name // - // * db.host.name + // * db.sql.id // - // * db.sql.id + // * db.sql.db_id // - // * db.sql.db_id + // * db.sql.statement // - // * db.sql.statement + // * + // db.sql.tokenized_id // + // * db.sql_tokenized.id // - // * db.sql.tokenized_id + // * db.sql_tokenized.db_id // - // * db.sql_tokenized.id + // * + // db.sql_tokenized.statement // - // * db.sql_tokenized.db_id + // * db.wait_event.name // + // * db.wait_event.type // - // * db.sql_tokenized.statement - // - // * db.wait_event.name - // - // * - // db.wait_event.type - // - // * db.wait_event_type.name + // * + // db.wait_event_type.name Dimensions []*string // The maximum number of items to fetch for this dimension group. @@ -123,10 +122,10 @@ type MetricQuery struct { // The name of a Performance Insights metric to be measured. Valid values for // Metric are: // - // * db.load.avg - a scaled representation of the number of active + // * db.load.avg - a scaled representation of the number of active // sessions for the database engine. // - // * db.sampledload.avg - the raw number of + // * db.sampledload.avg - the raw number of // active sessions for the database engine. // // This member is required. @@ -134,10 +133,10 @@ type MetricQuery struct { // One or more filters to apply in the request. Restrictions: // - // * Any number of + // * Any number of // filters by the same dimension, as specified in the GroupBy parameter. // - // * A + // * A // single filter for any other dimension in this dimension group. Filter map[string]*string @@ -166,10 +165,10 @@ type ResponseResourceMetricKey struct { // The name of a Performance Insights metric to be measured. Valid values for // Metric are: // - // * db.load.avg - a scaled representation of the number of active + // * db.load.avg - a scaled representation of the number of active // sessions for the database engine. // - // * db.sampledload.avg - the raw number of + // * db.sampledload.avg - the raw number of // active sessions for the database engine. // // This member is required. diff --git a/service/pinpoint/api_op_DeleteEmailTemplate.go b/service/pinpoint/api_op_DeleteEmailTemplate.go index 16ae374f8e2..626420dc524 100644 --- a/service/pinpoint/api_op_DeleteEmailTemplate.go +++ b/service/pinpoint/api_op_DeleteEmailTemplate.go @@ -47,16 +47,16 @@ type DeleteEmailTemplateInput struct { // helps ensure that race conditions don't occur. If you don't specify a value for // this parameter, Amazon Pinpoint does the following: // - // * For a get operation, + // * For a get operation, // retrieves information about the active version of the template. // - // * For an - // update operation, saves the updates to (overwrites) the latest existing version - // of the template, if the create-new-version parameter isn't used or is set to - // false. + // * For an update + // operation, saves the updates to (overwrites) the latest existing version of the + // template, if the create-new-version parameter isn't used or is set to false. // - // * For a delete operation, deletes the template, including all - // versions of the template. + // * + // For a delete operation, deletes the template, including all versions of the + // template. 
Version *string } diff --git a/service/pinpoint/api_op_DeletePushTemplate.go b/service/pinpoint/api_op_DeletePushTemplate.go index 8ad6bd77471..da66234680e 100644 --- a/service/pinpoint/api_op_DeletePushTemplate.go +++ b/service/pinpoint/api_op_DeletePushTemplate.go @@ -47,16 +47,16 @@ type DeletePushTemplateInput struct { // helps ensure that race conditions don't occur. If you don't specify a value for // this parameter, Amazon Pinpoint does the following: // - // * For a get operation, + // * For a get operation, // retrieves information about the active version of the template. // - // * For an - // update operation, saves the updates to (overwrites) the latest existing version - // of the template, if the create-new-version parameter isn't used or is set to - // false. + // * For an update + // operation, saves the updates to (overwrites) the latest existing version of the + // template, if the create-new-version parameter isn't used or is set to false. // - // * For a delete operation, deletes the template, including all - // versions of the template. + // * + // For a delete operation, deletes the template, including all versions of the + // template. Version *string } diff --git a/service/pinpoint/api_op_DeleteSmsTemplate.go b/service/pinpoint/api_op_DeleteSmsTemplate.go index 407c769bba6..914a184eca9 100644 --- a/service/pinpoint/api_op_DeleteSmsTemplate.go +++ b/service/pinpoint/api_op_DeleteSmsTemplate.go @@ -46,16 +46,16 @@ type DeleteSmsTemplateInput struct { // helps ensure that race conditions don't occur. If you don't specify a value for // this parameter, Amazon Pinpoint does the following: // - // * For a get operation, + // * For a get operation, // retrieves information about the active version of the template. // - // * For an - // update operation, saves the updates to (overwrites) the latest existing version - // of the template, if the create-new-version parameter isn't used or is set to - // false. + // * For an update + // operation, saves the updates to (overwrites) the latest existing version of the + // template, if the create-new-version parameter isn't used or is set to false. // - // * For a delete operation, deletes the template, including all - // versions of the template. + // * + // For a delete operation, deletes the template, including all versions of the + // template. Version *string } diff --git a/service/pinpoint/api_op_DeleteVoiceTemplate.go b/service/pinpoint/api_op_DeleteVoiceTemplate.go index fa0ce58dafb..4ca8b28f932 100644 --- a/service/pinpoint/api_op_DeleteVoiceTemplate.go +++ b/service/pinpoint/api_op_DeleteVoiceTemplate.go @@ -47,16 +47,16 @@ type DeleteVoiceTemplateInput struct { // helps ensure that race conditions don't occur. If you don't specify a value for // this parameter, Amazon Pinpoint does the following: // - // * For a get operation, + // * For a get operation, // retrieves information about the active version of the template. // - // * For an - // update operation, saves the updates to (overwrites) the latest existing version - // of the template, if the create-new-version parameter isn't used or is set to - // false. + // * For an update + // operation, saves the updates to (overwrites) the latest existing version of the + // template, if the create-new-version parameter isn't used or is set to false. // - // * For a delete operation, deletes the template, including all - // versions of the template. + // * + // For a delete operation, deletes the template, including all versions of the + // template. 
Version *string } diff --git a/service/pinpoint/api_op_GetEmailTemplate.go b/service/pinpoint/api_op_GetEmailTemplate.go index d874d2b12fc..0ae73326779 100644 --- a/service/pinpoint/api_op_GetEmailTemplate.go +++ b/service/pinpoint/api_op_GetEmailTemplate.go @@ -47,16 +47,16 @@ type GetEmailTemplateInput struct { // helps ensure that race conditions don't occur. If you don't specify a value for // this parameter, Amazon Pinpoint does the following: // - // * For a get operation, + // * For a get operation, // retrieves information about the active version of the template. // - // * For an - // update operation, saves the updates to (overwrites) the latest existing version - // of the template, if the create-new-version parameter isn't used or is set to - // false. + // * For an update + // operation, saves the updates to (overwrites) the latest existing version of the + // template, if the create-new-version parameter isn't used or is set to false. // - // * For a delete operation, deletes the template, including all - // versions of the template. + // * + // For a delete operation, deletes the template, including all versions of the + // template. Version *string } diff --git a/service/pinpoint/api_op_GetPushTemplate.go b/service/pinpoint/api_op_GetPushTemplate.go index cbb247c4bc1..30b30256a8b 100644 --- a/service/pinpoint/api_op_GetPushTemplate.go +++ b/service/pinpoint/api_op_GetPushTemplate.go @@ -47,16 +47,16 @@ type GetPushTemplateInput struct { // helps ensure that race conditions don't occur. If you don't specify a value for // this parameter, Amazon Pinpoint does the following: // - // * For a get operation, + // * For a get operation, // retrieves information about the active version of the template. // - // * For an - // update operation, saves the updates to (overwrites) the latest existing version - // of the template, if the create-new-version parameter isn't used or is set to - // false. + // * For an update + // operation, saves the updates to (overwrites) the latest existing version of the + // template, if the create-new-version parameter isn't used or is set to false. // - // * For a delete operation, deletes the template, including all - // versions of the template. + // * + // For a delete operation, deletes the template, including all versions of the + // template. Version *string } diff --git a/service/pinpoint/api_op_GetSmsTemplate.go b/service/pinpoint/api_op_GetSmsTemplate.go index 3d5e6b0d80a..59de1856b33 100644 --- a/service/pinpoint/api_op_GetSmsTemplate.go +++ b/service/pinpoint/api_op_GetSmsTemplate.go @@ -47,16 +47,16 @@ type GetSmsTemplateInput struct { // helps ensure that race conditions don't occur. If you don't specify a value for // this parameter, Amazon Pinpoint does the following: // - // * For a get operation, + // * For a get operation, // retrieves information about the active version of the template. // - // * For an - // update operation, saves the updates to (overwrites) the latest existing version - // of the template, if the create-new-version parameter isn't used or is set to - // false. + // * For an update + // operation, saves the updates to (overwrites) the latest existing version of the + // template, if the create-new-version parameter isn't used or is set to false. // - // * For a delete operation, deletes the template, including all - // versions of the template. + // * + // For a delete operation, deletes the template, including all versions of the + // template. 
Version *string } diff --git a/service/pinpoint/api_op_GetVoiceTemplate.go b/service/pinpoint/api_op_GetVoiceTemplate.go index 1830ae04253..2be6a6ea50e 100644 --- a/service/pinpoint/api_op_GetVoiceTemplate.go +++ b/service/pinpoint/api_op_GetVoiceTemplate.go @@ -47,16 +47,16 @@ type GetVoiceTemplateInput struct { // helps ensure that race conditions don't occur. If you don't specify a value for // this parameter, Amazon Pinpoint does the following: // - // * For a get operation, + // * For a get operation, // retrieves information about the active version of the template. // - // * For an - // update operation, saves the updates to (overwrites) the latest existing version - // of the template, if the create-new-version parameter isn't used or is set to - // false. + // * For an update + // operation, saves the updates to (overwrites) the latest existing version of the + // template, if the create-new-version parameter isn't used or is set to false. // - // * For a delete operation, deletes the template, including all - // versions of the template. + // * + // For a delete operation, deletes the template, including all versions of the + // template. Version *string } diff --git a/service/pinpoint/api_op_RemoveAttributes.go b/service/pinpoint/api_op_RemoveAttributes.go index d1ca93b860b..b21afcc06e4 100644 --- a/service/pinpoint/api_op_RemoveAttributes.go +++ b/service/pinpoint/api_op_RemoveAttributes.go @@ -38,16 +38,16 @@ type RemoveAttributesInput struct { // The type of attribute or attributes to remove. Valid values are: // - // * + // * // endpoint-custom-attributes - Custom attributes that describe endpoints, such as // the date when an associated user opted in or out of receiving communications // from you through a specific type of channel. // - // * endpoint-metric-attributes - + // * endpoint-metric-attributes - // Custom metrics that your app reports to Amazon Pinpoint for endpoints, such as // the number of app sessions or the number of items left in a cart. // - // * + // * // endpoint-user-attributes - Custom attributes that describe users, such as first // name, last name, and age. // diff --git a/service/pinpoint/api_op_UpdateEmailTemplate.go b/service/pinpoint/api_op_UpdateEmailTemplate.go index 2786497bf2d..859f7655905 100644 --- a/service/pinpoint/api_op_UpdateEmailTemplate.go +++ b/service/pinpoint/api_op_UpdateEmailTemplate.go @@ -62,16 +62,16 @@ type UpdateEmailTemplateInput struct { // helps ensure that race conditions don't occur. If you don't specify a value for // this parameter, Amazon Pinpoint does the following: // - // * For a get operation, + // * For a get operation, // retrieves information about the active version of the template. // - // * For an - // update operation, saves the updates to (overwrites) the latest existing version - // of the template, if the create-new-version parameter isn't used or is set to - // false. + // * For an update + // operation, saves the updates to (overwrites) the latest existing version of the + // template, if the create-new-version parameter isn't used or is set to false. // - // * For a delete operation, deletes the template, including all - // versions of the template. + // * + // For a delete operation, deletes the template, including all versions of the + // template. 
Version *string } diff --git a/service/pinpoint/api_op_UpdatePushTemplate.go b/service/pinpoint/api_op_UpdatePushTemplate.go index cbff84da82a..179876bc036 100644 --- a/service/pinpoint/api_op_UpdatePushTemplate.go +++ b/service/pinpoint/api_op_UpdatePushTemplate.go @@ -62,16 +62,16 @@ type UpdatePushTemplateInput struct { // helps ensure that race conditions don't occur. If you don't specify a value for // this parameter, Amazon Pinpoint does the following: // - // * For a get operation, + // * For a get operation, // retrieves information about the active version of the template. // - // * For an - // update operation, saves the updates to (overwrites) the latest existing version - // of the template, if the create-new-version parameter isn't used or is set to - // false. + // * For an update + // operation, saves the updates to (overwrites) the latest existing version of the + // template, if the create-new-version parameter isn't used or is set to false. // - // * For a delete operation, deletes the template, including all - // versions of the template. + // * + // For a delete operation, deletes the template, including all versions of the + // template. Version *string } diff --git a/service/pinpoint/api_op_UpdateSmsTemplate.go b/service/pinpoint/api_op_UpdateSmsTemplate.go index 39256cd6e90..22d839ad7f9 100644 --- a/service/pinpoint/api_op_UpdateSmsTemplate.go +++ b/service/pinpoint/api_op_UpdateSmsTemplate.go @@ -62,16 +62,16 @@ type UpdateSmsTemplateInput struct { // helps ensure that race conditions don't occur. If you don't specify a value for // this parameter, Amazon Pinpoint does the following: // - // * For a get operation, + // * For a get operation, // retrieves information about the active version of the template. // - // * For an - // update operation, saves the updates to (overwrites) the latest existing version - // of the template, if the create-new-version parameter isn't used or is set to - // false. + // * For an update + // operation, saves the updates to (overwrites) the latest existing version of the + // template, if the create-new-version parameter isn't used or is set to false. // - // * For a delete operation, deletes the template, including all - // versions of the template. + // * + // For a delete operation, deletes the template, including all versions of the + // template. Version *string } diff --git a/service/pinpoint/api_op_UpdateVoiceTemplate.go b/service/pinpoint/api_op_UpdateVoiceTemplate.go index 0c72278de76..d41368bd0bd 100644 --- a/service/pinpoint/api_op_UpdateVoiceTemplate.go +++ b/service/pinpoint/api_op_UpdateVoiceTemplate.go @@ -62,16 +62,16 @@ type UpdateVoiceTemplateInput struct { // helps ensure that race conditions don't occur. If you don't specify a value for // this parameter, Amazon Pinpoint does the following: // - // * For a get operation, + // * For a get operation, // retrieves information about the active version of the template. // - // * For an - // update operation, saves the updates to (overwrites) the latest existing version - // of the template, if the create-new-version parameter isn't used or is set to - // false. + // * For an update + // operation, saves the updates to (overwrites) the latest existing version of the + // template, if the create-new-version parameter isn't used or is set to false. // - // * For a delete operation, deletes the template, including all - // versions of the template. + // * + // For a delete operation, deletes the template, including all versions of the + // template. 
Version *string } diff --git a/service/pinpoint/types/enums.go b/service/pinpoint/types/enums.go index 4adc2b6d5db..481907dd040 100644 --- a/service/pinpoint/types/enums.go +++ b/service/pinpoint/types/enums.go @@ -6,18 +6,18 @@ type EndpointTypesElement string // Enum values for EndpointTypesElement const ( - EndpointTypesElementPush EndpointTypesElement = "PUSH" - EndpointTypesElementGcm EndpointTypesElement = "GCM" - EndpointTypesElementApns EndpointTypesElement = "APNS" - EndpointTypesElementApns_sandbox EndpointTypesElement = "APNS_SANDBOX" - EndpointTypesElementApns_voip EndpointTypesElement = "APNS_VOIP" - EndpointTypesElementApns_voip_sandbox EndpointTypesElement = "APNS_VOIP_SANDBOX" - EndpointTypesElementAdm EndpointTypesElement = "ADM" - EndpointTypesElementSms EndpointTypesElement = "SMS" - EndpointTypesElementVoice EndpointTypesElement = "VOICE" - EndpointTypesElementEmail EndpointTypesElement = "EMAIL" - EndpointTypesElementBaidu EndpointTypesElement = "BAIDU" - EndpointTypesElementCustom EndpointTypesElement = "CUSTOM" + EndpointTypesElementPush EndpointTypesElement = "PUSH" + EndpointTypesElementGcm EndpointTypesElement = "GCM" + EndpointTypesElementApns EndpointTypesElement = "APNS" + EndpointTypesElementApnsSandbox EndpointTypesElement = "APNS_SANDBOX" + EndpointTypesElementApnsVoip EndpointTypesElement = "APNS_VOIP" + EndpointTypesElementApnsVoipSandbox EndpointTypesElement = "APNS_VOIP_SANDBOX" + EndpointTypesElementAdm EndpointTypesElement = "ADM" + EndpointTypesElementSms EndpointTypesElement = "SMS" + EndpointTypesElementVoice EndpointTypesElement = "VOICE" + EndpointTypesElementEmail EndpointTypesElement = "EMAIL" + EndpointTypesElementBaidu EndpointTypesElement = "BAIDU" + EndpointTypesElementCustom EndpointTypesElement = "CUSTOM" ) // Values returns all known values for EndpointTypesElement. Note that this can be @@ -44,9 +44,9 @@ type Action string // Enum values for Action const ( - ActionOpen_app Action = "OPEN_APP" - ActionDeep_link Action = "DEEP_LINK" - ActionUrl Action = "URL" + ActionOpenApp Action = "OPEN_APP" + ActionDeepLink Action = "DEEP_LINK" + ActionUrl Action = "URL" ) // Values returns all known values for Action. Note that this can be expanded in @@ -82,13 +82,13 @@ type CampaignStatus string // Enum values for CampaignStatus const ( - CampaignStatusScheduled CampaignStatus = "SCHEDULED" - CampaignStatusExecuting CampaignStatus = "EXECUTING" - CampaignStatusPending_next_run CampaignStatus = "PENDING_NEXT_RUN" - CampaignStatusCompleted CampaignStatus = "COMPLETED" - CampaignStatusPaused CampaignStatus = "PAUSED" - CampaignStatusDeleted CampaignStatus = "DELETED" - CampaignStatusInvalid CampaignStatus = "INVALID" + CampaignStatusScheduled CampaignStatus = "SCHEDULED" + CampaignStatusExecuting CampaignStatus = "EXECUTING" + CampaignStatusPendingNextRun CampaignStatus = "PENDING_NEXT_RUN" + CampaignStatusCompleted CampaignStatus = "COMPLETED" + CampaignStatusPaused CampaignStatus = "PAUSED" + CampaignStatusDeleted CampaignStatus = "DELETED" + CampaignStatusInvalid CampaignStatus = "INVALID" ) // Values returns all known values for CampaignStatus. 
Note that this can be @@ -110,18 +110,18 @@ type ChannelType string // Enum values for ChannelType const ( - ChannelTypePush ChannelType = "PUSH" - ChannelTypeGcm ChannelType = "GCM" - ChannelTypeApns ChannelType = "APNS" - ChannelTypeApns_sandbox ChannelType = "APNS_SANDBOX" - ChannelTypeApns_voip ChannelType = "APNS_VOIP" - ChannelTypeApns_voip_sandbox ChannelType = "APNS_VOIP_SANDBOX" - ChannelTypeAdm ChannelType = "ADM" - ChannelTypeSms ChannelType = "SMS" - ChannelTypeVoice ChannelType = "VOICE" - ChannelTypeEmail ChannelType = "EMAIL" - ChannelTypeBaidu ChannelType = "BAIDU" - ChannelTypeCustom ChannelType = "CUSTOM" + ChannelTypePush ChannelType = "PUSH" + ChannelTypeGcm ChannelType = "GCM" + ChannelTypeApns ChannelType = "APNS" + ChannelTypeApnsSandbox ChannelType = "APNS_SANDBOX" + ChannelTypeApnsVoip ChannelType = "APNS_VOIP" + ChannelTypeApnsVoipSandbox ChannelType = "APNS_VOIP_SANDBOX" + ChannelTypeAdm ChannelType = "ADM" + ChannelTypeSms ChannelType = "SMS" + ChannelTypeVoice ChannelType = "VOICE" + ChannelTypeEmail ChannelType = "EMAIL" + ChannelTypeBaidu ChannelType = "BAIDU" + ChannelTypeCustom ChannelType = "CUSTOM" ) // Values returns all known values for ChannelType. Note that this can be expanded @@ -148,13 +148,13 @@ type DeliveryStatus string // Enum values for DeliveryStatus const ( - DeliveryStatusSuccessful DeliveryStatus = "SUCCESSFUL" - DeliveryStatusThrottled DeliveryStatus = "THROTTLED" - DeliveryStatusTemporary_failure DeliveryStatus = "TEMPORARY_FAILURE" - DeliveryStatusPermanent_failure DeliveryStatus = "PERMANENT_FAILURE" - DeliveryStatusUnknown_failure DeliveryStatus = "UNKNOWN_FAILURE" - DeliveryStatusOpt_out DeliveryStatus = "OPT_OUT" - DeliveryStatusDuplicate DeliveryStatus = "DUPLICATE" + DeliveryStatusSuccessful DeliveryStatus = "SUCCESSFUL" + DeliveryStatusThrottled DeliveryStatus = "THROTTLED" + DeliveryStatusTemporaryFailure DeliveryStatus = "TEMPORARY_FAILURE" + DeliveryStatusPermanentFailure DeliveryStatus = "PERMANENT_FAILURE" + DeliveryStatusUnknownFailure DeliveryStatus = "UNKNOWN_FAILURE" + DeliveryStatusOptOut DeliveryStatus = "OPT_OUT" + DeliveryStatusDuplicate DeliveryStatus = "DUPLICATE" ) // Values returns all known values for DeliveryStatus. Note that this can be @@ -194,10 +194,10 @@ type Duration string // Enum values for Duration const ( - DurationHr_24 Duration = "HR_24" - DurationDay_7 Duration = "DAY_7" - DurationDay_14 Duration = "DAY_14" - DurationDay_30 Duration = "DAY_30" + DurationHr24 Duration = "HR_24" + DurationDay7 Duration = "DAY_7" + DurationDay14 Duration = "DAY_14" + DurationDay30 Duration = "DAY_30" ) // Values returns all known values for Duration. 
Note that this can be expanded in @@ -298,15 +298,15 @@ type JobStatus string // Enum values for JobStatus const ( - JobStatusCreated JobStatus = "CREATED" - JobStatusPreparing_for_initialization JobStatus = "PREPARING_FOR_INITIALIZATION" - JobStatusInitializing JobStatus = "INITIALIZING" - JobStatusProcessing JobStatus = "PROCESSING" - JobStatusPending_job JobStatus = "PENDING_JOB" - JobStatusCompleting JobStatus = "COMPLETING" - JobStatusCompleted JobStatus = "COMPLETED" - JobStatusFailing JobStatus = "FAILING" - JobStatusFailed JobStatus = "FAILED" + JobStatusCreated JobStatus = "CREATED" + JobStatusPreparingForInitialization JobStatus = "PREPARING_FOR_INITIALIZATION" + JobStatusInitializing JobStatus = "INITIALIZING" + JobStatusProcessing JobStatus = "PROCESSING" + JobStatusPendingJob JobStatus = "PENDING_JOB" + JobStatusCompleting JobStatus = "COMPLETING" + JobStatusCompleted JobStatus = "COMPLETED" + JobStatusFailing JobStatus = "FAILING" + JobStatusFailed JobStatus = "FAILED" ) // Values returns all known values for JobStatus. Note that this can be expanded in diff --git a/service/pinpoint/types/types.go b/service/pinpoint/types/types.go index cd2ecd7632c..a111eefb3cd 100644 --- a/service/pinpoint/types/types.go +++ b/service/pinpoint/types/types.go @@ -217,14 +217,14 @@ type ADMMessage struct { // The action to occur if the recipient taps the push notification. Valid values // are: // - // * OPEN_APP - Your app opens or it becomes the foreground app if it was + // * OPEN_APP - Your app opens or it becomes the foreground app if it was // sent to the background. This is the default action. // - // * DEEP_LINK - Your app + // * DEEP_LINK - Your app // opens and displays a designated user interface in the app. This action uses the // deep-linking features of the Android platform. // - // * URL - The default mobile + // * URL - The default mobile // browser on the recipient's device opens and loads the web page at a URL that you // specify. Action Action @@ -301,16 +301,16 @@ type AndroidPushNotificationTemplate struct { // The action to occur if a recipient taps a push notification that's based on the // message template. Valid values are: // - // * OPEN_APP - Your app opens or it - // becomes the foreground app if it was sent to the background. This is the default + // * OPEN_APP - Your app opens or it becomes + // the foreground app if it was sent to the background. This is the default // action. // - // * DEEP_LINK - Your app opens and displays a designated user - // interface in the app. This action uses the deep-linking features of the Android + // * DEEP_LINK - Your app opens and displays a designated user interface + // in the app. This action uses the deep-linking features of the Android // platform. // - // * URL - The default mobile browser on the recipient's device - // opens and loads the web page at a URL that you specify. + // * URL - The default mobile browser on the recipient's device opens + // and loads the web page at a URL that you specify. Action Action // The message body to use in a push notification that's based on the message @@ -439,33 +439,33 @@ type APNSMessage struct { // The type of push notification to send. Valid values are: // - // * alert - For a + // * alert - For a // standard notification that's displayed on recipients' devices and prompts a // recipient to interact with the notification. 
// - // * background - For a silent + // * background - For a silent // notification that delivers content in the background and isn't displayed on // recipients' devices. // - // * complication - For a notification that contains - // update information for an app’s complication timeline. + // * complication - For a notification that contains update + // information for an app’s complication timeline. // - // * fileprovider - For - // a notification that signals changes to a File Provider extension. + // * fileprovider - For a + // notification that signals changes to a File Provider extension. // - // * mdm - - // For a notification that tells managed devices to contact the MDM server. + // * mdm - For a + // notification that tells managed devices to contact the MDM server. // - // * - // voip - For a notification that provides information about an incoming VoIP - // call. + // * voip - For + // a notification that provides information about an incoming VoIP call. // - // Amazon Pinpoint specifies this value in the apns-push-type request header - // when it sends the notification message to APNs. If you don't specify a value for - // this property, Amazon Pinpoint sets the value to alert or background - // automatically, based on the value that you specify for the SilentPush or - // RawContent property of the message. For more information about the - // apns-push-type request header, see Sending Notification Requests to APNs + // Amazon + // Pinpoint specifies this value in the apns-push-type request header when it sends + // the notification message to APNs. If you don't specify a value for this + // property, Amazon Pinpoint sets the value to alert or background automatically, + // based on the value that you specify for the SilentPush or RawContent property of + // the message. For more information about the apns-push-type request header, see + // Sending Notification Requests to APNs // (https://developer.apple.com/documentation/usernotifications/setting_up_a_remote_notification_server/sending_notification_requests_to_apns) // on the Apple Developer website. APNSPushType *string @@ -473,15 +473,15 @@ type APNSMessage struct { // The action to occur if the recipient taps the push notification. Valid values // are: // - // * OPEN_APP - Your app opens or it becomes the foreground app if it was + // * OPEN_APP - Your app opens or it becomes the foreground app if it was // sent to the background. This is the default action. // - // * DEEP_LINK - Your app + // * DEEP_LINK - Your app // opens and displays a designated user interface in the app. This setting uses the // deep-linking features of the iOS platform. // - // * URL - The default mobile - // browser on the recipient's device opens and loads the web page at a URL that you + // * URL - The default mobile browser + // on the recipient's device opens and loads the web page at a URL that you // specify. Action Action @@ -607,16 +607,16 @@ type APNSPushNotificationTemplate struct { // The action to occur if a recipient taps a push notification that's based on the // message template. Valid values are: // - // * OPEN_APP - Your app opens or it - // becomes the foreground app if it was sent to the background. This is the default + // * OPEN_APP - Your app opens or it becomes + // the foreground app if it was sent to the background. This is the default // action. // - // * DEEP_LINK - Your app opens and displays a designated user - // interface in the app. This setting uses the deep-linking features of the iOS - // platform. 
+ // * DEEP_LINK - Your app opens and displays a designated user interface + // in the app. This setting uses the deep-linking features of the iOS platform. // - // * URL - The default mobile browser on the recipient's device - // opens and loads the web page at a URL that you specify. + // * + // URL - The default mobile browser on the recipient's device opens and loads the + // web page at a URL that you specify. Action Action // The message body to use in push notifications that are based on the message @@ -998,21 +998,21 @@ type ApplicationSettingsResource struct { // specific time range when messages aren't sent to endpoints, if all the following // conditions are met: // - // * The EndpointDemographic.Timezone property of the - // endpoint is set to a valid value. + // * The EndpointDemographic.Timezone property of the endpoint + // is set to a valid value. // - // * The current time in the endpoint's time - // zone is later than or equal to the time specified by the QuietTime.Start - // property for the application (or a campaign or journey that has custom quiet - // time settings). + // * The current time in the endpoint's time zone is + // later than or equal to the time specified by the QuietTime.Start property for + // the application (or a campaign or journey that has custom quiet time + // settings). // - // * The current time in the endpoint's time zone is earlier - // than or equal to the time specified by the QuietTime.End property for the - // application (or a campaign or journey that has custom quiet time settings). + // * The current time in the endpoint's time zone is earlier than or + // equal to the time specified by the QuietTime.End property for the application + // (or a campaign or journey that has custom quiet time settings). // - // If - // any of the preceding conditions isn't met, the endpoint will receive messages - // from a campaign or journey, even if quiet time is enabled. + // If any of the + // preceding conditions isn't met, the endpoint will receive messages from a + // campaign or journey, even if quiet time is enabled. QuietTime *QuietTime } @@ -1056,14 +1056,14 @@ type AttributesResource struct { // The type of attribute or attributes that were removed from the endpoints. Valid // values are: // - // * endpoint-custom-attributes - Custom attributes that describe + // * endpoint-custom-attributes - Custom attributes that describe // endpoints. // - // * endpoint-metric-attributes - Custom metrics that your app - // reports to Amazon Pinpoint for endpoints. + // * endpoint-metric-attributes - Custom metrics that your app reports + // to Amazon Pinpoint for endpoints. // - // * endpoint-user-attributes - - // Custom attributes that describe users. + // * endpoint-user-attributes - Custom + // attributes that describe users. // // This member is required. AttributeType *string @@ -1145,14 +1145,14 @@ type BaiduMessage struct { // The action to occur if the recipient taps the push notification. Valid values // are: // - // * OPEN_APP - Your app opens or it becomes the foreground app if it was + // * OPEN_APP - Your app opens or it becomes the foreground app if it was // sent to the background. This is the default action. // - // * DEEP_LINK - Your app + // * DEEP_LINK - Your app // opens and displays a designated user interface in the app. This action uses the // deep-linking features of the Android platform. 
// - // * URL - The default mobile + // * URL - The default mobile // browser on the recipient's device opens and loads the web page at a URL that you // specify. Action Action @@ -1326,12 +1326,12 @@ type CampaignHook struct { // The mode that Amazon Pinpoint uses to invoke the AWS Lambda function. Possible // values are: // - // * FILTER - Invoke the function to customize the segment that's - // used by a campaign. + // * FILTER - Invoke the function to customize the segment that's used + // by a campaign. // - // * DELIVERY - (Deprecated) Previously, invoked the - // function to send a campaign through a custom channel. This functionality is not - // supported anymore. To send a campaign through a custom channel, use the + // * DELIVERY - (Deprecated) Previously, invoked the function to + // send a campaign through a custom channel. This functionality is not supported + // anymore. To send a campaign through a custom channel, use the // CustomDeliveryConfiguration and CampaignCustomMessage objects of the campaign. Mode Mode @@ -1640,19 +1640,18 @@ type CreateRecommenderConfigurationShape struct { // template editor on the Amazon Pinpoint console. The following restrictions apply // to these names: // - // * An attribute name must start with a letter or number and - // it can contain up to 50 characters. The characters can be letters, numbers, + // * An attribute name must start with a letter or number and it + // can contain up to 50 characters. The characters can be letters, numbers, // underscores (_), or hyphens (-). Attribute names are case sensitive and must be // unique. // - // * An attribute display name must start with a letter or number and - // it can contain up to 25 characters. The characters can be letters, numbers, - // spaces, underscores (_), or hyphens (-). + // * An attribute display name must start with a letter or number and it + // can contain up to 25 characters. The characters can be letters, numbers, spaces, + // underscores (_), or hyphens (-). // - // This object is required if the - // configuration invokes an AWS Lambda function (RecommendationTransformerUri) to - // process recommendation data. Otherwise, don't include this object in your - // request. + // This object is required if the configuration + // invokes an AWS Lambda function (RecommendationTransformerUri) to process + // recommendation data. Otherwise, don't include this object in your request. Attributes map[string]*string // A custom description of the configuration for the recommender model. The @@ -1670,16 +1669,16 @@ type CreateRecommenderConfigurationShape struct { // that’s specific to a particular endpoint or user in an Amazon Pinpoint // application. Valid values are: // - // * PINPOINT_ENDPOINT_ID - Associate each user - // in the model with a particular endpoint in Amazon Pinpoint. The data is - // correlated based on endpoint IDs in Amazon Pinpoint. This is the default - // value. + // * PINPOINT_ENDPOINT_ID - Associate each user in + // the model with a particular endpoint in Amazon Pinpoint. The data is correlated + // based on endpoint IDs in Amazon Pinpoint. This is the default value. // - // * PINPOINT_USER_ID - Associate each user in the model with a - // particular user and endpoint in Amazon Pinpoint. The data is correlated based on - // user IDs in Amazon Pinpoint. If you specify this value, an endpoint definition - // in Amazon Pinpoint has to specify both a user ID (UserId) and an endpoint ID. - // Otherwise, messages won’t be sent to the user's endpoint. 
+ // * + // PINPOINT_USER_ID - Associate each user in the model with a particular user and + // endpoint in Amazon Pinpoint. The data is correlated based on user IDs in Amazon + // Pinpoint. If you specify this value, an endpoint definition in Amazon Pinpoint + // has to specify both a user ID (UserId) and an endpoint ID. Otherwise, messages + // won’t be sent to the user's endpoint. RecommendationProviderIdType *string // The name or Amazon Resource Name (ARN) of the AWS Lambda function to invoke for @@ -1731,11 +1730,11 @@ type CustomDeliveryConfiguration struct { // The destination to send the campaign or treatment to. This value can be one of // the following: // - // * The name or Amazon Resource Name (ARN) of an AWS Lambda + // * The name or Amazon Resource Name (ARN) of an AWS Lambda // function to invoke to handle delivery of the campaign or treatment. // - // * The - // URL for a web application or service that supports HTTPS and can receive the + // * The URL + // for a web application or service that supports HTTPS and can receive the // message. The URL has to be a full URL, including the HTTPS protocol. // // This member is required. @@ -1754,11 +1753,11 @@ type CustomMessageActivity struct { // The destination to send the campaign or treatment to. This value can be one of // the following: // - // * The name or Amazon Resource Name (ARN) of an AWS Lambda + // * The name or Amazon Resource Name (ARN) of an AWS Lambda // function to invoke to handle delivery of the campaign or treatment. // - // * The - // URL for a web application or service that supports HTTPS and can receive the + // * The URL + // for a web application or service that supports HTTPS and can receive the // message. The URL has to be a full URL, including the HTTPS protocol. DeliveryUri *string @@ -1808,16 +1807,16 @@ type DefaultPushNotificationMessage struct { // The default action to occur if a recipient taps the push notification. Valid // values are: // - // * OPEN_APP - Your app opens or it becomes the foreground app if - // it was sent to the background. This is the default action. + // * OPEN_APP - Your app opens or it becomes the foreground app if it + // was sent to the background. This is the default action. // - // * DEEP_LINK - - // Your app opens and displays a designated user interface in the app. This setting - // uses the deep-linking features of the iOS and Android platforms. + // * DEEP_LINK - Your app + // opens and displays a designated user interface in the app. This setting uses the + // deep-linking features of the iOS and Android platforms. // - // * URL - - // The default mobile browser on the recipient's device opens and loads the web - // page at a URL that you specify. + // * URL - The default + // mobile browser on the recipient's device opens and loads the web page at a URL + // that you specify. Action Action // The default body of the notification message. @@ -1854,16 +1853,16 @@ type DefaultPushNotificationTemplate struct { // The action to occur if a recipient taps a push notification that's based on the // message template. Valid values are: // - // * OPEN_APP - Your app opens or it - // becomes the foreground app if it was sent to the background. This is the default + // * OPEN_APP - Your app opens or it becomes + // the foreground app if it was sent to the background. This is the default // action. // - // * DEEP_LINK - Your app opens and displays a designated user - // interface in the app. 
This setting uses the deep-linking features of the iOS and - // Android platforms. + // * DEEP_LINK - Your app opens and displays a designated user interface + // in the app. This setting uses the deep-linking features of the iOS and Android + // platforms. // - // * URL - The default mobile browser on the recipient's - // device opens and loads the web page at a URL that you specify. + // * URL - The default mobile browser on the recipient's device opens + // and loads the web page at a URL that you specify. Action Action // The message body to use in push notifications that are based on the message @@ -2338,33 +2337,32 @@ type EndpointMessageResult struct { // The delivery status of the message. Possible values are: // - // * DUPLICATE - The + // * DUPLICATE - The // endpoint address is a duplicate of another endpoint address. Amazon Pinpoint // won't attempt to send the message again. // - // * OPT_OUT - The user who's - // associated with the endpoint has opted out of receiving messages from you. - // Amazon Pinpoint won't attempt to send the message again. + // * OPT_OUT - The user who's associated + // with the endpoint has opted out of receiving messages from you. Amazon Pinpoint + // won't attempt to send the message again. // - // * - // PERMANENT_FAILURE - An error occurred when delivering the message to the - // endpoint. Amazon Pinpoint won't attempt to send the message again. + // * PERMANENT_FAILURE - An error + // occurred when delivering the message to the endpoint. Amazon Pinpoint won't + // attempt to send the message again. // - // * - // SUCCESSFUL - The message was successfully delivered to the endpoint. + // * SUCCESSFUL - The message was successfully + // delivered to the endpoint. // - // * - // TEMPORARY_FAILURE - A temporary error occurred. Amazon Pinpoint won't attempt to - // send the message again. + // * TEMPORARY_FAILURE - A temporary error occurred. + // Amazon Pinpoint won't attempt to send the message again. // - // * THROTTLED - Amazon Pinpoint throttled the - // operation to send the message to the endpoint. + // * THROTTLED - Amazon + // Pinpoint throttled the operation to send the message to the endpoint. // - // * TIMEOUT - The message - // couldn't be sent within the timeout period. + // * TIMEOUT + // - The message couldn't be sent within the timeout period. // - // * UNKNOWN_FAILURE - An unknown - // error occurred. + // * UNKNOWN_FAILURE - + // An unknown error occurred. // // This member is required. DeliveryStatus DeliveryStatus @@ -2985,14 +2983,14 @@ type GCMMessage struct { // The action to occur if the recipient taps the push notification. Valid values // are: // - // * OPEN_APP - Your app opens or it becomes the foreground app if it was + // * OPEN_APP - Your app opens or it becomes the foreground app if it was // sent to the background. This is the default action. // - // * DEEP_LINK - Your app + // * DEEP_LINK - Your app // opens and displays a designated user interface in the app. This action uses the // deep-linking features of the Android platform. // - // * URL - The default mobile + // * URL - The default mobile // browser on the recipient's device opens and loads the web page at a URL that you // specify. Action Action @@ -3402,29 +3400,28 @@ type JourneyExecutionActivityMetricsResponse struct { // The type of activity that the metric applies to. Possible values are: // - // * + // * // CONDITIONAL_SPLIT - For a yes/no split activity, which is an activity that sends // participants down one of two paths in a journey. 
// - // * HOLDOUT - For a holdout + // * HOLDOUT - For a holdout // activity, which is an activity that stops a journey for a specified percentage // of participants. // - // * MESSAGE - For an email activity, which is an activity - // that sends an email message to participants. + // * MESSAGE - For an email activity, which is an activity that + // sends an email message to participants. // - // * MULTI_CONDITIONAL_SPLIT - - // For a multivariate split activity, which is an activity that sends participants - // down one of as many as five paths in a journey. + // * MULTI_CONDITIONAL_SPLIT - For a + // multivariate split activity, which is an activity that sends participants down + // one of as many as five paths in a journey. // - // * RANDOM_SPLIT - For a - // random split activity, which is an activity that sends specified percentages of - // participants down one of as many as five paths in a journey. + // * RANDOM_SPLIT - For a random split + // activity, which is an activity that sends specified percentages of participants + // down one of as many as five paths in a journey. // - // * WAIT - For a - // wait activity, which is an activity that waits for a certain amount of time or - // until a specific date and time before moving participants to the next activity - // in a journey. + // * WAIT - For a wait activity, + // which is an activity that waits for a certain amount of time or until a specific + // date and time before moving participants to the next activity in a journey. // // This member is required. ActivityType *string @@ -3560,14 +3557,14 @@ type JourneyResponse struct { // when a journey doesn't send messages to participants, if all the following // conditions are met: // - // * The EndpointDemographic.Timezone property of the - // endpoint for the participant is set to a valid value. + // * The EndpointDemographic.Timezone property of the endpoint + // for the participant is set to a valid value. // - // * The current time in - // the participant's time zone is later than or equal to the time specified by the + // * The current time in the + // participant's time zone is later than or equal to the time specified by the // QuietTime.Start property for the journey. // - // * The current time in the + // * The current time in the // participant's time zone is earlier than or equal to the time specified by the // QuietTime.End property for the journey. // @@ -3591,30 +3588,30 @@ type JourneyResponse struct { // The current status of the journey. Possible values are: // - // * DRAFT - The - // journey is being developed and hasn't been published yet. - // - // * ACTIVE - The - // journey has been developed and published. Depending on the journey's schedule, - // the journey may currently be running or scheduled to start running at a later - // time. If a journey's status is ACTIVE, you can't add, change, or remove - // activities from it. - // - // * COMPLETED - The journey has been published and has - // finished running. All participants have entered the journey and no participants - // are waiting to complete the journey or any activities in the journey. - // - // * - // CANCELLED - The journey has been stopped. If a journey's status is CANCELLED, - // you can't add, change, or remove activities or segment settings from the - // journey. - // - // * CLOSED - The journey has been published and has started running. - // It may have also passed its scheduled end time, or passed its scheduled start - // time and a refresh frequency hasn't been specified for it. 
If a journey's status - // is CLOSED, you can't add participants to it, and no existing participants can - // enter the journey for the first time. However, any existing participants who are - // currently waiting to start an activity may continue the journey. + // * DRAFT - The journey + // is being developed and hasn't been published yet. + // + // * ACTIVE - The journey has + // been developed and published. Depending on the journey's schedule, the journey + // may currently be running or scheduled to start running at a later time. If a + // journey's status is ACTIVE, you can't add, change, or remove activities from + // it. + // + // * COMPLETED - The journey has been published and has finished running. All + // participants have entered the journey and no participants are waiting to + // complete the journey or any activities in the journey. + // + // * CANCELLED - The + // journey has been stopped. If a journey's status is CANCELLED, you can't add, + // change, or remove activities or segment settings from the journey. + // + // * CLOSED - + // The journey has been published and has started running. It may have also passed + // its scheduled end time, or passed its scheduled start time and a refresh + // frequency hasn't been specified for it. If a journey's status is CLOSED, you + // can't add participants to it, and no existing participants can enter the journey + // for the first time. However, any existing participants who are currently waiting + // to start an activity may continue the journey. State State // This object is not used or supported. @@ -3707,16 +3704,15 @@ type Message struct { // The action to occur if a recipient taps the push notification. Valid values // are: // - // * OPEN_APP - Your app opens or it becomes the foreground app if it was + // * OPEN_APP - Your app opens or it becomes the foreground app if it was // sent to the background. This is the default action. // - // * DEEP_LINK - Your app + // * DEEP_LINK - Your app // opens and displays a designated user interface in the app. This setting uses the // deep-linking features of iOS and Android. // - // * URL - The default mobile - // browser on the recipient's device opens and loads the web page at a URL that you - // specify. + // * URL - The default mobile browser on + // the recipient's device opens and loads the web page at a URL that you specify. Action Action // The body of the notification message. The maximum number of characters is 200. @@ -3885,33 +3881,32 @@ type MessageResult struct { // The delivery status of the message. Possible values are: // - // * DUPLICATE - The + // * DUPLICATE - The // endpoint address is a duplicate of another endpoint address. Amazon Pinpoint // won't attempt to send the message again. // - // * OPT_OUT - The user who's - // associated with the endpoint address has opted out of receiving messages from - // you. Amazon Pinpoint won't attempt to send the message again. - // - // * - // PERMANENT_FAILURE - An error occurred when delivering the message to the - // endpoint address. Amazon Pinpoint won't attempt to send the message again. - // + // * OPT_OUT - The user who's associated + // with the endpoint address has opted out of receiving messages from you. Amazon + // Pinpoint won't attempt to send the message again. // - // * SUCCESSFUL - The message was successfully delivered to the endpoint address. + // * PERMANENT_FAILURE - An + // error occurred when delivering the message to the endpoint address. Amazon + // Pinpoint won't attempt to send the message again. 
// + // * SUCCESSFUL - The message + // was successfully delivered to the endpoint address. // - // * TEMPORARY_FAILURE - A temporary error occurred. Amazon Pinpoint won't attempt - // to send the message again. + // * TEMPORARY_FAILURE - A + // temporary error occurred. Amazon Pinpoint won't attempt to send the message + // again. // - // * THROTTLED - Amazon Pinpoint throttled the - // operation to send the message to the endpoint address. + // * THROTTLED - Amazon Pinpoint throttled the operation to send the + // message to the endpoint address. // - // * TIMEOUT - The - // message couldn't be sent within the timeout period. + // * TIMEOUT - The message couldn't be sent + // within the timeout period. // - // * UNKNOWN_FAILURE - An - // unknown error occurred. + // * UNKNOWN_FAILURE - An unknown error occurred. // // This member is required. DeliveryStatus DeliveryStatus @@ -4387,12 +4382,12 @@ type RecommenderConfigurationResponse struct { // that’s specific to a particular endpoint or user in an Amazon Pinpoint // application. Possible values are: // - // * PINPOINT_ENDPOINT_ID - Each user in the + // * PINPOINT_ENDPOINT_ID - Each user in the // model is associated with a particular endpoint in Amazon Pinpoint. The data is // correlated based on endpoint IDs in Amazon Pinpoint. This is the default // value. // - // * PINPOINT_USER_ID - Each user in the model is associated with a + // * PINPOINT_USER_ID - Each user in the model is associated with a // particular user and endpoint in Amazon Pinpoint. The data is correlated based on // user IDs in Amazon Pinpoint. If this value is specified, an endpoint definition // in Amazon Pinpoint has to specify both a user ID (UserId) and an endpoint ID. @@ -4491,19 +4486,19 @@ type Schedule struct { // when a campaign doesn't send messages to endpoints, if all the following // conditions are met: // - // * The EndpointDemographic.Timezone property of the - // endpoint is set to a valid value. + // * The EndpointDemographic.Timezone property of the endpoint + // is set to a valid value. // - // * The current time in the endpoint's time - // zone is later than or equal to the time specified by the QuietTime.Start - // property for the campaign. - // - // * The current time in the endpoint's time zone - // is earlier than or equal to the time specified by the QuietTime.End property for + // * The current time in the endpoint's time zone is + // later than or equal to the time specified by the QuietTime.Start property for // the campaign. // - // If any of the preceding conditions isn't met, the endpoint will - // receive messages from the campaign, even if quiet time is enabled. + // * The current time in the endpoint's time zone is earlier than or + // equal to the time specified by the QuietTime.End property for the campaign. + // + // If + // any of the preceding conditions isn't met, the endpoint will receive messages + // from the campaign, even if quiet time is enabled. QuietTime *QuietTime // The starting UTC offset for the campaign schedule, if the value of the @@ -4712,14 +4707,14 @@ type SegmentResponse struct { // The segment type. Valid values are: // - // * DIMENSIONAL - A dynamic segment, - // which is a segment that uses selection criteria that you specify and is based on - // endpoint data that's reported by your app. Dynamic segments can change over - // time. + // * DIMENSIONAL - A dynamic segment, which is + // a segment that uses selection criteria that you specify and is based on endpoint + // data that's reported by your app. 
Dynamic segments can change over time. // - // * IMPORT - A static segment, which is a segment that uses selection - // criteria that you specify and is based on endpoint definitions that you import - // from a file. Imported segments are static; they don't change over time. + // * + // IMPORT - A static segment, which is a segment that uses selection criteria that + // you specify and is based on endpoint definitions that you import from a file. + // Imported segments are static; they don't change over time. // // This member is required. SegmentType SegmentType @@ -5404,19 +5399,18 @@ type UpdateRecommenderConfigurationShape struct { // template editor on the Amazon Pinpoint console. The following restrictions apply // to these names: // - // * An attribute name must start with a letter or number and - // it can contain up to 50 characters. The characters can be letters, numbers, + // * An attribute name must start with a letter or number and it + // can contain up to 50 characters. The characters can be letters, numbers, // underscores (_), or hyphens (-). Attribute names are case sensitive and must be // unique. // - // * An attribute display name must start with a letter or number and - // it can contain up to 25 characters. The characters can be letters, numbers, - // spaces, underscores (_), or hyphens (-). + // * An attribute display name must start with a letter or number and it + // can contain up to 25 characters. The characters can be letters, numbers, spaces, + // underscores (_), or hyphens (-). // - // This object is required if the - // configuration invokes an AWS Lambda function (RecommendationTransformerUri) to - // process recommendation data. Otherwise, don't include this object in your - // request. + // This object is required if the configuration + // invokes an AWS Lambda function (RecommendationTransformerUri) to process + // recommendation data. Otherwise, don't include this object in your request. Attributes map[string]*string // A custom description of the configuration for the recommender model. The @@ -5434,16 +5428,16 @@ type UpdateRecommenderConfigurationShape struct { // that’s specific to a particular endpoint or user in an Amazon Pinpoint // application. Valid values are: // - // * PINPOINT_ENDPOINT_ID - Associate each user - // in the model with a particular endpoint in Amazon Pinpoint. The data is - // correlated based on endpoint IDs in Amazon Pinpoint. This is the default - // value. + // * PINPOINT_ENDPOINT_ID - Associate each user in + // the model with a particular endpoint in Amazon Pinpoint. The data is correlated + // based on endpoint IDs in Amazon Pinpoint. This is the default value. // - // * PINPOINT_USER_ID - Associate each user in the model with a - // particular user and endpoint in Amazon Pinpoint. The data is correlated based on - // user IDs in Amazon Pinpoint. If you specify this value, an endpoint definition - // in Amazon Pinpoint has to specify both a user ID (UserId) and an endpoint ID. - // Otherwise, messages won’t be sent to the user's endpoint. + // * + // PINPOINT_USER_ID - Associate each user in the model with a particular user and + // endpoint in Amazon Pinpoint. The data is correlated based on user IDs in Amazon + // Pinpoint. If you specify this value, an endpoint definition in Amazon Pinpoint + // has to specify both a user ID (UserId) and an endpoint ID. Otherwise, messages + // won’t be sent to the user's endpoint. 
RecommendationProviderIdType *string // The name or Amazon Resource Name (ARN) of the AWS Lambda function to invoke for @@ -5700,22 +5694,22 @@ type WriteApplicationSettingsRequest struct { // specific time range when messages aren't sent to endpoints, if all the following // conditions are met: // - // * The EndpointDemographic.Timezone property of the - // endpoint is set to a valid value. + // * The EndpointDemographic.Timezone property of the endpoint + // is set to a valid value. // - // * The current time in the endpoint's time - // zone is later than or equal to the time specified by the QuietTime.Start - // property for the application (or a campaign or journey that has custom quiet - // time settings). + // * The current time in the endpoint's time zone is + // later than or equal to the time specified by the QuietTime.Start property for + // the application (or a campaign or journey that has custom quiet time + // settings). // - // * The current time in the endpoint's time zone is earlier - // than or equal to the time specified by the QuietTime.End property for the - // application (or a campaign or journey that has custom quiet time settings). + // * The current time in the endpoint's time zone is earlier than or + // equal to the time specified by the QuietTime.End property for the application + // (or a campaign or journey that has custom quiet time settings). // - // If - // any of the preceding conditions isn't met, the endpoint will receive messages - // from a campaign or journey, even if quiet time is enabled. To override the - // default quiet time settings for a specific campaign or journey, use the Campaign + // If any of the + // preceding conditions isn't met, the endpoint will receive messages from a + // campaign or journey, even if quiet time is enabled. To override the default + // quiet time settings for a specific campaign or journey, use the Campaign // resource or the Journey resource to define a custom quiet time for the campaign // or journey. QuietTime *QuietTime @@ -5841,14 +5835,14 @@ type WriteJourneyRequest struct { // when a journey doesn't send messages to participants, if all the following // conditions are met: // - // * The EndpointDemographic.Timezone property of the - // endpoint for the participant is set to a valid value. + // * The EndpointDemographic.Timezone property of the endpoint + // for the participant is set to a valid value. // - // * The current time in - // the participant's time zone is later than or equal to the time specified by the + // * The current time in the + // participant's time zone is later than or equal to the time specified by the // QuietTime.Start property for the journey. // - // * The current time in the + // * The current time in the // participant's time zone is earlier than or equal to the time specified by the // QuietTime.End property for the journey. // @@ -5874,17 +5868,17 @@ type WriteJourneyRequest struct { // The status of the journey. Valid values are: // - // * DRAFT - Saves the journey - // and doesn't publish it. + // * DRAFT - Saves the journey and + // doesn't publish it. // - // * ACTIVE - Saves and publishes the journey. - // Depending on the journey's schedule, the journey starts running immediately or - // at the scheduled start time. If a journey's status is ACTIVE, you can't add, - // change, or remove activities from it. + // * ACTIVE - Saves and publishes the journey. Depending on + // the journey's schedule, the journey starts running immediately or at the + // scheduled start time. 
If a journey's status is ACTIVE, you can't add, change, or + // remove activities from it. // - // The CANCELLED, COMPLETED, and CLOSED - // values are not supported in requests to create or update a journey. To cancel a - // journey, use the Journey State resource. + // The CANCELLED, COMPLETED, and CLOSED values are not + // supported in requests to create or update a journey. To cancel a journey, use + // the Journey State resource. State State } diff --git a/service/pinpointemail/api_op_GetAccount.go b/service/pinpointemail/api_op_GetAccount.go index 8a9fdfd451b..89cd49afac8 100644 --- a/service/pinpointemail/api_op_GetAccount.go +++ b/service/pinpointemail/api_op_GetAccount.go @@ -44,15 +44,15 @@ type GetAccountOutput struct { // The reputation status of your Amazon Pinpoint account. The status can be one of // the following: // - // * HEALTHY – There are no reputation-related issues that + // * HEALTHY – There are no reputation-related issues that // currently impact your account. // - // * PROBATION – We've identified some issues - // with your Amazon Pinpoint account. We're placing your account under review while - // you work on correcting these issues. + // * PROBATION – We've identified some issues with + // your Amazon Pinpoint account. We're placing your account under review while you + // work on correcting these issues. // - // * SHUTDOWN – Your account's ability to - // send email is currently paused because of an issue with the email sent from your + // * SHUTDOWN – Your account's ability to send + // email is currently paused because of an issue with the email sent from your // account. When you correct the issue, you can contact us and request that your // account's ability to send email is resumed. EnforcementStatus *string diff --git a/service/pinpointemail/api_op_PutEmailIdentityMailFromAttributes.go b/service/pinpointemail/api_op_PutEmailIdentityMailFromAttributes.go index b4ccde390ac..589c3fecdb2 100644 --- a/service/pinpointemail/api_op_PutEmailIdentityMailFromAttributes.go +++ b/service/pinpointemail/api_op_PutEmailIdentityMailFromAttributes.go @@ -49,14 +49,14 @@ type PutEmailIdentityMailFromAttributesInput struct { // The custom MAIL FROM domain that you want the verified identity to use. The MAIL // FROM domain must meet the following criteria: // - // * It has to be a subdomain of - // the verified identity. + // * It has to be a subdomain of the + // verified identity. // - // * It can't be used to receive email. + // * It can't be used to receive email. // - // * It can't - // be used in a "From" address if the MAIL FROM domain is a destination for - // feedback forwarding emails. + // * It can't be used in + // a "From" address if the MAIL FROM domain is a destination for feedback + // forwarding emails. MailFromDomain *string } diff --git a/service/pinpointemail/api_op_SendEmail.go b/service/pinpointemail/api_op_SendEmail.go index e561fdbb693..ba56838f475 100644 --- a/service/pinpointemail/api_op_SendEmail.go +++ b/service/pinpointemail/api_op_SendEmail.go @@ -14,11 +14,11 @@ import ( // Sends an email message. You can use the Amazon Pinpoint Email API to send two // types of messages: // -// * Simple – A standard email message. When you create -// this type of message, you specify the sender, the recipient, and the message -// body, and Amazon Pinpoint assembles the message for you. +// * Simple – A standard email message. 
When you create this +// type of message, you specify the sender, the recipient, and the message body, +// and Amazon Pinpoint assembles the message for you. // -// * Raw – A raw, +// * Raw – A raw, // MIME-formatted email message. When you send this type of email, you have to // specify all of the message headers, as well as the message body. You can use // this message type to send messages that contain attachments. The message that diff --git a/service/pinpointemail/types/enums.go b/service/pinpointemail/types/enums.go index 0f390e85ba4..44c0e31cc5d 100644 --- a/service/pinpointemail/types/enums.go +++ b/service/pinpointemail/types/enums.go @@ -6,8 +6,8 @@ type BehaviorOnMxFailure string // Enum values for BehaviorOnMxFailure const ( - BehaviorOnMxFailureUse_default_value BehaviorOnMxFailure = "USE_DEFAULT_VALUE" - BehaviorOnMxFailureReject_message BehaviorOnMxFailure = "REJECT_MESSAGE" + BehaviorOnMxFailureUseDefaultValue BehaviorOnMxFailure = "USE_DEFAULT_VALUE" + BehaviorOnMxFailureRejectMessage BehaviorOnMxFailure = "REJECT_MESSAGE" ) // Values returns all known values for BehaviorOnMxFailure. Note that this can be @@ -24,9 +24,9 @@ type DeliverabilityDashboardAccountStatus string // Enum values for DeliverabilityDashboardAccountStatus const ( - DeliverabilityDashboardAccountStatusActive DeliverabilityDashboardAccountStatus = "ACTIVE" - DeliverabilityDashboardAccountStatusPending_expiration DeliverabilityDashboardAccountStatus = "PENDING_EXPIRATION" - DeliverabilityDashboardAccountStatusDisabled DeliverabilityDashboardAccountStatus = "DISABLED" + DeliverabilityDashboardAccountStatusActive DeliverabilityDashboardAccountStatus = "ACTIVE" + DeliverabilityDashboardAccountStatusPendingExpiration DeliverabilityDashboardAccountStatus = "PENDING_EXPIRATION" + DeliverabilityDashboardAccountStatusDisabled DeliverabilityDashboardAccountStatus = "DISABLED" ) // Values returns all known values for DeliverabilityDashboardAccountStatus. Note @@ -45,8 +45,8 @@ type DeliverabilityTestStatus string // Enum values for DeliverabilityTestStatus const ( - DeliverabilityTestStatusIn_progress DeliverabilityTestStatus = "IN_PROGRESS" - DeliverabilityTestStatusCompleted DeliverabilityTestStatus = "COMPLETED" + DeliverabilityTestStatusInProgress DeliverabilityTestStatus = "IN_PROGRESS" + DeliverabilityTestStatusCompleted DeliverabilityTestStatus = "COMPLETED" ) // Values returns all known values for DeliverabilityTestStatus. Note that this can @@ -63,9 +63,9 @@ type DimensionValueSource string // Enum values for DimensionValueSource const ( - DimensionValueSourceMessage_tag DimensionValueSource = "MESSAGE_TAG" - DimensionValueSourceEmail_header DimensionValueSource = "EMAIL_HEADER" - DimensionValueSourceLink_tag DimensionValueSource = "LINK_TAG" + DimensionValueSourceMessageTag DimensionValueSource = "MESSAGE_TAG" + DimensionValueSourceEmailHeader DimensionValueSource = "EMAIL_HEADER" + DimensionValueSourceLinkTag DimensionValueSource = "LINK_TAG" ) // Values returns all known values for DimensionValueSource. 
Note that this can be @@ -83,11 +83,11 @@ type DkimStatus string // Enum values for DkimStatus const ( - DkimStatusPending DkimStatus = "PENDING" - DkimStatusSuccess DkimStatus = "SUCCESS" - DkimStatusFailed DkimStatus = "FAILED" - DkimStatusTemporary_failure DkimStatus = "TEMPORARY_FAILURE" - DkimStatusNot_started DkimStatus = "NOT_STARTED" + DkimStatusPending DkimStatus = "PENDING" + DkimStatusSuccess DkimStatus = "SUCCESS" + DkimStatusFailed DkimStatus = "FAILED" + DkimStatusTemporaryFailure DkimStatus = "TEMPORARY_FAILURE" + DkimStatusNotStarted DkimStatus = "NOT_STARTED" ) // Values returns all known values for DkimStatus. Note that this can be expanded @@ -107,14 +107,14 @@ type EventType string // Enum values for EventType const ( - EventTypeSend EventType = "SEND" - EventTypeReject EventType = "REJECT" - EventTypeBounce EventType = "BOUNCE" - EventTypeComplaint EventType = "COMPLAINT" - EventTypeDelivery EventType = "DELIVERY" - EventTypeOpen EventType = "OPEN" - EventTypeClick EventType = "CLICK" - EventTypeRendering_failure EventType = "RENDERING_FAILURE" + EventTypeSend EventType = "SEND" + EventTypeReject EventType = "REJECT" + EventTypeBounce EventType = "BOUNCE" + EventTypeComplaint EventType = "COMPLAINT" + EventTypeDelivery EventType = "DELIVERY" + EventTypeOpen EventType = "OPEN" + EventTypeClick EventType = "CLICK" + EventTypeRenderingFailure EventType = "RENDERING_FAILURE" ) // Values returns all known values for EventType. Note that this can be expanded in @@ -137,9 +137,9 @@ type IdentityType string // Enum values for IdentityType const ( - IdentityTypeEmail_address IdentityType = "EMAIL_ADDRESS" - IdentityTypeDomain IdentityType = "DOMAIN" - IdentityTypeManaged_domain IdentityType = "MANAGED_DOMAIN" + IdentityTypeEmailAddress IdentityType = "EMAIL_ADDRESS" + IdentityTypeDomain IdentityType = "DOMAIN" + IdentityTypeManagedDomain IdentityType = "MANAGED_DOMAIN" ) // Values returns all known values for IdentityType. Note that this can be expanded @@ -157,10 +157,10 @@ type MailFromDomainStatus string // Enum values for MailFromDomainStatus const ( - MailFromDomainStatusPending MailFromDomainStatus = "PENDING" - MailFromDomainStatusSuccess MailFromDomainStatus = "SUCCESS" - MailFromDomainStatusFailed MailFromDomainStatus = "FAILED" - MailFromDomainStatusTemporary_failure MailFromDomainStatus = "TEMPORARY_FAILURE" + MailFromDomainStatusPending MailFromDomainStatus = "PENDING" + MailFromDomainStatusSuccess MailFromDomainStatus = "SUCCESS" + MailFromDomainStatusFailed MailFromDomainStatus = "FAILED" + MailFromDomainStatusTemporaryFailure MailFromDomainStatus = "TEMPORARY_FAILURE" ) // Values returns all known values for MailFromDomainStatus. Note that this can be @@ -197,8 +197,8 @@ type WarmupStatus string // Enum values for WarmupStatus const ( - WarmupStatusIn_progress WarmupStatus = "IN_PROGRESS" - WarmupStatusDone WarmupStatus = "DONE" + WarmupStatusInProgress WarmupStatus = "IN_PROGRESS" + WarmupStatusDone WarmupStatus = "DONE" ) // Values returns all known values for WarmupStatus. Note that this can be expanded diff --git a/service/pinpointemail/types/types.go b/service/pinpointemail/types/types.go index db23a043093..f8dafbbb853 100644 --- a/service/pinpointemail/types/types.go +++ b/service/pinpointemail/types/types.go @@ -55,11 +55,11 @@ type CloudWatchDimensionConfiguration struct { // don't provide the value of the dimension when you send an email. 
This value has // to meet the following criteria: // - // * It can only contain ASCII letters (a-z, - // A-Z), numbers (0-9), underscores (_), or dashes (-). + // * It can only contain ASCII letters (a-z, A-Z), + // numbers (0-9), underscores (_), or dashes (-). // - // * It can contain no - // more than 256 characters. + // * It can contain no more than + // 256 characters. // // This member is required. DefaultDimensionValue *string @@ -67,11 +67,11 @@ type CloudWatchDimensionConfiguration struct { // The name of an Amazon CloudWatch dimension associated with an email sending // metric. The name has to meet the following criteria: // - // * It can only contain + // * It can only contain // ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). // - // * - // It can contain no more than 256 characters. + // * It + // can contain no more than 256 characters. // // This member is required. DimensionName *string @@ -137,11 +137,11 @@ type DedicatedIp struct { // The warm-up status of a dedicated IP address. The status can have one of the // following values: // - // * IN_PROGRESS – The IP address isn't ready to use because - // the dedicated IP warm-up process is ongoing. + // * IN_PROGRESS – The IP address isn't ready to use because the + // dedicated IP warm-up process is ongoing. // - // * DONE – The dedicated IP - // warm-up process is complete, and the IP address is ready to use. + // * DONE – The dedicated IP warm-up + // process is complete, and the IP address is ready to use. // // This member is required. WarmupStatus WarmupStatus @@ -221,24 +221,24 @@ type DkimAttributes struct { // records in the DNS records for the domain. The status can be one of the // following: // - // * PENDING – Amazon Pinpoint hasn't yet located the DKIM records - // in the DNS configuration for the domain, but will continue to attempt to locate + // * PENDING – Amazon Pinpoint hasn't yet located the DKIM records in + // the DNS configuration for the domain, but will continue to attempt to locate // them. // - // * SUCCESS – Amazon Pinpoint located the DKIM records in the DNS + // * SUCCESS – Amazon Pinpoint located the DKIM records in the DNS // configuration for the domain and determined that they're correct. Amazon // Pinpoint can now send DKIM-signed email from the identity. // - // * FAILED – - // Amazon Pinpoint was unable to locate the DKIM records in the DNS settings for - // the domain, and won't continue to search for them. + // * FAILED – Amazon + // Pinpoint was unable to locate the DKIM records in the DNS settings for the + // domain, and won't continue to search for them. // - // * TEMPORARY_FAILURE – A + // * TEMPORARY_FAILURE – A // temporary issue occurred, which prevented Amazon Pinpoint from determining the // DKIM status for the domain. // - // * NOT_STARTED – Amazon Pinpoint hasn't yet - // started searching for the DKIM records in the DKIM records for the domain. + // * NOT_STARTED – Amazon Pinpoint hasn't yet started + // searching for the DKIM records in the DKIM records for the domain. Status DkimStatus // A set of unique strings that you use to create a set of CNAME records that you @@ -365,29 +365,29 @@ type EmailContent struct { // The raw email message. The message has to meet the following criteria: // - // * - // The message has to contain a header and a body, separated by one blank line. + // * The + // message has to contain a header and a body, separated by one blank line. // + // * All + // of the required header fields must be present in the message. 
// - // * All of the required header fields must be present in the message. + // * Each part of a + // multipart MIME message must be formatted properly. // - // * Each - // part of a multipart MIME message must be formatted properly. + // * If you include + // attachments, they must be in a file format that Amazon Pinpoint supports. // - // * If you - // include attachments, they must be in a file format that Amazon Pinpoint - // supports. + // * The + // entire message must be Base64 encoded. // - // * The entire message must be Base64 encoded. + // * If any of the MIME parts in your + // message contain content that is outside of the 7-bit ASCII character range, you + // should encode that content to ensure that recipients' email clients render the + // message properly. // - // * If any of the - // MIME parts in your message contain content that is outside of the 7-bit ASCII - // character range, you should encode that content to ensure that recipients' email - // clients render the message properly. - // - // * The length of any single line of - // text in the message can't exceed 1,000 characters. This restriction is defined - // in RFC 5321 (https://tools.ietf.org/html/rfc5321). + // * The length of any single line of text in the message can't + // exceed 1,000 characters. This restriction is defined in RFC 5321 + // (https://tools.ietf.org/html/rfc5321). Raw *RawMessage // The simple email message. The message consists of a subject and a message body. @@ -488,14 +488,13 @@ type IdentityInfo struct { // The email identity type. The identity type can be one of the following: // - // * + // * // EMAIL_ADDRESS – The identity is an email address. // - // * DOMAIN – The identity - // is a domain. + // * DOMAIN – The identity is a + // domain. // - // * MANAGED_DOMAIN – The identity is a domain that is managed by - // AWS. + // * MANAGED_DOMAIN – The identity is a domain that is managed by AWS. IdentityType IdentityType // Indicates whether or not you can send email from the identity. In Amazon @@ -568,21 +567,21 @@ type MailFromAttributes struct { // This member is required. MailFromDomain *string - // The status of the MAIL FROM domain. This status can have the following values: - // + // The status of the MAIL FROM domain. This status can have the following + // values: // - // * PENDING – Amazon Pinpoint hasn't started searching for the MX record yet. + // * PENDING – Amazon Pinpoint hasn't started searching for the MX record + // yet. // + // * SUCCESS – Amazon Pinpoint detected the required MX record for the MAIL + // FROM domain. // - // * SUCCESS – Amazon Pinpoint detected the required MX record for the MAIL FROM - // domain. - // - // * FAILED – Amazon Pinpoint can't find the required MX record, or + // * FAILED – Amazon Pinpoint can't find the required MX record, or // the record no longer exists. // - // * TEMPORARY_FAILURE – A temporary issue - // occurred, which prevented Amazon Pinpoint from determining the status of the - // MAIL FROM domain. + // * TEMPORARY_FAILURE – A temporary issue occurred, + // which prevented Amazon Pinpoint from determining the status of the MAIL FROM + // domain. // // This member is required. MailFromDomainStatus MailFromDomainStatus @@ -614,11 +613,10 @@ type MessageTag struct { // The name of the message tag. The message tag name has to meet the following // criteria: // - // * It can only contain ASCII letters (a–z, A–Z), numbers (0–9), + // * It can only contain ASCII letters (a–z, A–Z), numbers (0–9), // underscores (_), or dashes (-). 
// - // * It can contain no more than 256 - // characters. + // * It can contain no more than 256 characters. // // This member is required. Name *string @@ -626,11 +624,10 @@ type MessageTag struct { // The value of the message tag. The message tag value has to meet the following // criteria: // - // * It can only contain ASCII letters (a–z, A–Z), numbers (0–9), + // * It can only contain ASCII letters (a–z, A–Z), numbers (0–9), // underscores (_), or dashes (-). // - // * It can contain no more than 256 - // characters. + // * It can contain no more than 256 characters. // // This member is required. Value *string @@ -692,29 +689,28 @@ type RawMessage struct { // The raw email message. The message has to meet the following criteria: // - // * - // The message has to contain a header and a body, separated by one blank line. - // + // * The + // message has to contain a header and a body, separated by one blank line. // - // * All of the required header fields must be present in the message. + // * All + // of the required header fields must be present in the message. // - // * Each - // part of a multipart MIME message must be formatted properly. + // * Each part of a + // multipart MIME message must be formatted properly. // - // * Attachments - // must be in a file format that Amazon Pinpoint supports. + // * Attachments must be in a + // file format that Amazon Pinpoint supports. // - // * The entire - // message must be Base64 encoded. + // * The entire message must be Base64 + // encoded. // - // * If any of the MIME parts in your message - // contain content that is outside of the 7-bit ASCII character range, you should - // encode that content to ensure that recipients' email clients render the message - // properly. + // * If any of the MIME parts in your message contain content that is + // outside of the 7-bit ASCII character range, you should encode that content to + // ensure that recipients' email clients render the message properly. // - // * The length of any single line of text in the message can't - // exceed 1,000 characters. This restriction is defined in RFC 5321 - // (https://tools.ietf.org/html/rfc5321). + // * The length + // of any single line of text in the message can't exceed 1,000 characters. This + // restriction is defined in RFC 5321 (https://tools.ietf.org/html/rfc5321). // // This member is required. Data []byte @@ -786,20 +782,20 @@ type SnsDestination struct { // digits, white space, or one of the following symbols: _ . : / = + -. The // following additional restrictions apply to tags: // -// * Tag keys and values are -// case sensitive. +// * Tag keys and values are case +// sensitive. // -// * For each associated resource, each tag key must be unique -// and it can have only one value. +// * For each associated resource, each tag key must be unique and it +// can have only one value. // -// * The aws: prefix is reserved for use by -// AWS; you can’t use it in any tag keys or values that you define. In addition, -// you can't edit or remove tag keys or values that use this prefix. Tags that use -// this prefix don’t count against the limit of 50 tags per resource. +// * The aws: prefix is reserved for use by AWS; you +// can’t use it in any tag keys or values that you define. In addition, you can't +// edit or remove tag keys or values that use this prefix. Tags that use this +// prefix don’t count against the limit of 50 tags per resource. 
// -// * You -// can associate tags with public or shared resources, but the tags are available -// only for your AWS account, not any other accounts that share the resource. In +// * You can +// associate tags with public or shared resources, but the tags are available only +// for your AWS account, not any other accounts that share the resource. In // addition, the tags are available only for resources that are located in the // specified AWS Region for your AWS account. type Tag struct { diff --git a/service/pinpointsmsvoice/types/enums.go b/service/pinpointsmsvoice/types/enums.go index 7de7827fd30..71bff24151c 100644 --- a/service/pinpointsmsvoice/types/enums.go +++ b/service/pinpointsmsvoice/types/enums.go @@ -6,13 +6,13 @@ type EventType string // Enum values for EventType const ( - EventTypeInitiated_call EventType = "INITIATED_CALL" - EventTypeRinging EventType = "RINGING" - EventTypeAnswered EventType = "ANSWERED" - EventTypeCompleted_call EventType = "COMPLETED_CALL" - EventTypeBusy EventType = "BUSY" - EventTypeFailed EventType = "FAILED" - EventTypeNo_answer EventType = "NO_ANSWER" + EventTypeInitiatedCall EventType = "INITIATED_CALL" + EventTypeRinging EventType = "RINGING" + EventTypeAnswered EventType = "ANSWERED" + EventTypeCompletedCall EventType = "COMPLETED_CALL" + EventTypeBusy EventType = "BUSY" + EventTypeFailed EventType = "FAILED" + EventTypeNoAnswer EventType = "NO_ANSWER" ) // Values returns all known values for EventType. Note that this can be expanded in diff --git a/service/polly/api_op_SynthesizeSpeech.go b/service/polly/api_op_SynthesizeSpeech.go index 8c1a925180f..d3dd62aadf4 100644 --- a/service/polly/api_op_SynthesizeSpeech.go +++ b/service/polly/api_op_SynthesizeSpeech.go @@ -101,18 +101,18 @@ type SynthesizeSpeechOutput struct { // Specifies the type audio stream. This should reflect the OutputFormat parameter // in your request. // - // * If you request mp3 as the OutputFormat, the ContentType + // * If you request mp3 as the OutputFormat, the ContentType // returned is audio/mpeg. // - // * If you request ogg_vorbis as the OutputFormat, - // the ContentType returned is audio/ogg. + // * If you request ogg_vorbis as the OutputFormat, the + // ContentType returned is audio/ogg. // - // * If you request pcm as the - // OutputFormat, the ContentType returned is audio/pcm in a signed 16-bit, 1 - // channel (mono), little-endian format. + // * If you request pcm as the OutputFormat, + // the ContentType returned is audio/pcm in a signed 16-bit, 1 channel (mono), + // little-endian format. // - // * If you request json as the - // OutputFormat, the ContentType returned is audio/json. + // * If you request json as the OutputFormat, the + // ContentType returned is audio/json. ContentType *string // Number of characters synthesized. diff --git a/service/polly/types/enums.go b/service/polly/types/enums.go index 0cfce7e087e..9d4327b10b7 100644 --- a/service/polly/types/enums.go +++ b/service/polly/types/enums.go @@ -114,10 +114,10 @@ type OutputFormat string // Enum values for OutputFormat const ( - OutputFormatJson OutputFormat = "json" - OutputFormatMp3 OutputFormat = "mp3" - OutputFormatOgg_vorbis OutputFormat = "ogg_vorbis" - OutputFormatPcm OutputFormat = "pcm" + OutputFormatJson OutputFormat = "json" + OutputFormatMp3 OutputFormat = "mp3" + OutputFormatOggVorbis OutputFormat = "ogg_vorbis" + OutputFormatPcm OutputFormat = "pcm" ) // Values returns all known values for OutputFormat. 
Note that this can be expanded @@ -158,10 +158,10 @@ type TaskStatus string // Enum values for TaskStatus const ( - TaskStatusScheduled TaskStatus = "scheduled" - TaskStatusIn_progress TaskStatus = "inProgress" - TaskStatusCompleted TaskStatus = "completed" - TaskStatusFailed TaskStatus = "failed" + TaskStatusScheduled TaskStatus = "scheduled" + TaskStatusInProgress TaskStatus = "inProgress" + TaskStatusCompleted TaskStatus = "completed" + TaskStatusFailed TaskStatus = "failed" ) // Values returns all known values for TaskStatus. Note that this can be expanded diff --git a/service/pricing/doc.go b/service/pricing/doc.go index 5591aea3539..348ac864e00 100644 --- a/service/pricing/doc.go +++ b/service/pricing/doc.go @@ -20,8 +20,8 @@ // Service Endpoint AWS Price List Service API provides the following two // endpoints: // -// * https://api.pricing.us-east-1.amazonaws.com +// * https://api.pricing.us-east-1.amazonaws.com // -// * +// * // https://api.pricing.ap-south-1.amazonaws.com package pricing diff --git a/service/pricing/types/enums.go b/service/pricing/types/enums.go index 17998d788c4..6a3160d0e1e 100644 --- a/service/pricing/types/enums.go +++ b/service/pricing/types/enums.go @@ -6,7 +6,7 @@ type FilterType string // Enum values for FilterType const ( - FilterTypeTerm_match FilterType = "TERM_MATCH" + FilterTypeTermMatch FilterType = "TERM_MATCH" ) // Values returns all known values for FilterType. Note that this can be expanded diff --git a/service/qldb/api_op_ExportJournalToS3.go b/service/qldb/api_op_ExportJournalToS3.go index 163df7783cd..62f93d35c21 100644 --- a/service/qldb/api_op_ExportJournalToS3.go +++ b/service/qldb/api_op_ExportJournalToS3.go @@ -62,10 +62,10 @@ type ExportJournalToS3Input struct { // The Amazon Resource Name (ARN) of the IAM role that grants QLDB permissions for // a journal export job to do the following: // - // * Write objects into your Amazon + // * Write objects into your Amazon // Simple Storage Service (Amazon S3) bucket. // - // * (Optional) Use your customer + // * (Optional) Use your customer // master key (CMK) in AWS Key Management Service (AWS KMS) for server-side // encryption of your exported data. // diff --git a/service/qldb/api_op_ListJournalKinesisStreamsForLedger.go b/service/qldb/api_op_ListJournalKinesisStreamsForLedger.go index 1b7f6f59e9a..21a6f4de24d 100644 --- a/service/qldb/api_op_ListJournalKinesisStreamsForLedger.go +++ b/service/qldb/api_op_ListJournalKinesisStreamsForLedger.go @@ -55,9 +55,9 @@ type ListJournalKinesisStreamsForLedgerOutput struct { // * If NextToken is empty, the last page of results has been processed and there // are no more results to be retrieved. // - // * If NextToken is not empty, more - // results are available. To retrieve the next page of results, use the value of - // NextToken in a subsequent ListJournalKinesisStreamsForLedger call. + // * If NextToken is not empty, more results + // are available. To retrieve the next page of results, use the value of NextToken + // in a subsequent ListJournalKinesisStreamsForLedger call. 
NextToken *string // The array of QLDB journal stream descriptors that are associated with the given diff --git a/service/qldb/api_op_ListJournalS3Exports.go b/service/qldb/api_op_ListJournalS3Exports.go index e0c41fa69b8..16d73c5f6ef 100644 --- a/service/qldb/api_op_ListJournalS3Exports.go +++ b/service/qldb/api_op_ListJournalS3Exports.go @@ -54,9 +54,9 @@ type ListJournalS3ExportsOutput struct { // * If NextToken is empty, then the last page of results has been processed and // there are no more results to be retrieved. // - // * If NextToken is not empty, - // then there are more results available. To retrieve the next page of results, use - // the value of NextToken in a subsequent ListJournalS3Exports call. + // * If NextToken is not empty, then + // there are more results available. To retrieve the next page of results, use the + // value of NextToken in a subsequent ListJournalS3Exports call. NextToken *string // Metadata pertaining to the operation's result. diff --git a/service/qldb/api_op_ListJournalS3ExportsForLedger.go b/service/qldb/api_op_ListJournalS3ExportsForLedger.go index 7acf0959191..e1e8c3a554f 100644 --- a/service/qldb/api_op_ListJournalS3ExportsForLedger.go +++ b/service/qldb/api_op_ListJournalS3ExportsForLedger.go @@ -61,9 +61,9 @@ type ListJournalS3ExportsForLedgerOutput struct { // * If NextToken is empty, then the last page of results has been processed and // there are no more results to be retrieved. // - // * If NextToken is not empty, - // then there are more results available. To retrieve the next page of results, use - // the value of NextToken in a subsequent ListJournalS3ExportsForLedger call. + // * If NextToken is not empty, then + // there are more results available. To retrieve the next page of results, use the + // value of NextToken in a subsequent ListJournalS3ExportsForLedger call. NextToken *string // Metadata pertaining to the operation's result. diff --git a/service/qldb/api_op_ListLedgers.go b/service/qldb/api_op_ListLedgers.go index d01f4c07474..5540b941d52 100644 --- a/service/qldb/api_op_ListLedgers.go +++ b/service/qldb/api_op_ListLedgers.go @@ -49,13 +49,13 @@ type ListLedgersOutput struct { // A pagination token, indicating whether there are more results available: // - // * - // If NextToken is empty, then the last page of results has been processed and - // there are no more results to be retrieved. + // * If + // NextToken is empty, then the last page of results has been processed and there + // are no more results to be retrieved. // - // * If NextToken is not empty, - // then there are more results available. To retrieve the next page of results, use - // the value of NextToken in a subsequent ListLedgers call. + // * If NextToken is not empty, then there + // are more results available. To retrieve the next page of results, use the value + // of NextToken in a subsequent ListLedgers call. NextToken *string // Metadata pertaining to the operation's result. 
diff --git a/service/qldb/types/enums.go b/service/qldb/types/enums.go index c67c19cee2d..856179f2f86 100644 --- a/service/qldb/types/enums.go +++ b/service/qldb/types/enums.go @@ -6,8 +6,8 @@ type ErrorCause string // Enum values for ErrorCause const ( - ErrorCauseKinesis_stream_not_found ErrorCause = "KINESIS_STREAM_NOT_FOUND" - ErrorCauseIam_permission_revoked ErrorCause = "IAM_PERMISSION_REVOKED" + ErrorCauseKinesisStreamNotFound ErrorCause = "KINESIS_STREAM_NOT_FOUND" + ErrorCauseIamPermissionRevoked ErrorCause = "IAM_PERMISSION_REVOKED" ) // Values returns all known values for ErrorCause. Note that this can be expanded @@ -24,9 +24,9 @@ type ExportStatus string // Enum values for ExportStatus const ( - ExportStatusIn_progress ExportStatus = "IN_PROGRESS" - ExportStatusCompleted ExportStatus = "COMPLETED" - ExportStatusCancelled ExportStatus = "CANCELLED" + ExportStatusInProgress ExportStatus = "IN_PROGRESS" + ExportStatusCompleted ExportStatus = "COMPLETED" + ExportStatusCancelled ExportStatus = "CANCELLED" ) // Values returns all known values for ExportStatus. Note that this can be expanded @@ -66,7 +66,7 @@ type PermissionsMode string // Enum values for PermissionsMode const ( - PermissionsModeAllow_all PermissionsMode = "ALLOW_ALL" + PermissionsModeAllowAll PermissionsMode = "ALLOW_ALL" ) // Values returns all known values for PermissionsMode. Note that this can be @@ -82,9 +82,9 @@ type S3ObjectEncryptionType string // Enum values for S3ObjectEncryptionType const ( - S3ObjectEncryptionTypeSse_kms S3ObjectEncryptionType = "SSE_KMS" - S3ObjectEncryptionTypeSse_s3 S3ObjectEncryptionType = "SSE_S3" - S3ObjectEncryptionTypeNo_encryption S3ObjectEncryptionType = "NO_ENCRYPTION" + S3ObjectEncryptionTypeSseKms S3ObjectEncryptionType = "SSE_KMS" + S3ObjectEncryptionTypeSseS3 S3ObjectEncryptionType = "SSE_S3" + S3ObjectEncryptionTypeNoEncryption S3ObjectEncryptionType = "NO_ENCRYPTION" ) // Values returns all known values for S3ObjectEncryptionType. Note that this can diff --git a/service/qldb/types/types.go b/service/qldb/types/types.go index b8a3bbc4091..28ba7e3c575 100644 --- a/service/qldb/types/types.go +++ b/service/qldb/types/types.go @@ -101,10 +101,10 @@ type JournalS3ExportDescription struct { // The Amazon Resource Name (ARN) of the IAM role that grants QLDB permissions for // a journal export job to do the following: // - // * Write objects into your Amazon + // * Write objects into your Amazon // Simple Storage Service (Amazon S3) bucket. // - // * (Optional) Use your customer + // * (Optional) Use your customer // master key (CMK) in AWS Key Management Service (AWS KMS) for server-side // encryption of your exported data. // @@ -198,12 +198,12 @@ type S3ExportConfiguration struct { // (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html) in the // Amazon S3 Developer Guide. The following are examples of valid Prefix values: // + // * + // JournalExports-ForMyLedger/Testing/ // - // * JournalExports-ForMyLedger/Testing/ + // * JournalExports // - // * JournalExports - // - // * My:Tests/ + // * My:Tests/ // // This member is required. Prefix *string diff --git a/service/qldbsession/api_op_SendCommand.go b/service/qldbsession/api_op_SendCommand.go index 56de9bae7e3..403beb43e34 100644 --- a/service/qldbsession/api_op_SendCommand.go +++ b/service/qldbsession/api_op_SendCommand.go @@ -15,18 +15,18 @@ import ( // this API, we recommend that you use the Amazon QLDB Driver or the QLDB Shell to // execute data transactions on a ledger. 
// -// * If you are working with an AWS -// SDK, use the QLDB Driver. The driver provides a high-level abstraction layer -// above this qldbsession data plane and manages SendCommand API calls for you. For +// * If you are working with an AWS SDK, +// use the QLDB Driver. The driver provides a high-level abstraction layer above +// this qldbsession data plane and manages SendCommand API calls for you. For // information and a list of supported programming languages, see Getting started // with the driver // (https://docs.aws.amazon.com/qldb/latest/developerguide/getting-started-driver.html) // in the Amazon QLDB Developer Guide. // -// * If you are working with the AWS -// Command Line Interface (AWS CLI), use the QLDB Shell. The shell is a command -// line interface that uses the QLDB Driver to interact with a ledger. For -// information, see Accessing Amazon QLDB using the QLDB Shell +// * If you are working with the AWS Command +// Line Interface (AWS CLI), use the QLDB Shell. The shell is a command line +// interface that uses the QLDB Driver to interact with a ledger. For information, +// see Accessing Amazon QLDB using the QLDB Shell // (https://docs.aws.amazon.com/qldb/latest/developerguide/data-shell.html). func (c *Client) SendCommand(ctx context.Context, params *SendCommandInput, optFns ...func(*Options)) (*SendCommandOutput, error) { if params == nil { diff --git a/service/qldbsession/doc.go b/service/qldbsession/doc.go index c9ab198feef..8c1a9b6de37 100644 --- a/service/qldbsession/doc.go +++ b/service/qldbsession/doc.go @@ -7,17 +7,17 @@ // this API, we recommend that you use the Amazon QLDB Driver or the QLDB Shell to // execute data transactions on a ledger. // -// * If you are working with an AWS -// SDK, use the QLDB Driver. The driver provides a high-level abstraction layer -// above this qldbsession data plane and manages SendCommand API calls for you. For +// * If you are working with an AWS SDK, +// use the QLDB Driver. The driver provides a high-level abstraction layer above +// this qldbsession data plane and manages SendCommand API calls for you. For // information and a list of supported programming languages, see Getting started // with the driver // (https://docs.aws.amazon.com/qldb/latest/developerguide/getting-started-driver.html) // in the Amazon QLDB Developer Guide. // -// * If you are working with the AWS -// Command Line Interface (AWS CLI), use the QLDB Shell. The shell is a command -// line interface that uses the QLDB Driver to interact with a ledger. For -// information, see Accessing Amazon QLDB using the QLDB Shell +// * If you are working with the AWS Command +// Line Interface (AWS CLI), use the QLDB Shell. The shell is a command line +// interface that uses the QLDB Driver to interact with a ledger. For information, +// see Accessing Amazon QLDB using the QLDB Shell // (https://docs.aws.amazon.com/qldb/latest/developerguide/data-shell.html). package qldbsession diff --git a/service/quicksight/api_op_CreateDashboard.go b/service/quicksight/api_op_CreateDashboard.go index 20d76164f72..843d60bec1c 100644 --- a/service/quicksight/api_op_CreateDashboard.go +++ b/service/quicksight/api_op_CreateDashboard.go @@ -65,20 +65,19 @@ type CreateDashboardInput struct { // Options for publishing the dashboard when you create it: // - // * - // AvailabilityStatus for AdHocFilteringOption - This status can be either ENABLED - // or DISABLED. 
When this is set to DISABLED, QuickSight disables the left filter - // pane on the published dashboard, which can be used for ad hoc (one-time) - // filtering. This option is ENABLED by default. + // * AvailabilityStatus + // for AdHocFilteringOption - This status can be either ENABLED or DISABLED. When + // this is set to DISABLED, QuickSight disables the left filter pane on the + // published dashboard, which can be used for ad hoc (one-time) filtering. This + // option is ENABLED by default. // - // * AvailabilityStatus for - // ExportToCSVOption - This status can be either ENABLED or DISABLED. The visual - // option to export data to .CSV format isn't enabled when this is set to DISABLED. - // This option is ENABLED by default. + // * AvailabilityStatus for ExportToCSVOption - This + // status can be either ENABLED or DISABLED. The visual option to export data to + // .CSV format isn't enabled when this is set to DISABLED. This option is ENABLED + // by default. // - // * VisibilityState for - // SheetControlsOption - This visibility state can be either COLLAPSED or EXPANDED. - // This option is COLLAPSED by default. + // * VisibilityState for SheetControlsOption - This visibility state + // can be either COLLAPSED or EXPANDED. This option is COLLAPSED by default. DashboardPublishOptions *types.DashboardPublishOptions // The parameters for the creation of the dashboard, which you want to use to diff --git a/service/quicksight/api_op_CreateIAMPolicyAssignment.go b/service/quicksight/api_op_CreateIAMPolicyAssignment.go index 8d40c3e8173..d94f8487997 100644 --- a/service/quicksight/api_op_CreateIAMPolicyAssignment.go +++ b/service/quicksight/api_op_CreateIAMPolicyAssignment.go @@ -38,15 +38,15 @@ type CreateIAMPolicyAssignmentInput struct { // The status of the assignment. Possible values are as follows: // - // * ENABLED - + // * ENABLED - // Anything specified in this assignment is used when creating the data source. // + // * + // DISABLED - This assignment isn't used when creating the data source. // - // * DISABLED - This assignment isn't used when creating the data source. - // - // * - // DRAFT - This assignment is an unfinished draft and isn't used when creating the - // data source. + // * DRAFT - + // This assignment is an unfinished draft and isn't used when creating the data + // source. // // This member is required. AssignmentStatus types.AssignmentStatus @@ -80,15 +80,15 @@ type CreateIAMPolicyAssignmentOutput struct { // The status of the assignment. Possible values are as follows: // - // * ENABLED - + // * ENABLED - // Anything specified in this assignment is used when creating the data source. // + // * + // DISABLED - This assignment isn't used when creating the data source. // - // * DISABLED - This assignment isn't used when creating the data source. - // - // * - // DRAFT - This assignment is an unfinished draft and isn't used when creating the - // data source. + // * DRAFT - + // This assignment is an unfinished draft and isn't used when creating the data + // source. AssignmentStatus types.AssignmentStatus // The QuickSight users, groups, or both that the IAM policy is assigned to. diff --git a/service/quicksight/api_op_DescribeAccountCustomization.go b/service/quicksight/api_op_DescribeAccountCustomization.go index 6d559d4e321..f8443065d1c 100644 --- a/service/quicksight/api_op_DescribeAccountCustomization.go +++ b/service/quicksight/api_op_DescribeAccountCustomization.go @@ -17,48 +17,47 @@ import ( // included. 
To determine what customizations display when you run this command, it // can help to visualize the relationship of the entities involved. // -// * AWS -// Account - The AWS account exists at the top of the hierarchy. It has the -// potential to use all of the AWS Regions and AWS Services. When you subscribe to -// QuickSight, you choose one AWS Region to use as your home Region. That's where -// your free SPICE capacity is located. You can use QuickSight in any supported AWS +// * AWS Account +// - The AWS account exists at the top of the hierarchy. It has the potential to +// use all of the AWS Regions and AWS Services. When you subscribe to QuickSight, +// you choose one AWS Region to use as your home Region. That's where your free +// SPICE capacity is located. You can use QuickSight in any supported AWS // Region. // -// * AWS Region - In each AWS Region where you sign in to QuickSight -// at least once, QuickSight acts as a separate instance of the same service. If -// you have a user directory, it resides in us-east-1, which is the US East (N. +// * AWS Region - In each AWS Region where you sign in to QuickSight at +// least once, QuickSight acts as a separate instance of the same service. If you +// have a user directory, it resides in us-east-1, which is the US East (N. // Virginia). Generally speaking, these users have access to QuickSight in any AWS // Region, unless they are constrained to a namespace. To run the command in a // different AWS Region, you change your Region settings. If you're using the AWS // CLI, you can use one of the following options: // -// * Use command line -// options +// * Use command line options // (https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-options.html). // -// -// * Use named profiles +// * +// Use named profiles // (https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-profiles.html). // -// -// * Run aws configure to change your default AWS Region. Use Enter to key the same +// * +// Run aws configure to change your default AWS Region. Use Enter to key the same // settings for your keys. For more information, see Configuring the AWS CLI // (https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html). // +// * +// Namespace - A QuickSight namespace is a partition that contains users and assets +// (data sources, datasets, dashboards, and so on). To access assets that are in a +// specific namespace, users and groups must also be part of the same namespace. +// People who share a namespace are completely isolated from users and assets in +// other namespaces, even if they are in the same AWS account and AWS Region. // -// * Namespace - A QuickSight namespace is a partition that contains users and -// assets (data sources, datasets, dashboards, and so on). To access assets that -// are in a specific namespace, users and groups must also be part of the same -// namespace. People who share a namespace are completely isolated from users and -// assets in other namespaces, even if they are in the same AWS account and AWS -// Region. -// -// * Applied customizations - Within an AWS Region, a set of -// QuickSight customizations can apply to an AWS account or to a namespace. -// Settings that you apply to a namespace override settings that you apply to an -// AWS account. All settings are isolated to a single AWS Region. To apply them in -// other AWS Regions, run the CreateAccountCustomization command in each AWS Region -// where you want to apply the same customizations. 
+// * +// Applied customizations - Within an AWS Region, a set of QuickSight +// customizations can apply to an AWS account or to a namespace. Settings that you +// apply to a namespace override settings that you apply to an AWS account. All +// settings are isolated to a single AWS Region. To apply them in other AWS +// Regions, run the CreateAccountCustomization command in each AWS Region where you +// want to apply the same customizations. func (c *Client) DescribeAccountCustomization(ctx context.Context, params *DescribeAccountCustomizationInput, optFns ...func(*Options)) (*DescribeAccountCustomizationOutput, error) { if params == nil { params = &DescribeAccountCustomizationInput{} diff --git a/service/quicksight/api_op_GetDashboardEmbedUrl.go b/service/quicksight/api_op_GetDashboardEmbedUrl.go index f45f725c5f2..f6bf60368f1 100644 --- a/service/quicksight/api_op_GetDashboardEmbedUrl.go +++ b/service/quicksight/api_op_GetDashboardEmbedUrl.go @@ -18,15 +18,15 @@ import ( // user's browser. The following rules apply to the combination of URL and // authorization code: // -// * They must be used together. +// * They must be used together. // -// * They can be used -// one time only. +// * They can be used one time +// only. // -// * They are valid for 5 minutes after you run this command. +// * They are valid for 5 minutes after you run this command. // -// -// * The resulting user session is valid for 10 hours. +// * The +// resulting user session is valid for 10 hours. // // For more information, see // Embedding Amazon QuickSight @@ -80,17 +80,17 @@ type GetDashboardEmbedUrlInput struct { // identity type. You can use this for any Amazon QuickSight users in your account // (readers, authors, or admins) authenticated as one of the following: // - // * - // Active Directory (AD) users or group members - // - // * Invited nonfederated users + // * Active + // Directory (AD) users or group members // + // * Invited nonfederated users // - // * IAM users and IAM role-based sessions authenticated through Federated Single - // Sign-On using SAML, OpenID Connect, or IAM federation. + // * IAM users + // and IAM role-based sessions authenticated through Federated Single Sign-On using + // SAML, OpenID Connect, or IAM federation. // - // Omit this parameter for - // users in the third group – IAM users and IAM role-based sessions. + // Omit this parameter for users in the + // third group – IAM users and IAM role-based sessions. UserArn *string } diff --git a/service/quicksight/api_op_GetSessionEmbedUrl.go b/service/quicksight/api_op_GetSessionEmbedUrl.go index 0fb8207af47..b7ffb55f276 100644 --- a/service/quicksight/api_op_GetSessionEmbedUrl.go +++ b/service/quicksight/api_op_GetSessionEmbedUrl.go @@ -20,12 +20,12 @@ import ( // operation to add a new user with a custom permission profile attached. 
For more // information, see the following sections in the Amazon QuickSight User Guide: // -// -// * Embedding the Amazon QuickSight Console +// * +// Embedding the Amazon QuickSight Console // (https://docs.aws.amazon.com/quicksight/latest/user/embedding-the-quicksight-console.html) // -// -// * Customizing Access to the Amazon QuickSight Console +// * +// Customizing Access to the Amazon QuickSight Console // (https://docs.aws.amazon.com/quicksight/latest/user/customizing-permissions-to-the-quicksight-console.html) func (c *Client) GetSessionEmbedUrl(ctx context.Context, params *GetSessionEmbedUrlInput, optFns ...func(*Options)) (*GetSessionEmbedUrlOutput, error) { if params == nil { @@ -52,21 +52,21 @@ type GetSessionEmbedUrlInput struct { // The URL you use to access the embedded session. The entry point URL is // constrained to the following paths: // - // * /start + // * /start // - // * /start/analyses + // * /start/analyses // - // * + // * // /start/dashboards // - // * /start/favorites + // * /start/favorites // - // * /dashboards/DashboardId - - // where DashboardId is the actual ID key from the QuickSight console URL of the + // * /dashboards/DashboardId - where + // DashboardId is the actual ID key from the QuickSight console URL of the // dashboard // - // * /analyses/AnalysisId - where AnalysisId is the actual ID key - // from the QuickSight console URL of the analysis + // * /analyses/AnalysisId - where AnalysisId is the actual ID key from + // the QuickSight console URL of the analysis EntryPoint *string // How many minutes the session is valid. The session lifetime must be 15-600 @@ -78,17 +78,17 @@ type GetSessionEmbedUrlInput struct { // account (readers, authors, or admins). They need to be authenticated as one of // the following: // - // * Active Directory (AD) users or group members + // * Active Directory (AD) users or group members // - // * - // Invited nonfederated users + // * Invited + // nonfederated users // - // * IAM users and IAM role-based sessions - // authenticated through Federated Single Sign-On using SAML, OpenID Connect, or - // IAM federation + // * IAM users and IAM role-based sessions authenticated + // through Federated Single Sign-On using SAML, OpenID Connect, or IAM + // federation // - // Omit this parameter for users in the third group – IAM users and - // IAM role-based sessions. + // Omit this parameter for users in the third group – IAM users and IAM + // role-based sessions. UserArn *string } diff --git a/service/quicksight/api_op_ListThemes.go b/service/quicksight/api_op_ListThemes.go index fc2284dabdd..09a980bcdf6 100644 --- a/service/quicksight/api_op_ListThemes.go +++ b/service/quicksight/api_op_ListThemes.go @@ -40,16 +40,16 @@ type ListThemesInput struct { // The token for the next set of results, or null if there are no more results. NextToken *string - // The type of themes that you want to list. Valid options include the following: - // + // The type of themes that you want to list. Valid options include the + // following: // // * ALL (default)- Display all existing themes. // - // * CUSTOM - Display only the - // themes created by people using Amazon QuickSight. + // * CUSTOM - Display + // only the themes created by people using Amazon QuickSight. // - // * QUICKSIGHT - Display - // only the starting themes defined by QuickSight. + // * QUICKSIGHT - + // Display only the starting themes defined by QuickSight. 
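	// A hypothetical usage sketch, not part of the generated patch: a caller
	// could filter the listing with one of the generated ThemeType constants.
	// The client value, the account ID, and the assumed imports
	// (github.com/aws/aws-sdk-go-v2/aws, .../service/quicksight, and
	// .../service/quicksight/types) are illustration-only assumptions.
	//
	//	out, err := client.ListThemes(ctx, &quicksight.ListThemesInput{
	//	    AwsAccountId: aws.String("111122223333"),
	//	    Type:         types.ThemeTypeCustom,
	//	})
	//	if err != nil {
	//	    // handle the error
	//	}
	//	_ = out // inspect the returned theme summaries in real code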
Type types.ThemeType } diff --git a/service/quicksight/api_op_RegisterUser.go b/service/quicksight/api_op_RegisterUser.go index 3d08f98b4c4..de9486fc332 100644 --- a/service/quicksight/api_op_RegisterUser.go +++ b/service/quicksight/api_op_RegisterUser.go @@ -44,11 +44,11 @@ type RegisterUserInput struct { // Amazon QuickSight supports several ways of managing the identity of users. This // parameter accepts two values: // - // * IAM: A user whose identity maps to an - // existing IAM user or role. + // * IAM: A user whose identity maps to an existing + // IAM user or role. // - // * QUICKSIGHT: A user whose identity is owned and - // managed internally by Amazon QuickSight. + // * QUICKSIGHT: A user whose identity is owned and managed + // internally by Amazon QuickSight. // // This member is required. IdentityType types.IdentityType @@ -61,20 +61,19 @@ type RegisterUserInput struct { // The Amazon QuickSight role for the user. The user role can be one of the // following: // - // * READER: A user who has read-only access to dashboards. + // * READER: A user who has read-only access to dashboards. // - // * - // AUTHOR: A user who can create data sources, datasets, analyses, and - // dashboards. + // * AUTHOR: + // A user who can create data sources, datasets, analyses, and dashboards. // - // * ADMIN: A user who is an author, who can also manage Amazon - // QuickSight settings. + // * + // ADMIN: A user who is an author, who can also manage Amazon QuickSight + // settings. // - // * RESTRICTED_READER: This role isn't currently - // available for use. + // * RESTRICTED_READER: This role isn't currently available for use. // - // * RESTRICTED_AUTHOR: This role isn't currently available - // for use. + // * + // RESTRICTED_AUTHOR: This role isn't currently available for use. // // This member is required. UserRole types.UserRole @@ -83,18 +82,18 @@ type RegisterUserInput struct { // want to assign to this user. Customized permissions allows you to control a // user's access by restricting access the following operations: // - // * Create and + // * Create and // update data sources // - // * Create and update datasets + // * Create and update datasets // - // * Create and update - // email reports + // * Create and update email + // reports // - // * Subscribe to email reports + // * Subscribe to email reports // - // To add custom permissions to an - // existing user, use UpdateUser instead. A set of custom permissions includes any + // To add custom permissions to an existing + // user, use UpdateUser instead. A set of custom permissions includes any // combination of these restrictions. Currently, you need to create the profile // names for custom permission sets by using the QuickSight console. Then, you use // the RegisterUser API operation to assign the named set of permissions to a diff --git a/service/quicksight/api_op_TagResource.go b/service/quicksight/api_op_TagResource.go index b373f07ddbb..dbb3a793229 100644 --- a/service/quicksight/api_op_TagResource.go +++ b/service/quicksight/api_op_TagResource.go @@ -23,13 +23,13 @@ import ( // data set, data source, dashboard, and template. Tagging for QuickSight works in // a similar way to tagging for other AWS services, except for the following: // -// -// * You can't use tags to track AWS costs for QuickSight. This restriction is +// * +// You can't use tags to track AWS costs for QuickSight. This restriction is // because QuickSight costs are based on users and SPICE capacity, which aren't // taggable resources. 
// -// * QuickSight doesn't currently support the Tag Editor -// for AWS Resource Groups. +// * QuickSight doesn't currently support the Tag Editor for +// AWS Resource Groups. func (c *Client) TagResource(ctx context.Context, params *TagResourceInput, optFns ...func(*Options)) (*TagResourceOutput, error) { if params == nil { params = &TagResourceInput{} diff --git a/service/quicksight/api_op_UpdateDashboard.go b/service/quicksight/api_op_UpdateDashboard.go index 9ed1ac96663..7f6d1b72d2d 100644 --- a/service/quicksight/api_op_UpdateDashboard.go +++ b/service/quicksight/api_op_UpdateDashboard.go @@ -60,20 +60,19 @@ type UpdateDashboardInput struct { // Options for publishing the dashboard when you create it: // - // * - // AvailabilityStatus for AdHocFilteringOption - This status can be either ENABLED - // or DISABLED. When this is set to DISABLED, QuickSight disables the left filter - // pane on the published dashboard, which can be used for ad hoc (one-time) - // filtering. This option is ENABLED by default. + // * AvailabilityStatus + // for AdHocFilteringOption - This status can be either ENABLED or DISABLED. When + // this is set to DISABLED, QuickSight disables the left filter pane on the + // published dashboard, which can be used for ad hoc (one-time) filtering. This + // option is ENABLED by default. // - // * AvailabilityStatus for - // ExportToCSVOption - This status can be either ENABLED or DISABLED. The visual - // option to export data to .CSV format isn't enabled when this is set to DISABLED. - // This option is ENABLED by default. + // * AvailabilityStatus for ExportToCSVOption - This + // status can be either ENABLED or DISABLED. The visual option to export data to + // .CSV format isn't enabled when this is set to DISABLED. This option is ENABLED + // by default. // - // * VisibilityState for - // SheetControlsOption - This visibility state can be either COLLAPSED or EXPANDED. - // This option is COLLAPSED by default. + // * VisibilityState for SheetControlsOption - This visibility state + // can be either COLLAPSED or EXPANDED. This option is COLLAPSED by default. DashboardPublishOptions *types.DashboardPublishOptions // A structure that contains the parameters of the dashboard. These are parameter diff --git a/service/quicksight/api_op_UpdateIAMPolicyAssignment.go b/service/quicksight/api_op_UpdateIAMPolicyAssignment.go index 2d43753b367..45ea28948da 100644 --- a/service/quicksight/api_op_UpdateIAMPolicyAssignment.go +++ b/service/quicksight/api_op_UpdateIAMPolicyAssignment.go @@ -47,15 +47,15 @@ type UpdateIAMPolicyAssignmentInput struct { // The status of the assignment. Possible values are as follows: // - // * ENABLED - + // * ENABLED - // Anything specified in this assignment is used when creating the data source. // + // * + // DISABLED - This assignment isn't used when creating the data source. // - // * DISABLED - This assignment isn't used when creating the data source. - // - // * - // DRAFT - This assignment is an unfinished draft and isn't used when creating the - // data source. + // * DRAFT - + // This assignment is an unfinished draft and isn't used when creating the data + // source. AssignmentStatus types.AssignmentStatus // The QuickSight users, groups, or both that you want to assign the policy to. @@ -76,15 +76,15 @@ type UpdateIAMPolicyAssignmentOutput struct { // The status of the assignment. Possible values are as follows: // - // * ENABLED - + // * ENABLED - // Anything specified in this assignment is used when creating the data source. 
// + // * + // DISABLED - This assignment isn't used when creating the data source. // - // * DISABLED - This assignment isn't used when creating the data source. - // - // * - // DRAFT - This assignment is an unfinished draft and isn't used when creating the - // data source. + // * DRAFT - + // This assignment is an unfinished draft and isn't used when creating the data + // source. AssignmentStatus types.AssignmentStatus // The QuickSight users, groups, or both that the IAM policy is assigned to. diff --git a/service/quicksight/api_op_UpdateThemePermissions.go b/service/quicksight/api_op_UpdateThemePermissions.go index d2df59973f5..51757af5cd7 100644 --- a/service/quicksight/api_op_UpdateThemePermissions.go +++ b/service/quicksight/api_op_UpdateThemePermissions.go @@ -16,51 +16,50 @@ import ( // permissions apply in groupings. Valid groupings include the following for the // three levels of permissions, which are user, owner, or no permissions: // -// * +// * // User // -// * "quicksight:DescribeTheme" +// * "quicksight:DescribeTheme" // -// * -// "quicksight:DescribeThemeAlias" -// -// * "quicksight:ListThemeAliases" +// * "quicksight:DescribeThemeAlias" // +// * +// "quicksight:ListThemeAliases" // // * "quicksight:ListThemeVersions" // -// * Owner +// * Owner // -// * +// * // "quicksight:DescribeTheme" // -// * "quicksight:DescribeThemeAlias" +// * "quicksight:DescribeThemeAlias" // -// * +// * // "quicksight:ListThemeAliases" // -// * "quicksight:ListThemeVersions" -// +// * "quicksight:ListThemeVersions" // -// * "quicksight:DeleteTheme" +// * +// "quicksight:DeleteTheme" // -// * "quicksight:UpdateTheme" +// * "quicksight:UpdateTheme" // -// * +// * // "quicksight:CreateThemeAlias" // -// * "quicksight:DeleteThemeAlias" -// -// -// * "quicksight:UpdateThemeAlias" +// * "quicksight:DeleteThemeAlias" // -// * "quicksight:UpdateThemePermissions" +// * +// "quicksight:UpdateThemeAlias" // +// * "quicksight:UpdateThemePermissions" // -// * "quicksight:DescribeThemePermissions" +// * +// "quicksight:DescribeThemePermissions" // -// * To specify no permissions, omit -// the permissions list. +// * To specify no permissions, omit the +// permissions list. func (c *Client) UpdateThemePermissions(ctx context.Context, params *UpdateThemePermissionsInput, optFns ...func(*Options)) (*UpdateThemePermissionsOutput, error) { if params == nil { params = &UpdateThemePermissionsInput{} diff --git a/service/quicksight/api_op_UpdateUser.go b/service/quicksight/api_op_UpdateUser.go index 3085d6b69b0..e6fc4d6e16d 100644 --- a/service/quicksight/api_op_UpdateUser.go +++ b/service/quicksight/api_op_UpdateUser.go @@ -48,17 +48,17 @@ type UpdateUserInput struct { // The Amazon QuickSight role of the user. The role can be one of the following // default security cohorts: // - // * READER: A user who has read-only access to + // * READER: A user who has read-only access to // dashboards. // - // * AUTHOR: A user who can create data sources, datasets, - // analyses, and dashboards. + // * AUTHOR: A user who can create data sources, datasets, analyses, + // and dashboards. // - // * ADMIN: A user who is an author, who can also - // manage Amazon QuickSight settings. + // * ADMIN: A user who is an author, who can also manage Amazon + // QuickSight settings. // - // The name of the QuickSight role is invisible - // to the user except for the console screens dealing with permissions. + // The name of the QuickSight role is invisible to the user + // except for the console screens dealing with permissions. 
// // This member is required. Role types.UserRole @@ -72,26 +72,25 @@ type UpdateUserInput struct { // want to assign to this user. Customized permissions allows you to control a // user's access by restricting access the following operations: // - // * Create and + // * Create and // update data sources // - // * Create and update datasets + // * Create and update datasets // - // * Create and update - // email reports + // * Create and update email + // reports // - // * Subscribe to email reports + // * Subscribe to email reports // - // A set of custom permissions - // includes any combination of these restrictions. Currently, you need to create - // the profile names for custom permission sets by using the QuickSight console. - // Then, you use the RegisterUser API operation to assign the named set of - // permissions to a QuickSight user. QuickSight custom permissions are applied - // through IAM policies. Therefore, they override the permissions typically granted - // by assigning QuickSight users to one of the default security cohorts in - // QuickSight (admin, author, reader). This feature is available only to QuickSight - // Enterprise edition subscriptions that use SAML 2.0-Based Federation for Single - // Sign-On (SSO). + // A set of custom permissions includes any + // combination of these restrictions. Currently, you need to create the profile + // names for custom permission sets by using the QuickSight console. Then, you use + // the RegisterUser API operation to assign the named set of permissions to a + // QuickSight user. QuickSight custom permissions are applied through IAM policies. + // Therefore, they override the permissions typically granted by assigning + // QuickSight users to one of the default security cohorts in QuickSight (admin, + // author, reader). This feature is available only to QuickSight Enterprise edition + // subscriptions that use SAML 2.0-Based Federation for Single Sign-On (SSO). 
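	// A hypothetical usage sketch, not part of the generated patch: assigning a
	// console-defined custom permissions profile while updating a user. The
	// profile name, account ID, namespace, and user details below are
	// illustrative assumptions; the Role constant reflects the renamed enum
	// values in this change (types.UserRoleAuthor).
	//
	//	_, err := client.UpdateUser(ctx, &quicksight.UpdateUserInput{
	//	    AwsAccountId:          aws.String("111122223333"),
	//	    Namespace:             aws.String("default"),
	//	    UserName:              aws.String("example-author"),
	//	    Email:                 aws.String("author@example.com"),
	//	    Role:                  types.UserRoleAuthor,
	//	    CustomPermissionsName: aws.String("ExamplePermissionProfile"),
	//	})
	//	if err != nil {
	//	    // handle the error
	//	}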
CustomPermissionsName *string // A flag that you use to indicate that you want to remove all custom permissions diff --git a/service/quicksight/types/enums.go b/service/quicksight/types/enums.go index e485f6ebb4d..5540543e400 100644 --- a/service/quicksight/types/enums.go +++ b/service/quicksight/types/enums.go @@ -6,16 +6,16 @@ type AnalysisErrorType string // Enum values for AnalysisErrorType const ( - AnalysisErrorTypeAccess_denied AnalysisErrorType = "ACCESS_DENIED" - AnalysisErrorTypeSource_not_found AnalysisErrorType = "SOURCE_NOT_FOUND" - AnalysisErrorTypeData_set_not_found AnalysisErrorType = "DATA_SET_NOT_FOUND" - AnalysisErrorTypeInternal_failure AnalysisErrorType = "INTERNAL_FAILURE" - AnalysisErrorTypeParameter_value_incompatible AnalysisErrorType = "PARAMETER_VALUE_INCOMPATIBLE" - AnalysisErrorTypeParameter_type_invalid AnalysisErrorType = "PARAMETER_TYPE_INVALID" - AnalysisErrorTypeParameter_not_found AnalysisErrorType = "PARAMETER_NOT_FOUND" - AnalysisErrorTypeColumn_type_mismatch AnalysisErrorType = "COLUMN_TYPE_MISMATCH" - AnalysisErrorTypeColumn_geographic_role_mismatch AnalysisErrorType = "COLUMN_GEOGRAPHIC_ROLE_MISMATCH" - AnalysisErrorTypeColumn_replacement_missing AnalysisErrorType = "COLUMN_REPLACEMENT_MISSING" + AnalysisErrorTypeAccessDenied AnalysisErrorType = "ACCESS_DENIED" + AnalysisErrorTypeSourceNotFound AnalysisErrorType = "SOURCE_NOT_FOUND" + AnalysisErrorTypeDataSetNotFound AnalysisErrorType = "DATA_SET_NOT_FOUND" + AnalysisErrorTypeInternalFailure AnalysisErrorType = "INTERNAL_FAILURE" + AnalysisErrorTypeParameterValueIncompatible AnalysisErrorType = "PARAMETER_VALUE_INCOMPATIBLE" + AnalysisErrorTypeParameterTypeInvalid AnalysisErrorType = "PARAMETER_TYPE_INVALID" + AnalysisErrorTypeParameterNotFound AnalysisErrorType = "PARAMETER_NOT_FOUND" + AnalysisErrorTypeColumnTypeMismatch AnalysisErrorType = "COLUMN_TYPE_MISMATCH" + AnalysisErrorTypeColumnGeographicRoleMismatch AnalysisErrorType = "COLUMN_GEOGRAPHIC_ROLE_MISMATCH" + AnalysisErrorTypeColumnReplacementMissing AnalysisErrorType = "COLUMN_REPLACEMENT_MISSING" ) // Values returns all known values for AnalysisErrorType. Note that this can be @@ -40,7 +40,7 @@ type AnalysisFilterAttribute string // Enum values for AnalysisFilterAttribute const ( - AnalysisFilterAttributeQuicksight_user AnalysisFilterAttribute = "QUICKSIGHT_USER" + AnalysisFilterAttributeQuicksightUser AnalysisFilterAttribute = "QUICKSIGHT_USER" ) // Values returns all known values for AnalysisFilterAttribute. 
Note that this can @@ -116,16 +116,16 @@ type DashboardErrorType string // Enum values for DashboardErrorType const ( - DashboardErrorTypeAccess_denied DashboardErrorType = "ACCESS_DENIED" - DashboardErrorTypeSource_not_found DashboardErrorType = "SOURCE_NOT_FOUND" - DashboardErrorTypeData_set_not_found DashboardErrorType = "DATA_SET_NOT_FOUND" - DashboardErrorTypeInternal_failure DashboardErrorType = "INTERNAL_FAILURE" - DashboardErrorTypeParameter_value_incompatible DashboardErrorType = "PARAMETER_VALUE_INCOMPATIBLE" - DashboardErrorTypeParameter_type_invalid DashboardErrorType = "PARAMETER_TYPE_INVALID" - DashboardErrorTypeParameter_not_found DashboardErrorType = "PARAMETER_NOT_FOUND" - DashboardErrorTypeColumn_type_mismatch DashboardErrorType = "COLUMN_TYPE_MISMATCH" - DashboardErrorTypeColumn_geographic_role_mismatch DashboardErrorType = "COLUMN_GEOGRAPHIC_ROLE_MISMATCH" - DashboardErrorTypeColumn_replacement_missing DashboardErrorType = "COLUMN_REPLACEMENT_MISSING" + DashboardErrorTypeAccessDenied DashboardErrorType = "ACCESS_DENIED" + DashboardErrorTypeSourceNotFound DashboardErrorType = "SOURCE_NOT_FOUND" + DashboardErrorTypeDataSetNotFound DashboardErrorType = "DATA_SET_NOT_FOUND" + DashboardErrorTypeInternalFailure DashboardErrorType = "INTERNAL_FAILURE" + DashboardErrorTypeParameterValueIncompatible DashboardErrorType = "PARAMETER_VALUE_INCOMPATIBLE" + DashboardErrorTypeParameterTypeInvalid DashboardErrorType = "PARAMETER_TYPE_INVALID" + DashboardErrorTypeParameterNotFound DashboardErrorType = "PARAMETER_NOT_FOUND" + DashboardErrorTypeColumnTypeMismatch DashboardErrorType = "COLUMN_TYPE_MISMATCH" + DashboardErrorTypeColumnGeographicRoleMismatch DashboardErrorType = "COLUMN_GEOGRAPHIC_ROLE_MISMATCH" + DashboardErrorTypeColumnReplacementMissing DashboardErrorType = "COLUMN_REPLACEMENT_MISSING" ) // Values returns all known values for DashboardErrorType. Note that this can be @@ -150,7 +150,7 @@ type DashboardFilterAttribute string // Enum values for DashboardFilterAttribute const ( - DashboardFilterAttributeQuicksight_user DashboardFilterAttribute = "QUICKSIGHT_USER" + DashboardFilterAttributeQuicksightUser DashboardFilterAttribute = "QUICKSIGHT_USER" ) // Values returns all known values for DashboardFilterAttribute. Note that this can @@ -184,8 +184,8 @@ type DataSetImportMode string // Enum values for DataSetImportMode const ( - DataSetImportModeSpice DataSetImportMode = "SPICE" - DataSetImportModeDirect_query DataSetImportMode = "DIRECT_QUERY" + DataSetImportModeSpice DataSetImportMode = "SPICE" + DataSetImportModeDirectQuery DataSetImportMode = "DIRECT_QUERY" ) // Values returns all known values for DataSetImportMode. 
Note that this can be @@ -202,14 +202,14 @@ type DataSourceErrorInfoType string // Enum values for DataSourceErrorInfoType const ( - DataSourceErrorInfoTypeAccess_denied DataSourceErrorInfoType = "ACCESS_DENIED" - DataSourceErrorInfoTypeCopy_source_not_found DataSourceErrorInfoType = "COPY_SOURCE_NOT_FOUND" - DataSourceErrorInfoTypeTimeout DataSourceErrorInfoType = "TIMEOUT" - DataSourceErrorInfoTypeEngine_version_not_supported DataSourceErrorInfoType = "ENGINE_VERSION_NOT_SUPPORTED" - DataSourceErrorInfoTypeUnknown_host DataSourceErrorInfoType = "UNKNOWN_HOST" - DataSourceErrorInfoTypeGeneric_sql_failure DataSourceErrorInfoType = "GENERIC_SQL_FAILURE" - DataSourceErrorInfoTypeConflict DataSourceErrorInfoType = "CONFLICT" - DataSourceErrorInfoTypeUnknown DataSourceErrorInfoType = "UNKNOWN" + DataSourceErrorInfoTypeAccessDenied DataSourceErrorInfoType = "ACCESS_DENIED" + DataSourceErrorInfoTypeCopySourceNotFound DataSourceErrorInfoType = "COPY_SOURCE_NOT_FOUND" + DataSourceErrorInfoTypeTimeout DataSourceErrorInfoType = "TIMEOUT" + DataSourceErrorInfoTypeEngineVersionNotSupported DataSourceErrorInfoType = "ENGINE_VERSION_NOT_SUPPORTED" + DataSourceErrorInfoTypeUnknownHost DataSourceErrorInfoType = "UNKNOWN_HOST" + DataSourceErrorInfoTypeGenericSqlFailure DataSourceErrorInfoType = "GENERIC_SQL_FAILURE" + DataSourceErrorInfoTypeConflict DataSourceErrorInfoType = "CONFLICT" + DataSourceErrorInfoTypeUnknown DataSourceErrorInfoType = "UNKNOWN" ) // Values returns all known values for DataSourceErrorInfoType. Note that this can @@ -232,28 +232,28 @@ type DataSourceType string // Enum values for DataSourceType const ( - DataSourceTypeAdobe_analytics DataSourceType = "ADOBE_ANALYTICS" - DataSourceTypeAmazon_elasticsearch DataSourceType = "AMAZON_ELASTICSEARCH" - DataSourceTypeAthena DataSourceType = "ATHENA" - DataSourceTypeAurora DataSourceType = "AURORA" - DataSourceTypeAurora_postgresql DataSourceType = "AURORA_POSTGRESQL" - DataSourceTypeAws_iot_analytics DataSourceType = "AWS_IOT_ANALYTICS" - DataSourceTypeGithub DataSourceType = "GITHUB" - DataSourceTypeJira DataSourceType = "JIRA" - DataSourceTypeMariadb DataSourceType = "MARIADB" - DataSourceTypeMysql DataSourceType = "MYSQL" - DataSourceTypePostgresql DataSourceType = "POSTGRESQL" - DataSourceTypePresto DataSourceType = "PRESTO" - DataSourceTypeRedshift DataSourceType = "REDSHIFT" - DataSourceTypeS3 DataSourceType = "S3" - DataSourceTypeSalesforce DataSourceType = "SALESFORCE" - DataSourceTypeServicenow DataSourceType = "SERVICENOW" - DataSourceTypeSnowflake DataSourceType = "SNOWFLAKE" - DataSourceTypeSpark DataSourceType = "SPARK" - DataSourceTypeSqlserver DataSourceType = "SQLSERVER" - DataSourceTypeTeradata DataSourceType = "TERADATA" - DataSourceTypeTwitter DataSourceType = "TWITTER" - DataSourceTypeTimestream DataSourceType = "TIMESTREAM" + DataSourceTypeAdobeAnalytics DataSourceType = "ADOBE_ANALYTICS" + DataSourceTypeAmazonElasticsearch DataSourceType = "AMAZON_ELASTICSEARCH" + DataSourceTypeAthena DataSourceType = "ATHENA" + DataSourceTypeAurora DataSourceType = "AURORA" + DataSourceTypeAuroraPostgresql DataSourceType = "AURORA_POSTGRESQL" + DataSourceTypeAwsIotAnalytics DataSourceType = "AWS_IOT_ANALYTICS" + DataSourceTypeGithub DataSourceType = "GITHUB" + DataSourceTypeJira DataSourceType = "JIRA" + DataSourceTypeMariadb DataSourceType = "MARIADB" + DataSourceTypeMysql DataSourceType = "MYSQL" + DataSourceTypePostgresql DataSourceType = "POSTGRESQL" + DataSourceTypePresto DataSourceType = "PRESTO" + DataSourceTypeRedshift 
DataSourceType = "REDSHIFT" + DataSourceTypeS3 DataSourceType = "S3" + DataSourceTypeSalesforce DataSourceType = "SALESFORCE" + DataSourceTypeServicenow DataSourceType = "SERVICENOW" + DataSourceTypeSnowflake DataSourceType = "SNOWFLAKE" + DataSourceTypeSpark DataSourceType = "SPARK" + DataSourceTypeSqlserver DataSourceType = "SQLSERVER" + DataSourceTypeTeradata DataSourceType = "TERADATA" + DataSourceTypeTwitter DataSourceType = "TWITTER" + DataSourceTypeTimestream DataSourceType = "TIMESTREAM" ) // Values returns all known values for DataSourceType. Note that this can be @@ -308,15 +308,15 @@ type ExceptionResourceType string // Enum values for ExceptionResourceType const ( - ExceptionResourceTypeUser ExceptionResourceType = "USER" - ExceptionResourceTypeGroup ExceptionResourceType = "GROUP" - ExceptionResourceTypeNamespace ExceptionResourceType = "NAMESPACE" - ExceptionResourceTypeAccount_settings ExceptionResourceType = "ACCOUNT_SETTINGS" - ExceptionResourceTypeIampolicy_assignment ExceptionResourceType = "IAMPOLICY_ASSIGNMENT" - ExceptionResourceTypeData_source ExceptionResourceType = "DATA_SOURCE" - ExceptionResourceTypeData_set ExceptionResourceType = "DATA_SET" - ExceptionResourceTypeVpc_connection ExceptionResourceType = "VPC_CONNECTION" - ExceptionResourceTypeIngestion ExceptionResourceType = "INGESTION" + ExceptionResourceTypeUser ExceptionResourceType = "USER" + ExceptionResourceTypeGroup ExceptionResourceType = "GROUP" + ExceptionResourceTypeNamespace ExceptionResourceType = "NAMESPACE" + ExceptionResourceTypeAccountSettings ExceptionResourceType = "ACCOUNT_SETTINGS" + ExceptionResourceTypeIampolicyAssignment ExceptionResourceType = "IAMPOLICY_ASSIGNMENT" + ExceptionResourceTypeDataSource ExceptionResourceType = "DATA_SOURCE" + ExceptionResourceTypeDataSet ExceptionResourceType = "DATA_SET" + ExceptionResourceTypeVpcConnection ExceptionResourceType = "VPC_CONNECTION" + ExceptionResourceTypeIngestion ExceptionResourceType = "INGESTION" ) // Values returns all known values for ExceptionResourceType. 
Note that this can be @@ -460,46 +460,46 @@ type IngestionErrorType string // Enum values for IngestionErrorType const ( - IngestionErrorTypeFailure_to_assume_role IngestionErrorType = "FAILURE_TO_ASSUME_ROLE" - IngestionErrorTypeIngestion_superseded IngestionErrorType = "INGESTION_SUPERSEDED" - IngestionErrorTypeIngestion_canceled IngestionErrorType = "INGESTION_CANCELED" - IngestionErrorTypeData_set_deleted IngestionErrorType = "DATA_SET_DELETED" - IngestionErrorTypeData_set_not_spice IngestionErrorType = "DATA_SET_NOT_SPICE" - IngestionErrorTypeS3_uploaded_file_deleted IngestionErrorType = "S3_UPLOADED_FILE_DELETED" - IngestionErrorTypeS3_manifest_error IngestionErrorType = "S3_MANIFEST_ERROR" - IngestionErrorTypeData_tolerance_exception IngestionErrorType = "DATA_TOLERANCE_EXCEPTION" - IngestionErrorTypeSpice_table_not_found IngestionErrorType = "SPICE_TABLE_NOT_FOUND" - IngestionErrorTypeData_set_size_limit_exceeded IngestionErrorType = "DATA_SET_SIZE_LIMIT_EXCEEDED" - IngestionErrorTypeRow_size_limit_exceeded IngestionErrorType = "ROW_SIZE_LIMIT_EXCEEDED" - IngestionErrorTypeAccount_capacity_limit_exceeded IngestionErrorType = "ACCOUNT_CAPACITY_LIMIT_EXCEEDED" - IngestionErrorTypeCustomer_error IngestionErrorType = "CUSTOMER_ERROR" - IngestionErrorTypeData_source_not_found IngestionErrorType = "DATA_SOURCE_NOT_FOUND" - IngestionErrorTypeIam_role_not_available IngestionErrorType = "IAM_ROLE_NOT_AVAILABLE" - IngestionErrorTypeConnection_failure IngestionErrorType = "CONNECTION_FAILURE" - IngestionErrorTypeSql_table_not_found IngestionErrorType = "SQL_TABLE_NOT_FOUND" - IngestionErrorTypePermission_denied IngestionErrorType = "PERMISSION_DENIED" - IngestionErrorTypeSsl_certificate_validation_failure IngestionErrorType = "SSL_CERTIFICATE_VALIDATION_FAILURE" - IngestionErrorTypeOauth_token_failure IngestionErrorType = "OAUTH_TOKEN_FAILURE" - IngestionErrorTypeSource_api_limit_exceeded_failure IngestionErrorType = "SOURCE_API_LIMIT_EXCEEDED_FAILURE" - IngestionErrorTypePassword_authentication_failure IngestionErrorType = "PASSWORD_AUTHENTICATION_FAILURE" - IngestionErrorTypeSql_schema_mismatch_error IngestionErrorType = "SQL_SCHEMA_MISMATCH_ERROR" - IngestionErrorTypeInvalid_date_format IngestionErrorType = "INVALID_DATE_FORMAT" - IngestionErrorTypeInvalid_dataprep_syntax IngestionErrorType = "INVALID_DATAPREP_SYNTAX" - IngestionErrorTypeSource_resource_limit_exceeded IngestionErrorType = "SOURCE_RESOURCE_LIMIT_EXCEEDED" - IngestionErrorTypeSql_invalid_parameter_value IngestionErrorType = "SQL_INVALID_PARAMETER_VALUE" - IngestionErrorTypeQuery_timeout IngestionErrorType = "QUERY_TIMEOUT" - IngestionErrorTypeSql_numeric_overflow IngestionErrorType = "SQL_NUMERIC_OVERFLOW" - IngestionErrorTypeUnresolvable_host IngestionErrorType = "UNRESOLVABLE_HOST" - IngestionErrorTypeUnroutable_host IngestionErrorType = "UNROUTABLE_HOST" - IngestionErrorTypeSql_exception IngestionErrorType = "SQL_EXCEPTION" - IngestionErrorTypeS3_file_inaccessible IngestionErrorType = "S3_FILE_INACCESSIBLE" - IngestionErrorTypeIot_file_not_found IngestionErrorType = "IOT_FILE_NOT_FOUND" - IngestionErrorTypeIot_data_set_file_empty IngestionErrorType = "IOT_DATA_SET_FILE_EMPTY" - IngestionErrorTypeInvalid_data_source_config IngestionErrorType = "INVALID_DATA_SOURCE_CONFIG" - IngestionErrorTypeData_source_auth_failed IngestionErrorType = "DATA_SOURCE_AUTH_FAILED" - IngestionErrorTypeData_source_connection_failed IngestionErrorType = "DATA_SOURCE_CONNECTION_FAILED" - IngestionErrorTypeFailure_to_process_json_file 
IngestionErrorType = "FAILURE_TO_PROCESS_JSON_FILE" - IngestionErrorTypeInternal_service_error IngestionErrorType = "INTERNAL_SERVICE_ERROR" + IngestionErrorTypeFailureToAssumeRole IngestionErrorType = "FAILURE_TO_ASSUME_ROLE" + IngestionErrorTypeIngestionSuperseded IngestionErrorType = "INGESTION_SUPERSEDED" + IngestionErrorTypeIngestionCanceled IngestionErrorType = "INGESTION_CANCELED" + IngestionErrorTypeDataSetDeleted IngestionErrorType = "DATA_SET_DELETED" + IngestionErrorTypeDataSetNotSpice IngestionErrorType = "DATA_SET_NOT_SPICE" + IngestionErrorTypeS3UploadedFileDeleted IngestionErrorType = "S3_UPLOADED_FILE_DELETED" + IngestionErrorTypeS3ManifestError IngestionErrorType = "S3_MANIFEST_ERROR" + IngestionErrorTypeDataToleranceException IngestionErrorType = "DATA_TOLERANCE_EXCEPTION" + IngestionErrorTypeSpiceTableNotFound IngestionErrorType = "SPICE_TABLE_NOT_FOUND" + IngestionErrorTypeDataSetSizeLimitExceeded IngestionErrorType = "DATA_SET_SIZE_LIMIT_EXCEEDED" + IngestionErrorTypeRowSizeLimitExceeded IngestionErrorType = "ROW_SIZE_LIMIT_EXCEEDED" + IngestionErrorTypeAccountCapacityLimitExceeded IngestionErrorType = "ACCOUNT_CAPACITY_LIMIT_EXCEEDED" + IngestionErrorTypeCustomerError IngestionErrorType = "CUSTOMER_ERROR" + IngestionErrorTypeDataSourceNotFound IngestionErrorType = "DATA_SOURCE_NOT_FOUND" + IngestionErrorTypeIamRoleNotAvailable IngestionErrorType = "IAM_ROLE_NOT_AVAILABLE" + IngestionErrorTypeConnectionFailure IngestionErrorType = "CONNECTION_FAILURE" + IngestionErrorTypeSqlTableNotFound IngestionErrorType = "SQL_TABLE_NOT_FOUND" + IngestionErrorTypePermissionDenied IngestionErrorType = "PERMISSION_DENIED" + IngestionErrorTypeSslCertificateValidationFailure IngestionErrorType = "SSL_CERTIFICATE_VALIDATION_FAILURE" + IngestionErrorTypeOauthTokenFailure IngestionErrorType = "OAUTH_TOKEN_FAILURE" + IngestionErrorTypeSourceApiLimitExceededFailure IngestionErrorType = "SOURCE_API_LIMIT_EXCEEDED_FAILURE" + IngestionErrorTypePasswordAuthenticationFailure IngestionErrorType = "PASSWORD_AUTHENTICATION_FAILURE" + IngestionErrorTypeSqlSchemaMismatchError IngestionErrorType = "SQL_SCHEMA_MISMATCH_ERROR" + IngestionErrorTypeInvalidDateFormat IngestionErrorType = "INVALID_DATE_FORMAT" + IngestionErrorTypeInvalidDataprepSyntax IngestionErrorType = "INVALID_DATAPREP_SYNTAX" + IngestionErrorTypeSourceResourceLimitExceeded IngestionErrorType = "SOURCE_RESOURCE_LIMIT_EXCEEDED" + IngestionErrorTypeSqlInvalidParameterValue IngestionErrorType = "SQL_INVALID_PARAMETER_VALUE" + IngestionErrorTypeQueryTimeout IngestionErrorType = "QUERY_TIMEOUT" + IngestionErrorTypeSqlNumericOverflow IngestionErrorType = "SQL_NUMERIC_OVERFLOW" + IngestionErrorTypeUnresolvableHost IngestionErrorType = "UNRESOLVABLE_HOST" + IngestionErrorTypeUnroutableHost IngestionErrorType = "UNROUTABLE_HOST" + IngestionErrorTypeSqlException IngestionErrorType = "SQL_EXCEPTION" + IngestionErrorTypeS3FileInaccessible IngestionErrorType = "S3_FILE_INACCESSIBLE" + IngestionErrorTypeIotFileNotFound IngestionErrorType = "IOT_FILE_NOT_FOUND" + IngestionErrorTypeIotDataSetFileEmpty IngestionErrorType = "IOT_DATA_SET_FILE_EMPTY" + IngestionErrorTypeInvalidDataSourceConfig IngestionErrorType = "INVALID_DATA_SOURCE_CONFIG" + IngestionErrorTypeDataSourceAuthFailed IngestionErrorType = "DATA_SOURCE_AUTH_FAILED" + IngestionErrorTypeDataSourceConnectionFailed IngestionErrorType = "DATA_SOURCE_CONNECTION_FAILED" + IngestionErrorTypeFailureToProcessJsonFile IngestionErrorType = "FAILURE_TO_PROCESS_JSON_FILE" + 
IngestionErrorTypeInternalServiceError IngestionErrorType = "INTERNAL_SERVICE_ERROR" ) // Values returns all known values for IngestionErrorType. Note that this can be @@ -572,10 +572,10 @@ type IngestionRequestType string // Enum values for IngestionRequestType const ( - IngestionRequestTypeInitial_ingestion IngestionRequestType = "INITIAL_INGESTION" - IngestionRequestTypeEdit IngestionRequestType = "EDIT" - IngestionRequestTypeIncremental_refresh IngestionRequestType = "INCREMENTAL_REFRESH" - IngestionRequestTypeFull_refresh IngestionRequestType = "FULL_REFRESH" + IngestionRequestTypeInitialIngestion IngestionRequestType = "INITIAL_INGESTION" + IngestionRequestTypeEdit IngestionRequestType = "EDIT" + IngestionRequestTypeIncrementalRefresh IngestionRequestType = "INCREMENTAL_REFRESH" + IngestionRequestTypeFullRefresh IngestionRequestType = "FULL_REFRESH" ) // Values returns all known values for IngestionRequestType. Note that this can be @@ -670,8 +670,8 @@ type NamespaceErrorType string // Enum values for NamespaceErrorType const ( - NamespaceErrorTypePermission_denied NamespaceErrorType = "PERMISSION_DENIED" - NamespaceErrorTypeInternal_service_error NamespaceErrorType = "INTERNAL_SERVICE_ERROR" + NamespaceErrorTypePermissionDenied NamespaceErrorType = "PERMISSION_DENIED" + NamespaceErrorTypeInternalServiceError NamespaceErrorType = "INTERNAL_SERVICE_ERROR" ) // Values returns all known values for NamespaceErrorType. Note that this can be @@ -688,11 +688,11 @@ type NamespaceStatus string // Enum values for NamespaceStatus const ( - NamespaceStatusCreated NamespaceStatus = "CREATED" - NamespaceStatusCreating NamespaceStatus = "CREATING" - NamespaceStatusDeleting NamespaceStatus = "DELETING" - NamespaceStatusRetryable_failure NamespaceStatus = "RETRYABLE_FAILURE" - NamespaceStatusNon_retryable_failure NamespaceStatus = "NON_RETRYABLE_FAILURE" + NamespaceStatusCreated NamespaceStatus = "CREATED" + NamespaceStatusCreating NamespaceStatus = "CREATING" + NamespaceStatusDeleting NamespaceStatus = "DELETING" + NamespaceStatusRetryableFailure NamespaceStatus = "RETRYABLE_FAILURE" + NamespaceStatusNonRetryableFailure NamespaceStatus = "NON_RETRYABLE_FAILURE" ) // Values returns all known values for NamespaceStatus. Note that this can be @@ -712,13 +712,13 @@ type ResourceStatus string // Enum values for ResourceStatus const ( - ResourceStatusCreation_in_progress ResourceStatus = "CREATION_IN_PROGRESS" - ResourceStatusCreation_successful ResourceStatus = "CREATION_SUCCESSFUL" - ResourceStatusCreation_failed ResourceStatus = "CREATION_FAILED" - ResourceStatusUpdate_in_progress ResourceStatus = "UPDATE_IN_PROGRESS" - ResourceStatusUpdate_successful ResourceStatus = "UPDATE_SUCCESSFUL" - ResourceStatusUpdate_failed ResourceStatus = "UPDATE_FAILED" - ResourceStatusDeleted ResourceStatus = "DELETED" + ResourceStatusCreationInProgress ResourceStatus = "CREATION_IN_PROGRESS" + ResourceStatusCreationSuccessful ResourceStatus = "CREATION_SUCCESSFUL" + ResourceStatusCreationFailed ResourceStatus = "CREATION_FAILED" + ResourceStatusUpdateInProgress ResourceStatus = "UPDATE_IN_PROGRESS" + ResourceStatusUpdateSuccessful ResourceStatus = "UPDATE_SUCCESSFUL" + ResourceStatusUpdateFailed ResourceStatus = "UPDATE_FAILED" + ResourceStatusDeleted ResourceStatus = "DELETED" ) // Values returns all known values for ResourceStatus. 
Note that this can be @@ -740,8 +740,8 @@ type RowLevelPermissionPolicy string // Enum values for RowLevelPermissionPolicy const ( - RowLevelPermissionPolicyGrant_access RowLevelPermissionPolicy = "GRANT_ACCESS" - RowLevelPermissionPolicyDeny_access RowLevelPermissionPolicy = "DENY_ACCESS" + RowLevelPermissionPolicyGrantAccess RowLevelPermissionPolicy = "GRANT_ACCESS" + RowLevelPermissionPolicyDenyAccess RowLevelPermissionPolicy = "DENY_ACCESS" ) // Values returns all known values for RowLevelPermissionPolicy. Note that this can @@ -758,10 +758,10 @@ type TemplateErrorType string // Enum values for TemplateErrorType const ( - TemplateErrorTypeSource_not_found TemplateErrorType = "SOURCE_NOT_FOUND" - TemplateErrorTypeData_set_not_found TemplateErrorType = "DATA_SET_NOT_FOUND" - TemplateErrorTypeInternal_failure TemplateErrorType = "INTERNAL_FAILURE" - TemplateErrorTypeAccess_denied TemplateErrorType = "ACCESS_DENIED" + TemplateErrorTypeSourceNotFound TemplateErrorType = "SOURCE_NOT_FOUND" + TemplateErrorTypeDataSetNotFound TemplateErrorType = "DATA_SET_NOT_FOUND" + TemplateErrorTypeInternalFailure TemplateErrorType = "INTERNAL_FAILURE" + TemplateErrorTypeAccessDenied TemplateErrorType = "ACCESS_DENIED" ) // Values returns all known values for TemplateErrorType. Note that this can be @@ -780,8 +780,8 @@ type TextQualifier string // Enum values for TextQualifier const ( - TextQualifierDouble_quote TextQualifier = "DOUBLE_QUOTE" - TextQualifierSingle_quote TextQualifier = "SINGLE_QUOTE" + TextQualifierDoubleQuote TextQualifier = "DOUBLE_QUOTE" + TextQualifierSingleQuote TextQualifier = "SINGLE_QUOTE" ) // Values returns all known values for TextQualifier. Note that this can be @@ -798,7 +798,7 @@ type ThemeErrorType string // Enum values for ThemeErrorType const ( - ThemeErrorTypeInternal_failure ThemeErrorType = "INTERNAL_FAILURE" + ThemeErrorTypeInternalFailure ThemeErrorType = "INTERNAL_FAILURE" ) // Values returns all known values for ThemeErrorType. Note that this can be @@ -834,11 +834,11 @@ type UserRole string // Enum values for UserRole const ( - UserRoleAdmin UserRole = "ADMIN" - UserRoleAuthor UserRole = "AUTHOR" - UserRoleReader UserRole = "READER" - UserRoleRestricted_author UserRole = "RESTRICTED_AUTHOR" - UserRoleRestricted_reader UserRole = "RESTRICTED_READER" + UserRoleAdmin UserRole = "ADMIN" + UserRoleAuthor UserRole = "AUTHOR" + UserRoleReader UserRole = "READER" + UserRoleRestrictedAuthor UserRole = "RESTRICTED_AUTHOR" + UserRoleRestrictedReader UserRole = "RESTRICTED_READER" ) // Values returns all known values for UserRole. Note that this can be expanded in diff --git a/service/quicksight/types/types.go b/service/quicksight/types/types.go index 796fd5bff3e..df58c11ef28 100644 --- a/service/quicksight/types/types.go +++ b/service/quicksight/types/types.go @@ -1369,16 +1369,16 @@ type ResourcePermission struct { // The Amazon Resource Name (ARN) of the principal. This can be one of the // following: // - // * The ARN of an Amazon QuickSight user or group associated with - // a data source or dataset. (This is common.) + // * The ARN of an Amazon QuickSight user or group associated with a + // data source or dataset. (This is common.) // - // * The ARN of an Amazon - // QuickSight user, group, or namespace associated with an analysis, dashboard, - // template, or theme. (This is common.) + // * The ARN of an Amazon QuickSight + // user, group, or namespace associated with an analysis, dashboard, template, or + // theme. (This is common.) 
// - // * The ARN of an AWS account root: - // This is an IAM ARN rather than a QuickSight ARN. Use this option only to share - // resources (templates) across AWS accounts. (This is less common.) + // * The ARN of an AWS account root: This is an IAM ARN + // rather than a QuickSight ARN. Use this option only to share resources + // (templates) across AWS accounts. (This is less common.) // // This member is required. Principal *string @@ -2067,20 +2067,19 @@ type User struct { // The Amazon QuickSight role for the user. The user role can be one of the // following:. // - // * READER: A user who has read-only access to dashboards. + // * READER: A user who has read-only access to dashboards. // - // * - // AUTHOR: A user who can create data sources, datasets, analyses, and - // dashboards. + // * AUTHOR: + // A user who can create data sources, datasets, analyses, and dashboards. // - // * ADMIN: A user who is an author, who can also manage Amazon - // QuickSight settings. + // * + // ADMIN: A user who is an author, who can also manage Amazon QuickSight + // settings. // - // * RESTRICTED_READER: This role isn't currently - // available for use. + // * RESTRICTED_READER: This role isn't currently available for use. // - // * RESTRICTED_AUTHOR: This role isn't currently available - // for use. + // * + // RESTRICTED_AUTHOR: This role isn't currently available for use. Role UserRole // The user's user name. diff --git a/service/ram/api_op_PromoteResourceShareCreatedFromPolicy.go b/service/ram/api_op_PromoteResourceShareCreatedFromPolicy.go index dd5e1cb7479..138f9e1c5b8 100644 --- a/service/ram/api_op_PromoteResourceShareCreatedFromPolicy.go +++ b/service/ram/api_op_PromoteResourceShareCreatedFromPolicy.go @@ -15,10 +15,10 @@ import ( // modified in AWS RAM. Use this API action to promote the resource share. When you // promote the resource share, it becomes: // -// * Visible to all principals that it -// is shared with. +// * Visible to all principals that it is +// shared with. // -// * Modifiable in AWS RAM. +// * Modifiable in AWS RAM. func (c *Client) PromoteResourceShareCreatedFromPolicy(ctx context.Context, params *PromoteResourceShareCreatedFromPolicyInput, optFns ...func(*Options)) (*PromoteResourceShareCreatedFromPolicyOutput, error) { if params == nil { params = &PromoteResourceShareCreatedFromPolicyInput{} diff --git a/service/ram/types/enums.go b/service/ram/types/enums.go index 567ef64224b..edec75d3f80 100644 --- a/service/ram/types/enums.go +++ b/service/ram/types/enums.go @@ -6,8 +6,8 @@ type ResourceOwner string // Enum values for ResourceOwner const ( - ResourceOwnerSelf ResourceOwner = "SELF" - ResourceOwnerOther_accounts ResourceOwner = "OTHER-ACCOUNTS" + ResourceOwnerSelf ResourceOwner = "SELF" + ResourceOwnerOtherAccounts ResourceOwner = "OTHER-ACCOUNTS" ) // Values returns all known values for ResourceOwner. 
Note that this can be @@ -67,9 +67,9 @@ type ResourceShareFeatureSet string // Enum values for ResourceShareFeatureSet const ( - ResourceShareFeatureSetCreated_from_policy ResourceShareFeatureSet = "CREATED_FROM_POLICY" - ResourceShareFeatureSetPromoting_to_standard ResourceShareFeatureSet = "PROMOTING_TO_STANDARD" - ResourceShareFeatureSetStandard ResourceShareFeatureSet = "STANDARD" + ResourceShareFeatureSetCreatedFromPolicy ResourceShareFeatureSet = "CREATED_FROM_POLICY" + ResourceShareFeatureSetPromotingToStandard ResourceShareFeatureSet = "PROMOTING_TO_STANDARD" + ResourceShareFeatureSetStandard ResourceShareFeatureSet = "STANDARD" ) // Values returns all known values for ResourceShareFeatureSet. Note that this can @@ -134,11 +134,11 @@ type ResourceStatus string // Enum values for ResourceStatus const ( - ResourceStatusAvailable ResourceStatus = "AVAILABLE" - ResourceStatusZonal_resource_inaccessible ResourceStatus = "ZONAL_RESOURCE_INACCESSIBLE" - ResourceStatusLimit_exceeded ResourceStatus = "LIMIT_EXCEEDED" - ResourceStatusUnavailable ResourceStatus = "UNAVAILABLE" - ResourceStatusPending ResourceStatus = "PENDING" + ResourceStatusAvailable ResourceStatus = "AVAILABLE" + ResourceStatusZonalResourceInaccessible ResourceStatus = "ZONAL_RESOURCE_INACCESSIBLE" + ResourceStatusLimitExceeded ResourceStatus = "LIMIT_EXCEEDED" + ResourceStatusUnavailable ResourceStatus = "UNAVAILABLE" + ResourceStatusPending ResourceStatus = "PENDING" ) // Values returns all known values for ResourceStatus. Note that this can be diff --git a/service/ram/types/types.go b/service/ram/types/types.go index cd897cc40c6..6035782c9a1 100644 --- a/service/ram/types/types.go +++ b/service/ram/types/types.go @@ -67,19 +67,19 @@ type ResourceShare struct { // Indicates how the resource share was created. Possible values include: // - // * + // * // CREATED_FROM_POLICY - Indicates that the resource share was created from an AWS // Identity and Access Management (AWS IAM) policy attached to a resource. These // resource shares are visible only to the AWS account that created it. They cannot // be modified in AWS RAM. // - // * PROMOTING_TO_STANDARD - The resource share is in - // the process of being promoted. For more information, see + // * PROMOTING_TO_STANDARD - The resource share is in the + // process of being promoted. For more information, see // PromoteResourceShareCreatedFromPolicy. // - // * STANDARD - Indicates that the - // resource share was created in AWS RAM using the console or APIs. These resource - // shares are visible to all principals. They can be modified in AWS RAM. + // * STANDARD - Indicates that the resource + // share was created in AWS RAM using the console or APIs. These resource shares + // are visible to all principals. They can be modified in AWS RAM. FeatureSet ResourceShareFeatureSet // The time when the resource share was last updated. diff --git a/service/rds/api_op_AddSourceIdentifierToSubscription.go b/service/rds/api_op_AddSourceIdentifierToSubscription.go index 808480db4f3..7ad5f5babf8 100644 --- a/service/rds/api_op_AddSourceIdentifierToSubscription.go +++ b/service/rds/api_op_AddSourceIdentifierToSubscription.go @@ -32,25 +32,24 @@ type AddSourceIdentifierToSubscriptionInput struct { // The identifier of the event source to be added. Constraints: // - // * If the - // source type is a DB instance, a DBInstanceIdentifier value must be supplied. + // * If the source + // type is a DB instance, a DBInstanceIdentifier value must be supplied. 
// + // * If the + // source type is a DB cluster, a DBClusterIdentifier value must be supplied. // - // * If the source type is a DB cluster, a DBClusterIdentifier value must be + // * If + // the source type is a DB parameter group, a DBParameterGroupName value must be // supplied. // - // * If the source type is a DB parameter group, a - // DBParameterGroupName value must be supplied. - // - // * If the source type is a DB - // security group, a DBSecurityGroupName value must be supplied. - // - // * If the - // source type is a DB snapshot, a DBSnapshotIdentifier value must be supplied. + // * If the source type is a DB security group, a DBSecurityGroupName + // value must be supplied. // + // * If the source type is a DB snapshot, a + // DBSnapshotIdentifier value must be supplied. // - // * If the source type is a DB cluster snapshot, a DBClusterSnapshotIdentifier - // value must be supplied. + // * If the source type is a DB + // cluster snapshot, a DBClusterSnapshotIdentifier value must be supplied. // // This member is required. SourceIdentifier *string diff --git a/service/rds/api_op_ApplyPendingMaintenanceAction.go b/service/rds/api_op_ApplyPendingMaintenanceAction.go index c5d3e61edde..7472d774596 100644 --- a/service/rds/api_op_ApplyPendingMaintenanceAction.go +++ b/service/rds/api_op_ApplyPendingMaintenanceAction.go @@ -40,15 +40,14 @@ type ApplyPendingMaintenanceActionInput struct { // A value that specifies the type of opt-in request, or undoes an opt-in request. // An opt-in request of type immediate can't be undone. Valid values: // - // * - // immediate - Apply the maintenance action immediately. + // * immediate + // - Apply the maintenance action immediately. // - // * next-maintenance - - // Apply the maintenance action during the next maintenance window for the - // resource. + // * next-maintenance - Apply the + // maintenance action during the next maintenance window for the resource. // - // * undo-opt-in - Cancel any existing next-maintenance opt-in - // requests. + // * + // undo-opt-in - Cancel any existing next-maintenance opt-in requests. // // This member is required. OptInType *string diff --git a/service/rds/api_op_BacktrackDBCluster.go b/service/rds/api_op_BacktrackDBCluster.go index 2e2831352dc..025c15fbecb 100644 --- a/service/rds/api_op_BacktrackDBCluster.go +++ b/service/rds/api_op_BacktrackDBCluster.go @@ -40,10 +40,10 @@ type BacktrackDBClusterInput struct { // time for the DB cluster, Aurora automatically chooses the nearest possible // consistent time for the DB cluster. Constraints: // - // * Must contain a valid ISO + // * Must contain a valid ISO // 8601 timestamp. // - // * Can't contain a timestamp set in the future. + // * Can't contain a timestamp set in the future. // // Example: // 2017-07-08T18:00Z @@ -54,13 +54,13 @@ type BacktrackDBClusterInput struct { // The DB cluster identifier of the DB cluster to be backtracked. This parameter is // stored as a lowercase string. Constraints: // - // * Must contain from 1 to 63 + // * Must contain from 1 to 63 // alphanumeric characters or hyphens. // - // * First character must be a letter. + // * First character must be a letter. // - // - // * Can't end with a hyphen or contain two consecutive hyphens. + // * + // Can't end with a hyphen or contain two consecutive hyphens. // // Example: // my-cluster1 @@ -102,18 +102,17 @@ type BacktrackDBClusterOutput struct { // The status of the backtrack. 
This property returns one of the following // values: // - // * applying - The backtrack is currently being applied to or rolled - // back from the DB cluster. + // * applying - The backtrack is currently being applied to or rolled back + // from the DB cluster. // - // * completed - The backtrack has successfully been - // applied to or rolled back from the DB cluster. + // * completed - The backtrack has successfully been applied + // to or rolled back from the DB cluster. // - // * failed - An error occurred - // while the backtrack was applied to or rolled back from the DB cluster. + // * failed - An error occurred while the + // backtrack was applied to or rolled back from the DB cluster. // - // * - // pending - The backtrack is currently pending application to or rollback from the - // DB cluster. + // * pending - The + // backtrack is currently pending application to or rollback from the DB cluster. Status *string // Metadata pertaining to the operation's result. diff --git a/service/rds/api_op_CancelExportTask.go b/service/rds/api_op_CancelExportTask.go index c107f599d9c..ef1d5e9a6da 100644 --- a/service/rds/api_op_CancelExportTask.go +++ b/service/rds/api_op_CancelExportTask.go @@ -42,20 +42,20 @@ type CancelExportTaskOutput struct { // The data exported from the snapshot. Valid values are the following: // - // * - // database - Export all the data from a specified database. + // * database + // - Export all the data from a specified database. // - // * database.table - // table-name - Export a table of the snapshot. This format is valid only for RDS - // for MySQL, RDS for MariaDB, and Aurora MySQL. + // * database.table table-name - + // Export a table of the snapshot. This format is valid only for RDS for MySQL, RDS + // for MariaDB, and Aurora MySQL. // - // * database.schema schema-name - // - Export a database schema of the snapshot. This format is valid only for RDS - // for PostgreSQL and Aurora PostgreSQL. - // - // * database.schema.table table-name - - // Export a table of the database schema. This format is valid only for RDS for + // * database.schema schema-name - Export a + // database schema of the snapshot. This format is valid only for RDS for // PostgreSQL and Aurora PostgreSQL. + // + // * database.schema.table table-name - Export a + // table of the database schema. This format is valid only for RDS for PostgreSQL + // and Aurora PostgreSQL. ExportOnly []*string // A unique identifier for the snapshot export task. This ID isn't an identifier diff --git a/service/rds/api_op_CopyDBClusterParameterGroup.go b/service/rds/api_op_CopyDBClusterParameterGroup.go index 560d9a0b811..c0639dc9b73 100644 --- a/service/rds/api_op_CopyDBClusterParameterGroup.go +++ b/service/rds/api_op_CopyDBClusterParameterGroup.go @@ -36,16 +36,16 @@ type CopyDBClusterParameterGroupInput struct { // (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_Tagging.ARN.html#USER_Tagging.ARN.Constructing) // in the Amazon Aurora User Guide. Constraints: // - // * Must specify a valid DB - // cluster parameter group. + // * Must specify a valid DB cluster + // parameter group. // - // * If the source DB cluster parameter group is in - // the same AWS Region as the copy, specify a valid DB parameter group identifier, - // for example my-db-cluster-param-group, or a valid ARN. + // * If the source DB cluster parameter group is in the same AWS + // Region as the copy, specify a valid DB parameter group identifier, for example + // my-db-cluster-param-group, or a valid ARN. 
// - // * If the source DB - // parameter group is in a different AWS Region than the copy, specify a valid DB - // cluster parameter group ARN, for example + // * If the source DB parameter group + // is in a different AWS Region than the copy, specify a valid DB cluster parameter + // group ARN, for example // arn:aws:rds:us-east-1:123456789012:cluster-pg:custom-cluster-group1. // // This member is required. @@ -58,19 +58,18 @@ type CopyDBClusterParameterGroupInput struct { // The identifier for the copied DB cluster parameter group. Constraints: // - // * - // Can't be null, empty, or blank + // * Can't + // be null, empty, or blank // - // * Must contain from 1 to 255 letters, - // numbers, or hyphens + // * Must contain from 1 to 255 letters, numbers, or + // hyphens // - // * First character must be a letter + // * First character must be a letter // - // * Can't end - // with a hyphen or contain two consecutive hyphens + // * Can't end with a hyphen or + // contain two consecutive hyphens // - // Example: - // my-cluster-param-group1 + // Example: my-cluster-param-group1 // // This member is required. TargetDBClusterParameterGroupIdentifier *string diff --git a/service/rds/api_op_CopyDBClusterSnapshot.go b/service/rds/api_op_CopyDBClusterSnapshot.go index bb765aa5b0e..301b2a559ea 100644 --- a/service/rds/api_op_CopyDBClusterSnapshot.go +++ b/service/rds/api_op_CopyDBClusterSnapshot.go @@ -23,12 +23,12 @@ import ( // encrypted DB cluster snapshot to be copied to. To copy an encrypted DB cluster // snapshot from another AWS Region, you must provide the following values: // -// * +// * // KmsKeyId - The AWS Key Management System (AWS KMS) key identifier for the key to // use to encrypt the copy of the DB cluster snapshot in the destination AWS // Region. // -// * PreSignedUrl - A URL that contains a Signature Version 4 signed +// * PreSignedUrl - A URL that contains a Signature Version 4 signed // request for the CopyDBClusterSnapshot action to be called in the source AWS // Region where the DB cluster snapshot is copied from. The pre-signed URL must be // a valid request for the CopyDBClusterSnapshot API action that can be executed in @@ -36,26 +36,26 @@ import ( // copied. The pre-signed URL request must contain the following parameter // values: // -// * KmsKeyId - The KMS key identifier for the key to use to -// encrypt the copy of the DB cluster snapshot in the destination AWS Region. This -// is the same identifier for both the CopyDBClusterSnapshot action that is called -// in the destination AWS Region, and the action contained in the pre-signed URL. +// * KmsKeyId - The KMS key identifier for the key to use to encrypt the +// copy of the DB cluster snapshot in the destination AWS Region. This is the same +// identifier for both the CopyDBClusterSnapshot action that is called in the +// destination AWS Region, and the action contained in the pre-signed URL. // -// -// * DestinationRegion - The name of the AWS Region that the DB cluster snapshot is +// * +// DestinationRegion - The name of the AWS Region that the DB cluster snapshot is // to be created in. // -// * SourceDBClusterSnapshotIdentifier - The DB cluster -// snapshot identifier for the encrypted DB cluster snapshot to be copied. This -// identifier must be in the Amazon Resource Name (ARN) format for the source AWS -// Region. 
For example, if you are copying an encrypted DB cluster snapshot from -// the us-west-2 AWS Region, then your SourceDBClusterSnapshotIdentifier looks like -// the following example: +// * SourceDBClusterSnapshotIdentifier - The DB cluster snapshot +// identifier for the encrypted DB cluster snapshot to be copied. This identifier +// must be in the Amazon Resource Name (ARN) format for the source AWS Region. For +// example, if you are copying an encrypted DB cluster snapshot from the us-west-2 +// AWS Region, then your SourceDBClusterSnapshotIdentifier looks like the following +// example: // arn:aws:rds:us-west-2:123456789012:cluster-snapshot:aurora-cluster1-snapshot-20161115. // -// -// To learn how to generate a Signature Version 4 signed request, see -// Authenticating Requests: Using Query Parameters (AWS Signature Version 4) +// To +// learn how to generate a Signature Version 4 signed request, see Authenticating +// Requests: Using Query Parameters (AWS Signature Version 4) // (https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html) // and Signature Version 4 Signing Process // (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). If you @@ -64,11 +64,11 @@ import ( // Specifying SourceRegion autogenerates a pre-signed URL that is a valid request // for the operation that can be executed in the source AWS Region. // -// * +// * // TargetDBClusterSnapshotIdentifier - The identifier for the new copy of the DB // cluster snapshot in the destination AWS Region. // -// * +// * // SourceDBClusterSnapshotIdentifier - The DB cluster snapshot identifier for the // encrypted DB cluster snapshot to be copied. This identifier must be in the ARN // format for the source AWS Region and is the same value as the @@ -106,15 +106,15 @@ type CopyDBClusterSnapshotInput struct { // case-sensitive. You can't copy an encrypted, shared DB cluster snapshot from one // AWS Region to another. Constraints: // - // * Must specify a valid system snapshot - // in the "available" state. + // * Must specify a valid system snapshot in + // the "available" state. // - // * If the source snapshot is in the same AWS - // Region as the copy, specify a valid DB snapshot identifier. + // * If the source snapshot is in the same AWS Region as + // the copy, specify a valid DB snapshot identifier. // - // * If the source - // snapshot is in a different AWS Region than the copy, specify a valid DB cluster - // snapshot ARN. For more information, go to Copying Snapshots Across AWS Regions + // * If the source snapshot is + // in a different AWS Region than the copy, specify a valid DB cluster snapshot + // ARN. For more information, go to Copying Snapshots Across AWS Regions // (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_CopySnapshot.html#USER_CopySnapshot.AcrossRegions) // in the Amazon Aurora User Guide. // @@ -126,16 +126,16 @@ type CopyDBClusterSnapshotInput struct { // The identifier of the new DB cluster snapshot to create from the source DB // cluster snapshot. This parameter isn't case-sensitive. Constraints: // - // * Must + // * Must // contain from 1 to 63 letters, numbers, or hyphens. // - // * First character must - // be a letter. + // * First character must be a + // letter. // - // * Can't end with a hyphen or contain two consecutive - // hyphens. + // * Can't end with a hyphen or contain two consecutive hyphens. // - // Example: my-cluster-snapshot2 + // Example: + // my-cluster-snapshot2 // // This member is required. 
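Putting the cross-Region requirements above together, the following is a minimal sketch of a CopyDBClusterSnapshot call with the v2 RDS client. It assumes an already-configured *rds.Client (construction omitted) and a hypothetical destination-Region KMS key alias, and it relies on SourceRegion so that the pre-signed URL described above is generated automatically.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/rds"
)

// copyEncryptedClusterSnapshot sketches a cross-Region copy of an encrypted DB
// cluster snapshot. The source snapshot is referenced by its ARN, as required
// for cross-Region copies, and SourceRegion is set so a pre-signed URL for the
// source Region is generated rather than supplying PreSignedUrl by hand.
func copyEncryptedClusterSnapshot(ctx context.Context, client *rds.Client) error {
	_, err := client.CopyDBClusterSnapshot(ctx, &rds.CopyDBClusterSnapshotInput{
		// ARN format is required because the source snapshot lives in another Region.
		SourceDBClusterSnapshotIdentifier: aws.String("arn:aws:rds:us-west-2:123456789012:cluster-snapshot:aurora-cluster1-snapshot-20161115"),
		// 1-63 letters, numbers, or hyphens; must start with a letter.
		TargetDBClusterSnapshotIdentifier: aws.String("my-cluster-snapshot2"),
		// KMS key in the destination Region used to encrypt the copy (hypothetical alias).
		KmsKeyId:     aws.String("alias/my-destination-region-key"),
		SourceRegion: aws.String("us-west-2"),
	})
	return err
}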
TargetDBClusterSnapshotIdentifier *string @@ -170,16 +170,16 @@ type CopyDBClusterSnapshotInput struct { // that contains the encrypted DB cluster snapshot to be copied. The pre-signed URL // request must contain the following parameter values: // - // * KmsKeyId - The AWS - // KMS key identifier for the key to use to encrypt the copy of the DB cluster - // snapshot in the destination AWS Region. This is the same identifier for both the + // * KmsKeyId - The AWS KMS + // key identifier for the key to use to encrypt the copy of the DB cluster snapshot + // in the destination AWS Region. This is the same identifier for both the // CopyDBClusterSnapshot action that is called in the destination AWS Region, and // the action contained in the pre-signed URL. // - // * DestinationRegion - The name - // of the AWS Region that the DB cluster snapshot is to be created in. + // * DestinationRegion - The name of + // the AWS Region that the DB cluster snapshot is to be created in. // - // * + // * // SourceDBClusterSnapshotIdentifier - The DB cluster snapshot identifier for the // encrypted DB cluster snapshot to be copied. This identifier must be in the // Amazon Resource Name (ARN) format for the source AWS Region. For example, if you diff --git a/service/rds/api_op_CopyDBParameterGroup.go b/service/rds/api_op_CopyDBParameterGroup.go index 5c2f53d7f9a..ea61e119523 100644 --- a/service/rds/api_op_CopyDBParameterGroup.go +++ b/service/rds/api_op_CopyDBParameterGroup.go @@ -35,11 +35,11 @@ type CopyDBParameterGroupInput struct { // (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.ARN.html#USER_Tagging.ARN.Constructing) // in the Amazon RDS User Guide. Constraints: // - // * Must specify a valid DB - // parameter group. + // * Must specify a valid DB parameter + // group. // - // * Must specify a valid DB parameter group identifier, for - // example my-db-param-group, or a valid ARN. + // * Must specify a valid DB parameter group identifier, for example + // my-db-param-group, or a valid ARN. // // This member is required. SourceDBParameterGroupIdentifier *string @@ -51,16 +51,16 @@ type CopyDBParameterGroupInput struct { // The identifier for the copied DB parameter group. Constraints: // - // * Can't be - // null, empty, or blank + // * Can't be null, + // empty, or blank // - // * Must contain from 1 to 255 letters, numbers, or - // hyphens + // * Must contain from 1 to 255 letters, numbers, or hyphens // - // * First character must be a letter + // * + // First character must be a letter // - // * Can't end with a hyphen - // or contain two consecutive hyphens + // * Can't end with a hyphen or contain two + // consecutive hyphens // // Example: my-db-parameter-group // diff --git a/service/rds/api_op_CopyDBSnapshot.go b/service/rds/api_op_CopyDBSnapshot.go index fef310a0c4b..a0d2b3a8306 100644 --- a/service/rds/api_op_CopyDBSnapshot.go +++ b/service/rds/api_op_CopyDBSnapshot.go @@ -52,7 +52,7 @@ type CopyDBSnapshotInput struct { // Region, and must match the SourceDBSnapshotIdentifier in the PreSignedUrl // parameter. Constraints: // - // * Must specify a valid system snapshot in the + // * Must specify a valid system snapshot in the // "available" state. // // Example: rds:mydb-2012-04-02-00-01 Example: @@ -63,15 +63,15 @@ type CopyDBSnapshotInput struct { // The identifier for the copy of the snapshot. 
Constraints: // - // * Can't be null, + // * Can't be null, // empty, or blank // - // * Must contain from 1 to 255 letters, numbers, or hyphens + // * Must contain from 1 to 255 letters, numbers, or hyphens // + // * + // First character must be a letter // - // * First character must be a letter - // - // * Can't end with a hyphen or contain two + // * Can't end with a hyphen or contain two // consecutive hyphens // // Example: my-db-snapshot @@ -118,27 +118,26 @@ type CopyDBSnapshotInput struct { // DB snapshot to be copied. The presigned URL request must contain the following // parameter values: // - // * DestinationRegion - The AWS Region that the encrypted - // DB snapshot is copied to. This AWS Region is the same one where the - // CopyDBSnapshot action is called that contains this presigned URL. For example, - // if you copy an encrypted DB snapshot from the us-west-2 AWS Region to the - // us-east-1 AWS Region, then you call the CopyDBSnapshot action in the us-east-1 - // AWS Region and provide a presigned URL that contains a call to the - // CopyDBSnapshot action in the us-west-2 AWS Region. For this example, the - // DestinationRegion in the presigned URL must be set to the us-east-1 AWS - // Region. + // * DestinationRegion - The AWS Region that the encrypted DB + // snapshot is copied to. This AWS Region is the same one where the CopyDBSnapshot + // action is called that contains this presigned URL. For example, if you copy an + // encrypted DB snapshot from the us-west-2 AWS Region to the us-east-1 AWS Region, + // then you call the CopyDBSnapshot action in the us-east-1 AWS Region and provide + // a presigned URL that contains a call to the CopyDBSnapshot action in the + // us-west-2 AWS Region. For this example, the DestinationRegion in the presigned + // URL must be set to the us-east-1 AWS Region. // - // * KmsKeyId - The AWS KMS key identifier for the key to use to - // encrypt the copy of the DB snapshot in the destination AWS Region. This is the - // same identifier for both the CopyDBSnapshot action that is called in the - // destination AWS Region, and the action contained in the presigned URL. + // * KmsKeyId - The AWS KMS key + // identifier for the key to use to encrypt the copy of the DB snapshot in the + // destination AWS Region. This is the same identifier for both the CopyDBSnapshot + // action that is called in the destination AWS Region, and the action contained in + // the presigned URL. // - // * - // SourceDBSnapshotIdentifier - The DB snapshot identifier for the encrypted - // snapshot to be copied. This identifier must be in the Amazon Resource Name (ARN) - // format for the source AWS Region. For example, if you are copying an encrypted - // DB snapshot from the us-west-2 AWS Region, then your SourceDBSnapshotIdentifier - // looks like the following example: + // * SourceDBSnapshotIdentifier - The DB snapshot identifier + // for the encrypted snapshot to be copied. This identifier must be in the Amazon + // Resource Name (ARN) format for the source AWS Region. For example, if you are + // copying an encrypted DB snapshot from the us-west-2 AWS Region, then your + // SourceDBSnapshotIdentifier looks like the following example: // arn:aws:rds:us-west-2:123456789012:snapshot:mysql-instance1-snapshot-20161115. 
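For the simpler same-Region case, here is a minimal sketch that reuses the example identifiers from the constraints above; it assumes a configured *rds.Client, and per the documentation above PreSignedUrl and KmsKeyId come into play only for cross-Region copies of encrypted snapshots.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/rds"
)

// copySnapshotInSameRegion sketches a same-Region CopyDBSnapshot call. The
// source is a system snapshot in the "available" state, and the target name
// follows the constraints listed above (1-255 letters, numbers, or hyphens,
// first character a letter, no trailing hyphen or double hyphen).
func copySnapshotInSameRegion(ctx context.Context, client *rds.Client) error {
	_, err := client.CopyDBSnapshot(ctx, &rds.CopyDBSnapshotInput{
		SourceDBSnapshotIdentifier: aws.String("rds:mydb-2012-04-02-00-01"),
		TargetDBSnapshotIdentifier: aws.String("my-db-snapshot"),
	})
	return err
}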
// // To diff --git a/service/rds/api_op_CopyOptionGroup.go b/service/rds/api_op_CopyOptionGroup.go index d21acf3244d..fba98b9cb8e 100644 --- a/service/rds/api_op_CopyOptionGroup.go +++ b/service/rds/api_op_CopyOptionGroup.go @@ -32,8 +32,8 @@ type CopyOptionGroupInput struct { // The identifier for the source option group. Constraints: // - // * Must specify a - // valid option group. + // * Must specify a valid + // option group. // // This member is required. SourceOptionGroupIdentifier *string @@ -45,15 +45,15 @@ type CopyOptionGroupInput struct { // The identifier for the copied option group. Constraints: // - // * Can't be null, + // * Can't be null, // empty, or blank // - // * Must contain from 1 to 255 letters, numbers, or hyphens + // * Must contain from 1 to 255 letters, numbers, or hyphens // + // * + // First character must be a letter // - // * First character must be a letter - // - // * Can't end with a hyphen or contain two + // * Can't end with a hyphen or contain two // consecutive hyphens // // Example: my-option-group diff --git a/service/rds/api_op_CreateDBCluster.go b/service/rds/api_op_CreateDBCluster.go index 8fa8e414631..ea14b0f23f3 100644 --- a/service/rds/api_op_CreateDBCluster.go +++ b/service/rds/api_op_CreateDBCluster.go @@ -46,13 +46,13 @@ type CreateDBClusterInput struct { // The DB cluster identifier. This parameter is stored as a lowercase string. // Constraints: // - // * Must contain from 1 to 63 letters, numbers, or hyphens. + // * Must contain from 1 to 63 letters, numbers, or hyphens. // + // * First + // character must be a letter. // - // * First character must be a letter. - // - // * Can't end with a hyphen or contain - // two consecutive hyphens. + // * Can't end with a hyphen or contain two + // consecutive hyphens. // // Example: my-cluster1 // @@ -77,14 +77,14 @@ type CreateDBClusterInput struct { // to 0. Currently, Backtrack is only supported for Aurora MySQL DB clusters. // Default: 0 Constraints: // - // * If specified, this value must be set to a number - // from 0 to 259,200 (72 hours). + // * If specified, this value must be set to a number from + // 0 to 259,200 (72 hours). BacktrackWindow *int64 // The number of days for which automated backups are retained. Default: 1 // Constraints: // - // * Must be a value from 1 to 35 + // * Must be a value from 1 to 35 BackupRetentionPeriod *int32 // A value that indicates that the DB cluster should be associated with the @@ -99,8 +99,8 @@ type CreateDBClusterInput struct { // you do not specify a value, then the default DB cluster parameter group for the // specified DB engine and version is used. Constraints: // - // * If supplied, must - // match the name of an existing DB cluster parameter group. + // * If supplied, must match + // the name of an existing DB cluster parameter group. DBClusterParameterGroupName *string // A DB subnet group to associate with this DB cluster. Constraints: Must match the @@ -174,19 +174,19 @@ type CreateDBClusterInput struct { // some DB engine modes. 
For more information, see the following sections in the // Amazon Aurora User Guide: // - // * Limitations of Aurora Serverless + // * Limitations of Aurora Serverless // (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.html#aurora-serverless.limitations) // - // - // * Limitations of Parallel Query + // * + // Limitations of Parallel Query // (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-mysql-parallel-query.html#aurora-mysql-parallel-query-limitations) // - // - // * Limitations of Aurora Global Databases + // * + // Limitations of Aurora Global Databases // (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-global-database.html#aurora-global-database.limitations) // - // - // * Limitations of Multi-Master Clusters + // * + // Limitations of Multi-Master Clusters // (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-multi-master.html#aurora-multi-master-limitations) EngineMode *string @@ -215,20 +215,20 @@ type CreateDBClusterInput struct { // of the ARN for the KMS encryption key. If an encryption key isn't specified in // KmsKeyId: // - // * If ReplicationSourceIdentifier identifies an encrypted source, - // then Amazon RDS will use the encryption key used to encrypt the source. - // Otherwise, Amazon RDS will use your default encryption key. + // * If ReplicationSourceIdentifier identifies an encrypted source, then + // Amazon RDS will use the encryption key used to encrypt the source. Otherwise, + // Amazon RDS will use your default encryption key. // - // * If the - // StorageEncrypted parameter is enabled and ReplicationSourceIdentifier isn't - // specified, then Amazon RDS will use your default encryption key. + // * If the StorageEncrypted + // parameter is enabled and ReplicationSourceIdentifier isn't specified, then + // Amazon RDS will use your default encryption key. // - // AWS KMS - // creates the default encryption key for your AWS account. Your AWS account has a - // different default encryption key for each AWS Region. If you create a read - // replica of an encrypted DB cluster in another AWS Region, you must set KmsKeyId - // to a KMS key ID that is valid in the destination AWS Region. This key is used to - // encrypt the read replica in that AWS Region. + // AWS KMS creates the default + // encryption key for your AWS account. Your AWS account has a different default + // encryption key for each AWS Region. If you create a read replica of an encrypted + // DB cluster in another AWS Region, you must set KmsKeyId to a KMS key ID that is + // valid in the destination AWS Region. This key is used to encrypt the read + // replica in that AWS Region. KmsKeyId *string // The password for the master database user. This password can contain any @@ -238,13 +238,13 @@ type CreateDBClusterInput struct { // The name of the master user for the DB cluster. Constraints: // - // * Must be 1 to - // 16 letters or numbers. + // * Must be 1 to 16 + // letters or numbers. // - // * First character must be a letter. + // * First character must be a letter. // - // * Can't be - // a reserved word for the chosen database engine. + // * Can't be a reserved + // word for the chosen database engine. MasterUsername *string // A value that indicates that the DB cluster should be associated with the @@ -265,16 +265,16 @@ type CreateDBClusterInput struct { // AWS Region that contains the encrypted DB cluster to be copied. 
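A minimal CreateDBCluster sketch that follows the identifier, master user, and backup retention constraints documented in this hunk. It assumes a configured *rds.Client; the required Engine value ("aurora-mysql") is an assumed example, since the Engine member's documentation is not part of this excerpt.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/rds"
)

// createAuroraCluster sketches a minimal CreateDBCluster call: a 1-63 character
// lowercase identifier, a 1-16 character master user name starting with a
// letter, and a backup retention period in the allowed 1-35 day range.
func createAuroraCluster(ctx context.Context, client *rds.Client) error {
	_, err := client.CreateDBCluster(ctx, &rds.CreateDBClusterInput{
		DBClusterIdentifier:   aws.String("my-cluster1"),
		Engine:                aws.String("aurora-mysql"), // assumed example engine
		MasterUsername:        aws.String("admin1"),
		MasterUserPassword:    aws.String("replace-with-a-real-password"), // placeholder only
		BackupRetentionPeriod: aws.Int32(7),                               // must be 1-35
	})
	return err
}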
The pre-signed // URL request must contain the following parameter values: // - // * KmsKeyId - The - // AWS KMS key identifier for the key to use to encrypt the copy of the DB cluster - // in the destination AWS Region. This should refer to the same KMS key for both - // the CreateDBCluster action that is called in the destination AWS Region, and the + // * KmsKeyId - The AWS + // KMS key identifier for the key to use to encrypt the copy of the DB cluster in + // the destination AWS Region. This should refer to the same KMS key for both the + // CreateDBCluster action that is called in the destination AWS Region, and the // action contained in the pre-signed URL. // - // * DestinationRegion - The name of - // the AWS Region that Aurora read replica will be created in. + // * DestinationRegion - The name of the + // AWS Region that Aurora read replica will be created in. // - // * + // * // ReplicationSourceIdentifier - The DB cluster identifier for the encrypted DB // cluster to be copied. This identifier must be in the Amazon Resource Name (ARN) // format for the source AWS Region. For example, if you are copying an encrypted @@ -302,16 +302,15 @@ type CreateDBClusterInput struct { // (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_UpgradeDBInstance.Maintenance.html#AdjustingTheMaintenanceWindow.Aurora) // in the Amazon Aurora User Guide. Constraints: // - // * Must be in the format + // * Must be in the format // hh24:mi-hh24:mi. // - // * Must be in Universal Coordinated Time (UTC). + // * Must be in Universal Coordinated Time (UTC). // - // * Must - // not conflict with the preferred maintenance window. + // * Must not + // conflict with the preferred maintenance window. // - // * Must be at least 30 - // minutes. + // * Must be at least 30 minutes. PreferredBackupWindow *string // The weekly time range during which system maintenance can occur, in Universal diff --git a/service/rds/api_op_CreateDBClusterEndpoint.go b/service/rds/api_op_CreateDBClusterEndpoint.go index efa572c333a..676fb7a20e6 100644 --- a/service/rds/api_op_CreateDBClusterEndpoint.go +++ b/service/rds/api_op_CreateDBClusterEndpoint.go @@ -63,17 +63,17 @@ type CreateDBClusterEndpointInput struct { // Aurora DB cluster. This data type is used as a response element in the following // actions: // -// * CreateDBClusterEndpoint +// * CreateDBClusterEndpoint // -// * DescribeDBClusterEndpoints +// * DescribeDBClusterEndpoints // -// * +// * // ModifyDBClusterEndpoint // -// * DeleteDBClusterEndpoint +// * DeleteDBClusterEndpoint // -// For the data structure -// that represents Amazon RDS DB instance endpoints, see Endpoint. +// For the data structure that +// represents Amazon RDS DB instance endpoints, see Endpoint. type CreateDBClusterEndpointOutput struct { // The type associated with a custom endpoint. One of: READER, WRITER, ANY. diff --git a/service/rds/api_op_CreateDBClusterParameterGroup.go b/service/rds/api_op_CreateDBClusterParameterGroup.go index 7ba6ae2dfcd..8ef93276e3c 100644 --- a/service/rds/api_op_CreateDBClusterParameterGroup.go +++ b/service/rds/api_op_CreateDBClusterParameterGroup.go @@ -56,11 +56,11 @@ type CreateDBClusterParameterGroupInput struct { // The name of the DB cluster parameter group. Constraints: // - // * Must match the - // name of an existing DB cluster parameter group. + // * Must match the name + // of an existing DB cluster parameter group. // - // This value is stored as a - // lowercase string. + // This value is stored as a lowercase + // string. 
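As a sketch of the custom endpoint types listed above (READER, WRITER, ANY), the following creates a READER endpoint for an Aurora cluster. It assumes a configured *rds.Client; the input field names are the service's usual ones and are assumed here rather than taken from this hunk.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/rds"
)

// createReaderEndpoint sketches creating a custom READER endpoint. The endpoint
// types READER, WRITER, and ANY come from the CreateDBClusterEndpoint output
// documentation above.
func createReaderEndpoint(ctx context.Context, client *rds.Client) error {
	_, err := client.CreateDBClusterEndpoint(ctx, &rds.CreateDBClusterEndpointInput{
		DBClusterIdentifier:         aws.String("my-cluster1"),
		DBClusterEndpointIdentifier: aws.String("my-cluster1-reader"),
		EndpointType:                aws.String("READER"),
	})
	return err
}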
// // This member is required. DBClusterParameterGroupName *string diff --git a/service/rds/api_op_CreateDBClusterSnapshot.go b/service/rds/api_op_CreateDBClusterSnapshot.go index 2c1cee1a9a9..23e8124204a 100644 --- a/service/rds/api_op_CreateDBClusterSnapshot.go +++ b/service/rds/api_op_CreateDBClusterSnapshot.go @@ -36,7 +36,7 @@ type CreateDBClusterSnapshotInput struct { // The identifier of the DB cluster to create a snapshot for. This parameter isn't // case-sensitive. Constraints: // - // * Must match the identifier of an existing + // * Must match the identifier of an existing // DBCluster. // // Example: my-cluster1 @@ -47,16 +47,15 @@ type CreateDBClusterSnapshotInput struct { // The identifier of the DB cluster snapshot. This parameter is stored as a // lowercase string. Constraints: // - // * Must contain from 1 to 63 letters, - // numbers, or hyphens. + // * Must contain from 1 to 63 letters, numbers, or + // hyphens. // - // * First character must be a letter. + // * First character must be a letter. // - // * Can't end - // with a hyphen or contain two consecutive hyphens. + // * Can't end with a hyphen or + // contain two consecutive hyphens. // - // Example: - // my-cluster1-snapshot1 + // Example: my-cluster1-snapshot1 // // This member is required. DBClusterSnapshotIdentifier *string diff --git a/service/rds/api_op_CreateDBInstance.go b/service/rds/api_op_CreateDBInstance.go index 7a029750942..95d871497f8 100644 --- a/service/rds/api_op_CreateDBInstance.go +++ b/service/rds/api_op_CreateDBInstance.go @@ -43,13 +43,13 @@ type CreateDBInstanceInput struct { // The DB instance identifier. This parameter is stored as a lowercase string. // Constraints: // - // * Must contain from 1 to 63 letters, numbers, or hyphens. + // * Must contain from 1 to 63 letters, numbers, or hyphens. // + // * First + // character must be a letter. // - // * First character must be a letter. - // - // * Can't end with a hyphen or contain - // two consecutive hyphens. + // * Can't end with a hyphen or contain two + // consecutive hyphens. // // Example: mydbinstance // @@ -59,37 +59,37 @@ type CreateDBInstanceInput struct { // The name of the database engine to be used for this instance. Not every database // engine is available for every AWS Region. Valid Values: // - // * aurora (for MySQL + // * aurora (for MySQL // 5.6-compatible Aurora) // - // * aurora-mysql (for MySQL 5.7-compatible Aurora) + // * aurora-mysql (for MySQL 5.7-compatible Aurora) // + // * + // aurora-postgresql // - // * aurora-postgresql + // * mariadb // - // * mariadb + // * mysql // - // * mysql + // * oracle-ee // - // * oracle-ee + // * oracle-se2 // - // * - // oracle-se2 + // * + // oracle-se1 // - // * oracle-se1 + // * oracle-se // - // * oracle-se + // * postgres // - // * postgres + // * sqlserver-ee // - // * - // sqlserver-ee + // * sqlserver-se // - // * sqlserver-se + // * + // sqlserver-ex // - // * sqlserver-ex - // - // * sqlserver-web + // * sqlserver-web // // This member is required. Engine *string @@ -100,79 +100,78 @@ type CreateDBInstanceInput struct { // for the space that you use in an Aurora cluster volume. MySQL Constraints to the // amount of storage for each storage type are the following: // - // * General - // Purpose (SSD) storage (gp2): Must be an integer from 20 to 65536. - // - // * - // Provisioned IOPS storage (io1): Must be an integer from 100 to 65536. - // - // * - // Magnetic storage (standard): Must be an integer from 5 to 3072. 
- // - // MariaDB - // Constraints to the amount of storage for each storage type are the following: - // - // - // * General Purpose (SSD) storage (gp2): Must be an integer from 20 to 65536. + // * General Purpose + // (SSD) storage (gp2): Must be an integer from 20 to 65536. // + // * Provisioned IOPS + // storage (io1): Must be an integer from 100 to 65536. // - // * Provisioned IOPS storage (io1): Must be an integer from 100 to 65536. - // - // * - // Magnetic storage (standard): Must be an integer from 5 to 3072. - // - // PostgreSQL - // Constraints to the amount of storage for each storage type are the following: + // * Magnetic storage + // (standard): Must be an integer from 5 to 3072. // + // MariaDB Constraints to the + // amount of storage for each storage type are the following: // - // * General Purpose (SSD) storage (gp2): Must be an integer from 20 to 65536. + // * General Purpose + // (SSD) storage (gp2): Must be an integer from 20 to 65536. // + // * Provisioned IOPS + // storage (io1): Must be an integer from 100 to 65536. // - // * Provisioned IOPS storage (io1): Must be an integer from 100 to 65536. + // * Magnetic storage + // (standard): Must be an integer from 5 to 3072. // - // * - // Magnetic storage (standard): Must be an integer from 5 to 3072. + // PostgreSQL Constraints to the + // amount of storage for each storage type are the following: // - // Oracle - // Constraints to the amount of storage for each storage type are the following: + // * General Purpose + // (SSD) storage (gp2): Must be an integer from 20 to 65536. // + // * Provisioned IOPS + // storage (io1): Must be an integer from 100 to 65536. // - // * General Purpose (SSD) storage (gp2): Must be an integer from 20 to 65536. + // * Magnetic storage + // (standard): Must be an integer from 5 to 3072. // + // Oracle Constraints to the amount + // of storage for each storage type are the following: // - // * Provisioned IOPS storage (io1): Must be an integer from 100 to 65536. + // * General Purpose (SSD) + // storage (gp2): Must be an integer from 20 to 65536. // - // * - // Magnetic storage (standard): Must be an integer from 10 to 3072. + // * Provisioned IOPS storage + // (io1): Must be an integer from 100 to 65536. // - // SQL Server - // Constraints to the amount of storage for each storage type are the following: + // * Magnetic storage (standard): + // Must be an integer from 10 to 3072. // + // SQL Server Constraints to the amount of + // storage for each storage type are the following: // - // * General Purpose (SSD) storage (gp2): + // * General Purpose (SSD) + // storage (gp2): // - // * Enterprise and Standard - // editions: Must be an integer from 200 to 16384. + // * Enterprise and Standard editions: Must be an integer from 200 + // to 16384. // - // * Web and Express - // editions: Must be an integer from 20 to 16384. + // * Web and Express editions: Must be an integer from 20 to 16384. // - // * Provisioned IOPS storage - // (io1): + // * + // Provisioned IOPS storage (io1): // - // * Enterprise and Standard editions: Must be an integer from 200 - // to 16384. + // * Enterprise and Standard editions: Must be an + // integer from 200 to 16384. // - // * Web and Express editions: Must be an integer from 100 to - // 16384. + // * Web and Express editions: Must be an integer from + // 100 to 16384. // - // * Magnetic storage (standard): + // * Magnetic storage (standard): // - // * Enterprise and Standard + // * Enterprise and Standard // editions: Must be an integer from 200 to 1024. 
// - // * Web and Express - // editions: Must be an integer from 20 to 1024. + // * Web and Express editions: Must + // be an integer from 20 to 1024. AllocatedStorage *int32 // A value that indicates whether minor engine upgrades are applied automatically @@ -198,11 +197,11 @@ type CreateDBInstanceInput struct { // disables automated backups. Amazon Aurora Not applicable. The retention period // for automated backups is managed by the DB cluster. Default: 1 Constraints: // + // * + // Must be a value from 0 to 35 // - // * Must be a value from 0 to 35 - // - // * Can't be set to 0 if the DB instance is a - // source to read replicas + // * Can't be set to 0 if the DB instance is a source + // to read replicas BackupRetentionPeriod *int32 // For supported engines, indicates that the DB instance should be associated with @@ -224,71 +223,70 @@ type CreateDBInstanceInput struct { // this parameter isn't specified, no database is created in the DB instance. // Constraints: // - // * Must contain 1 to 64 letters or numbers. + // * Must contain 1 to 64 letters or numbers. // - // * Must begin - // with a letter. Subsequent characters can be letters, underscores, or digits - // (0-9). + // * Must begin with a + // letter. Subsequent characters can be letters, underscores, or digits (0-9). // - // * Can't be a word reserved by the specified database engine + // * + // Can't be a word reserved by the specified database engine // - // MariaDB - // The name of the database to create when the DB instance is created. If this - // parameter isn't specified, no database is created in the DB instance. - // Constraints: + // MariaDB The name of + // the database to create when the DB instance is created. If this parameter isn't + // specified, no database is created in the DB instance. Constraints: // - // * Must contain 1 to 64 letters or numbers. + // * Must + // contain 1 to 64 letters or numbers. // - // * Must begin - // with a letter. Subsequent characters can be letters, underscores, or digits - // (0-9). + // * Must begin with a letter. Subsequent + // characters can be letters, underscores, or digits (0-9). // - // * Can't be a word reserved by the specified database - // engine + // * Can't be a word + // reserved by the specified database engine // - // PostgreSQL The name of the database to create when the DB instance is - // created. If this parameter isn't specified, the default "postgres" database is - // created in the DB instance. Constraints: + // PostgreSQL The name of the database + // to create when the DB instance is created. If this parameter isn't specified, + // the default "postgres" database is created in the DB instance. Constraints: // - // * Must contain 1 to 63 letters, - // numbers, or underscores. + // * + // Must contain 1 to 63 letters, numbers, or underscores. // - // * Must begin with a letter. Subsequent characters - // can be letters, underscores, or digits (0-9). + // * Must begin with a + // letter. Subsequent characters can be letters, underscores, or digits (0-9). // - // * Can't be a word reserved by - // the specified database engine + // * + // Can't be a word reserved by the specified database engine // - // Oracle The Oracle System ID (SID) of the created - // DB instance. If you specify null, the default value ORCL is used. You can't - // specify the string NULL, or any other reserved word, for DBName. Default: ORCL - // Constraints: + // Oracle The Oracle + // System ID (SID) of the created DB instance. 
If you specify null, the default + // value ORCL is used. You can't specify the string NULL, or any other reserved + // word, for DBName. Default: ORCL Constraints: // - // * Can't be longer than 8 characters + // * Can't be longer than 8 + // characters // - // SQL Server Not - // applicable. Must be null. Amazon Aurora The name of the database to create when - // the primary instance of the DB cluster is created. If this parameter isn't - // specified, no database is created in the DB instance. Constraints: + // SQL Server Not applicable. Must be null. Amazon Aurora The name of + // the database to create when the primary instance of the DB cluster is created. + // If this parameter isn't specified, no database is created in the DB instance. + // Constraints: // - // * Must - // contain 1 to 64 letters or numbers. + // * Must contain 1 to 64 letters or numbers. // - // * Can't be a word reserved by the - // specified database engine + // * Can't be a word + // reserved by the specified database engine DBName *string // The name of the DB parameter group to associate with this DB instance. If you do // not specify a value, then the default DB parameter group for the specified DB // engine and version is used. Constraints: // - // * Must be 1 to 255 letters, - // numbers, or hyphens. + // * Must be 1 to 255 letters, numbers, + // or hyphens. // - // * First character must be a letter + // * First character must be a letter // - // * Can't end - // with a hyphen or contain two consecutive hyphens + // * Can't end with a hyphen or + // contain two consecutive hyphens DBParameterGroupName *string // A list of DB security groups to associate with this DB instance. Default: The @@ -340,26 +338,26 @@ type CreateDBInstanceInput struct { // Amazon Aurora Not applicable. Mapping AWS IAM accounts to database accounts is // managed by the DB cluster. MySQL // - // * For MySQL 5.6, minor version 5.6.34 or + // * For MySQL 5.6, minor version 5.6.34 or // higher // - // * For MySQL 5.7, minor version 5.7.16 or higher + // * For MySQL 5.7, minor version 5.7.16 or higher // - // * For MySQL - // 8.0, minor version 8.0.16 or higher + // * For MySQL 8.0, minor + // version 8.0.16 or higher // // PostgreSQL // - // * For PostgreSQL 9.5, minor - // version 9.5.15 or higher + // * For PostgreSQL 9.5, minor version 9.5.15 + // or higher // - // * For PostgreSQL 9.6, minor version 9.6.11 or - // higher + // * For PostgreSQL 9.6, minor version 9.6.11 or higher // - // * PostgreSQL 10.6, 10.7, and 10.9 + // * PostgreSQL + // 10.6, 10.7, and 10.9 // - // For more information, see IAM - // Database Authentication for MySQL and PostgreSQL + // For more information, see IAM Database Authentication for + // MySQL and PostgreSQL // (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.IAMDBAuth.html) // in the Amazon RDS User Guide. EnableIAMDatabaseAuthentication *bool @@ -429,64 +427,62 @@ type CreateDBInstanceInput struct { // The name for the master user. Amazon Aurora Not applicable. The name for the // master user is managed by the DB cluster. MariaDB Constraints: // - // * Required - // for MariaDB. + // * Required for + // MariaDB. // - // * Must be 1 to 16 letters or numbers. + // * Must be 1 to 16 letters or numbers. // - // * Can't be a - // reserved word for the chosen database engine. + // * Can't be a reserved word for + // the chosen database engine. // - // Microsoft SQL Server - // Constraints: + // Microsoft SQL Server Constraints: // - // * Required for SQL Server. 
+ // * Required for + // SQL Server. // - // * Must be 1 to 128 letters or - // numbers. + // * Must be 1 to 128 letters or numbers. // - // * The first character must be a letter. + // * The first character must + // be a letter. // - // * Can't be a reserved - // word for the chosen database engine. + // * Can't be a reserved word for the chosen database engine. // - // MySQL Constraints: + // MySQL + // Constraints: // - // * Required for - // MySQL. + // * Required for MySQL. // - // * Must be 1 to 16 letters or numbers. + // * Must be 1 to 16 letters or numbers. // - // * First character must be - // a letter. + // * + // First character must be a letter. // - // * Can't be a reserved word for the chosen database - // engine. + // * Can't be a reserved word for the chosen + // database engine. // // Oracle Constraints: // - // * Required for Oracle. + // * Required for Oracle. // - // * Must be 1 to 30 + // * Must be 1 to 30 // letters or numbers. // - // * First character must be a letter. + // * First character must be a letter. // - // * Can't be a - // reserved word for the chosen database engine. + // * Can't be a reserved + // word for the chosen database engine. // // PostgreSQL Constraints: // - // * - // Required for PostgreSQL. + // * Required for + // PostgreSQL. // - // * Must be 1 to 63 letters or numbers. + // * Must be 1 to 63 letters or numbers. // - // * First - // character must be a letter. + // * First character must be a + // letter. // - // * Can't be a reserved word for the chosen - // database engine. + // * Can't be a reserved word for the chosen database engine. MasterUsername *string // The upper limit to which Amazon RDS can automatically scale the storage of the @@ -555,16 +551,15 @@ type CreateDBInstanceInput struct { // (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_UpgradeDBInstance.Maintenance.html#AdjustingTheMaintenanceWindow) // in the Amazon RDS User Guide. Constraints: // - // * Must be in the format + // * Must be in the format // hh24:mi-hh24:mi. // - // * Must be in Universal Coordinated Time (UTC). + // * Must be in Universal Coordinated Time (UTC). // - // * Must - // not conflict with the preferred maintenance window. + // * Must not + // conflict with the preferred maintenance window. // - // * Must be at least 30 - // minutes. + // * Must be at least 30 minutes. PreferredBackupWindow *string // The time range each week during which system maintenance can occur, in Universal @@ -599,22 +594,22 @@ type CreateDBInstanceInput struct { // DBSubnetGroupName isn't specified, and PubliclyAccessible isn't specified, the // following applies: // - // * If the default VPC in the target region doesn’t have - // an Internet gateway attached to it, the DB instance is private. + // * If the default VPC in the target region doesn’t have an + // Internet gateway attached to it, the DB instance is private. // - // * If the - // default VPC in the target region has an Internet gateway attached to it, the DB - // instance is public. + // * If the default + // VPC in the target region has an Internet gateway attached to it, the DB instance + // is public. // - // If DBSubnetGroupName is specified, and PubliclyAccessible - // isn't specified, the following applies: + // If DBSubnetGroupName is specified, and PubliclyAccessible isn't + // specified, the following applies: // - // * If the subnets are part of a VPC - // that doesn’t have an Internet gateway attached to it, the DB instance is - // private. 
+ // * If the subnets are part of a VPC that + // doesn’t have an Internet gateway attached to it, the DB instance is private. // - // * If the subnets are part of a VPC that has an Internet gateway - // attached to it, the DB instance is public. + // * + // If the subnets are part of a VPC that has an Internet gateway attached to it, + // the DB instance is public. PubliclyAccessible *bool // A value that indicates whether the DB instance is encrypted. By default, it diff --git a/service/rds/api_op_CreateDBInstanceReadReplica.go b/service/rds/api_op_CreateDBInstanceReadReplica.go index e3e0432fc3f..ad02502b986 100644 --- a/service/rds/api_op_CreateDBInstanceReadReplica.go +++ b/service/rds/api_op_CreateDBInstanceReadReplica.go @@ -53,37 +53,37 @@ type CreateDBInstanceReadReplicaInput struct { // The identifier of the DB instance that will act as the source for the read // replica. Each DB instance can have up to five read replicas. Constraints: // - // * + // * // Must be the identifier of an existing MySQL, MariaDB, Oracle, PostgreSQL, or SQL // Server DB instance. // - // * Can specify a DB instance that is a MySQL read - // replica only if the source is running MySQL 5.6 or later. + // * Can specify a DB instance that is a MySQL read replica + // only if the source is running MySQL 5.6 or later. // - // * For the - // limitations of Oracle read replicas, see Read Replica Limitations with Oracle + // * For the limitations of + // Oracle read replicas, see Read Replica Limitations with Oracle // (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/oracle-read-replicas.html) // in the Amazon RDS User Guide. // - // * For the limitations of SQL Server read + // * For the limitations of SQL Server read // replicas, see Read Replica Limitations with Microsoft SQL Server // (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/SQLServer.ReadReplicas.Limitations.html) // in the Amazon RDS User Guide. // - // * Can specify a PostgreSQL DB instance only - // if the source is running PostgreSQL 9.3.5 or later (9.4.7 and higher for + // * Can specify a PostgreSQL DB instance only if + // the source is running PostgreSQL 9.3.5 or later (9.4.7 and higher for // cross-region replication). // - // * The specified DB instance must have automatic + // * The specified DB instance must have automatic // backups enabled, that is, its backup retention period must be greater than 0. // + // * + // If the source DB instance is in the same AWS Region as the read replica, specify + // a valid DB instance identifier. // - // * If the source DB instance is in the same AWS Region as the read replica, - // specify a valid DB instance identifier. - // - // * If the source DB instance is in a - // different AWS Region from the read replica, specify a valid DB instance ARN. For - // more information, see Constructing an ARN for Amazon RDS + // * If the source DB instance is in a different + // AWS Region from the read replica, specify a valid DB instance ARN. For more + // information, see Constructing an ARN for Amazon RDS // (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.ARN.html#USER_Tagging.ARN.Constructing) // in the Amazon RDS User Guide. This doesn't apply to SQL Server, which doesn't // support cross-region replicas. @@ -120,38 +120,38 @@ type CreateDBInstanceReadReplicaInput struct { // replica. Currently, specifying a parameter group for this operation is only // supported for Oracle DB instances. 
Constraints: // - // * Must be 1 to 255 letters, + // * Must be 1 to 255 letters, // numbers, or hyphens. // - // * First character must be a letter + // * First character must be a letter // - // * Can't end - // with a hyphen or contain two consecutive hyphens + // * Can't end with a + // hyphen or contain two consecutive hyphens DBParameterGroupName *string // Specifies a DB subnet group for the DB instance. The new DB instance is created // in the VPC associated with the DB subnet group. If no DB subnet group is // specified, then the new DB instance isn't created in a VPC. Constraints: // - // * - // Can only be specified if the source DB instance identifier specifies a DB - // instance in another AWS Region. - // - // * If supplied, must match the name of an - // existing DBSubnetGroup. + // * Can + // only be specified if the source DB instance identifier specifies a DB instance + // in another AWS Region. // - // * The specified DB subnet group must be in the same - // AWS Region in which the operation is running. + // * If supplied, must match the name of an existing + // DBSubnetGroup. // - // * All read replicas in one - // AWS Region that are created from the same source DB instance must either:> + // * The specified DB subnet group must be in the same AWS Region + // in which the operation is running. // + // * All read replicas in one AWS Region that + // are created from the same source DB instance must either:> // - // * Specify DB subnet groups from the same VPC. All these read replicas are - // created in the same VPC. + // * Specify DB subnet + // groups from the same VPC. All these read replicas are created in the same + // VPC. // - // * Not specify a DB subnet group. All these - // read replicas are created outside of any VPC. + // * Not specify a DB subnet group. All these read replicas are created + // outside of any VPC. // // Example: mySubnetgroup DBSubnetGroupName *string @@ -267,7 +267,7 @@ type CreateDBInstanceReadReplicaInput struct { // Region that contains the encrypted source DB instance. The presigned URL request // must contain the following parameter values: // - // * DestinationRegion - The AWS + // * DestinationRegion - The AWS // Region that the encrypted read replica is created in. This AWS Region is the // same one where the CreateDBInstanceReadReplica action is called that contains // this presigned URL. For example, if you create an encrypted DB instance in the @@ -278,12 +278,12 @@ type CreateDBInstanceReadReplicaInput struct { // example, the DestinationRegion in the presigned URL must be set to the us-east-1 // AWS Region. // - // * KmsKeyId - The AWS KMS key identifier for the key to use to + // * KmsKeyId - The AWS KMS key identifier for the key to use to // encrypt the read replica in the destination AWS Region. This is the same // identifier for both the CreateDBInstanceReadReplica action that is called in the // destination AWS Region, and the action contained in the presigned URL. // - // * + // * // SourceDBInstanceIdentifier - The DB instance identifier for the encrypted DB // instance to be replicated. This identifier must be in the Amazon Resource Name // (ARN) format for the source AWS Region. 
For example, if you are creating an diff --git a/service/rds/api_op_CreateDBParameterGroup.go b/service/rds/api_op_CreateDBParameterGroup.go index 32d17033c07..b1da0ea6589 100644 --- a/service/rds/api_op_CreateDBParameterGroup.go +++ b/service/rds/api_op_CreateDBParameterGroup.go @@ -58,16 +58,16 @@ type CreateDBParameterGroupInput struct { // The name of the DB parameter group. Constraints: // - // * Must be 1 to 255 - // letters, numbers, or hyphens. + // * Must be 1 to 255 letters, + // numbers, or hyphens. // - // * First character must be a letter + // * First character must be a letter // - // * - // Can't end with a hyphen or contain two consecutive hyphens + // * Can't end with a + // hyphen or contain two consecutive hyphens // - // This value is stored - // as a lowercase string. + // This value is stored as a lowercase + // string. // // This member is required. DBParameterGroupName *string diff --git a/service/rds/api_op_CreateDBSecurityGroup.go b/service/rds/api_op_CreateDBSecurityGroup.go index 1c10d01a7c9..e07c268800a 100644 --- a/service/rds/api_op_CreateDBSecurityGroup.go +++ b/service/rds/api_op_CreateDBSecurityGroup.go @@ -40,15 +40,15 @@ type CreateDBSecurityGroupInput struct { // The name for the DB security group. This value is stored as a lowercase string. // Constraints: // - // * Must be 1 to 255 letters, numbers, or hyphens. + // * Must be 1 to 255 letters, numbers, or hyphens. // - // * First + // * First // character must be a letter // - // * Can't end with a hyphen or contain two - // consecutive hyphens + // * Can't end with a hyphen or contain two consecutive + // hyphens // - // * Must not be "Default" + // * Must not be "Default" // // Example: mysecuritygroup // diff --git a/service/rds/api_op_CreateDBSnapshot.go b/service/rds/api_op_CreateDBSnapshot.go index 33a677b7164..ddc16e190ee 100644 --- a/service/rds/api_op_CreateDBSnapshot.go +++ b/service/rds/api_op_CreateDBSnapshot.go @@ -34,23 +34,23 @@ type CreateDBSnapshotInput struct { // The identifier of the DB instance that you want to create the snapshot of. // Constraints: // - // * Must match the identifier of an existing DBInstance. + // * Must match the identifier of an existing DBInstance. // // This member is required. DBInstanceIdentifier *string // The identifier for the DB snapshot. Constraints: // - // * Can't be null, empty, or + // * Can't be null, empty, or // blank // - // * Must contain from 1 to 255 letters, numbers, or hyphens + // * Must contain from 1 to 255 letters, numbers, or hyphens // - // * - // First character must be a letter + // * First + // character must be a letter // - // * Can't end with a hyphen or contain two - // consecutive hyphens + // * Can't end with a hyphen or contain two consecutive + // hyphens // // Example: my-snapshot-id // diff --git a/service/rds/api_op_CreateEventSubscription.go b/service/rds/api_op_CreateEventSubscription.go index 91a9a4aa5c5..1ee45cc5a8f 100644 --- a/service/rds/api_op_CreateEventSubscription.go +++ b/service/rds/api_op_CreateEventSubscription.go @@ -76,28 +76,27 @@ type CreateEventSubscriptionInput struct { // begin with a letter and must contain only ASCII letters, digits, and hyphens. It // can't end with a hyphen or contain two consecutive hyphens. Constraints: // - // * - // If a SourceIds value is supplied, SourceType must also be provided. + // * If a + // SourceIds value is supplied, SourceType must also be provided. // - // * If - // the source type is a DB instance, a DBInstanceIdentifier value must be - // supplied. 
- // - // * If the source type is a DB cluster, a DBClusterIdentifier value - // must be supplied. + // * If the source + // type is a DB instance, a DBInstanceIdentifier value must be supplied. // - // * If the source type is a DB parameter group, a - // DBParameterGroupName value must be supplied. + // * If the + // source type is a DB cluster, a DBClusterIdentifier value must be supplied. // - // * If the source type is a DB - // security group, a DBSecurityGroupName value must be supplied. + // * If + // the source type is a DB parameter group, a DBParameterGroupName value must be + // supplied. // - // * If the - // source type is a DB snapshot, a DBSnapshotIdentifier value must be supplied. + // * If the source type is a DB security group, a DBSecurityGroupName + // value must be supplied. // + // * If the source type is a DB snapshot, a + // DBSnapshotIdentifier value must be supplied. // - // * If the source type is a DB cluster snapshot, a DBClusterSnapshotIdentifier - // value must be supplied. + // * If the source type is a DB + // cluster snapshot, a DBClusterSnapshotIdentifier value must be supplied. SourceIds []*string // The type of source that is generating the events. For example, if you want to be diff --git a/service/rds/api_op_CreateOptionGroup.go b/service/rds/api_op_CreateOptionGroup.go index abe94ff9a8a..0c7ff7d3f8b 100644 --- a/service/rds/api_op_CreateOptionGroup.go +++ b/service/rds/api_op_CreateOptionGroup.go @@ -49,14 +49,13 @@ type CreateOptionGroupInput struct { // Specifies the name of the option group to be created. Constraints: // - // * Must - // be 1 to 255 letters, numbers, or hyphens + // * Must be 1 + // to 255 letters, numbers, or hyphens // - // * First character must be a - // letter + // * First character must be a letter // - // * Can't end with a hyphen or contain two consecutive - // hyphens + // * Can't + // end with a hyphen or contain two consecutive hyphens // // Example: myoptiongroup // diff --git a/service/rds/api_op_DeleteDBCluster.go b/service/rds/api_op_DeleteDBCluster.go index e85f272982b..2ac314c64b4 100644 --- a/service/rds/api_op_DeleteDBCluster.go +++ b/service/rds/api_op_DeleteDBCluster.go @@ -38,7 +38,7 @@ type DeleteDBClusterInput struct { // The DB cluster identifier for the DB cluster to be deleted. This parameter isn't // case-sensitive. Constraints: // - // * Must match an existing DBClusterIdentifier. + // * Must match an existing DBClusterIdentifier. // // This member is required. DBClusterIdentifier *string @@ -48,13 +48,13 @@ type DeleteDBClusterInput struct { // creation of a final DB cluster snapshot with the SkipFinalShapshot parameter // results in an error. Constraints: // - // * Must be 1 to 255 letters, numbers, or + // * Must be 1 to 255 letters, numbers, or // hyphens. // - // * First character must be a letter + // * First character must be a letter // - // * Can't end with a hyphen - // or contain two consecutive hyphens + // * Can't end with a hyphen or + // contain two consecutive hyphens FinalDBSnapshotIdentifier *string // A value that indicates whether to skip the creation of a final DB cluster diff --git a/service/rds/api_op_DeleteDBClusterEndpoint.go b/service/rds/api_op_DeleteDBClusterEndpoint.go index a6a7d8e6aea..5421299c770 100644 --- a/service/rds/api_op_DeleteDBClusterEndpoint.go +++ b/service/rds/api_op_DeleteDBClusterEndpoint.go @@ -40,17 +40,17 @@ type DeleteDBClusterEndpointInput struct { // Aurora DB cluster. 
This data type is used as a response element in the following // actions: // -// * CreateDBClusterEndpoint +// * CreateDBClusterEndpoint // -// * DescribeDBClusterEndpoints +// * DescribeDBClusterEndpoints // -// * +// * // ModifyDBClusterEndpoint // -// * DeleteDBClusterEndpoint +// * DeleteDBClusterEndpoint // -// For the data structure -// that represents Amazon RDS DB instance endpoints, see Endpoint. +// For the data structure that +// represents Amazon RDS DB instance endpoints, see Endpoint. type DeleteDBClusterEndpointOutput struct { // The type associated with a custom endpoint. One of: READER, WRITER, ANY. diff --git a/service/rds/api_op_DeleteDBClusterParameterGroup.go b/service/rds/api_op_DeleteDBClusterParameterGroup.go index 478cd851f39..759789db96e 100644 --- a/service/rds/api_op_DeleteDBClusterParameterGroup.go +++ b/service/rds/api_op_DeleteDBClusterParameterGroup.go @@ -35,13 +35,13 @@ type DeleteDBClusterParameterGroupInput struct { // The name of the DB cluster parameter group. Constraints: // - // * Must be the name - // of an existing DB cluster parameter group. + // * Must be the name of + // an existing DB cluster parameter group. // - // * You can't delete a default DB - // cluster parameter group. + // * You can't delete a default DB cluster + // parameter group. // - // * Can't be associated with any DB clusters. + // * Can't be associated with any DB clusters. // // This member is required. DBClusterParameterGroupName *string diff --git a/service/rds/api_op_DeleteDBInstance.go b/service/rds/api_op_DeleteDBInstance.go index 0ec7db6984b..fc703b7cb13 100644 --- a/service/rds/api_op_DeleteDBInstance.go +++ b/service/rds/api_op_DeleteDBInstance.go @@ -24,11 +24,11 @@ import ( // part of an Amazon Aurora DB cluster, you can't delete the DB instance if both of // the following conditions are true: // -// * The DB cluster is a read replica of +// * The DB cluster is a read replica of // another Amazon Aurora DB cluster. // -// * The DB instance is the only instance in -// the DB cluster. +// * The DB instance is the only instance in the +// DB cluster. // // To delete a DB instance in this case, first call the // PromoteReadReplicaDBCluster API action to promote the DB cluster so it's no @@ -55,7 +55,7 @@ type DeleteDBInstanceInput struct { // The DB instance identifier for the DB instance to be deleted. This parameter // isn't case-sensitive. Constraints: // - // * Must match the name of an existing DB + // * Must match the name of an existing DB // instance. // // This member is required. @@ -71,15 +71,15 @@ type DeleteDBInstanceInput struct { // specifying to skip final DB snapshot creation in SkipFinalShapshot results in an // error. Constraints: // - // * Must be 1 to 255 letters or numbers. + // * Must be 1 to 255 letters or numbers. // - // * First - // character must be a letter. + // * First character + // must be a letter. // - // * Can't end with a hyphen or contain two - // consecutive hyphens. + // * Can't end with a hyphen or contain two consecutive + // hyphens. // - // * Can't be specified when deleting a read replica. + // * Can't be specified when deleting a read replica. 
FinalDBSnapshotIdentifier *string // A value that indicates whether to skip the creation of a final DB snapshot diff --git a/service/rds/api_op_DeleteDBParameterGroup.go b/service/rds/api_op_DeleteDBParameterGroup.go index 43fe12b78b2..b5e22ebdbd5 100644 --- a/service/rds/api_op_DeleteDBParameterGroup.go +++ b/service/rds/api_op_DeleteDBParameterGroup.go @@ -32,13 +32,13 @@ type DeleteDBParameterGroupInput struct { // The name of the DB parameter group. Constraints: // - // * Must be the name of an + // * Must be the name of an // existing DB parameter group // - // * You can't delete a default DB parameter - // group + // * You can't delete a default DB parameter group // - // * Can't be associated with any DB instances + // * + // Can't be associated with any DB instances // // This member is required. DBParameterGroupName *string diff --git a/service/rds/api_op_DeleteDBSecurityGroup.go b/service/rds/api_op_DeleteDBSecurityGroup.go index 12eed4a2445..a94964e0987 100644 --- a/service/rds/api_op_DeleteDBSecurityGroup.go +++ b/service/rds/api_op_DeleteDBSecurityGroup.go @@ -33,15 +33,15 @@ type DeleteDBSecurityGroupInput struct { // The name of the DB security group to delete. You can't delete the default DB // security group. Constraints: // - // * Must be 1 to 255 letters, numbers, or + // * Must be 1 to 255 letters, numbers, or // hyphens. // - // * First character must be a letter + // * First character must be a letter // - // * Can't end with a hyphen - // or contain two consecutive hyphens + // * Can't end with a hyphen or + // contain two consecutive hyphens // - // * Must not be "Default" + // * Must not be "Default" // // This member is required. DBSecurityGroupName *string diff --git a/service/rds/api_op_DescribeCertificates.go b/service/rds/api_op_DescribeCertificates.go index b0d1bd53480..f673c903939 100644 --- a/service/rds/api_op_DescribeCertificates.go +++ b/service/rds/api_op_DescribeCertificates.go @@ -34,7 +34,7 @@ type DescribeCertificatesInput struct { // information for only the identified certificate is returned. This parameter // isn't case-sensitive. Constraints: // - // * Must match an existing + // * Must match an existing // CertificateIdentifier. CertificateIdentifier *string diff --git a/service/rds/api_op_DescribeDBClusterBacktracks.go b/service/rds/api_op_DescribeDBClusterBacktracks.go index 4eb5ebcaae7..82879e3a1fe 100644 --- a/service/rds/api_op_DescribeDBClusterBacktracks.go +++ b/service/rds/api_op_DescribeDBClusterBacktracks.go @@ -37,13 +37,13 @@ type DescribeDBClusterBacktracksInput struct { // The DB cluster identifier of the DB cluster to be described. This parameter is // stored as a lowercase string. Constraints: // - // * Must contain from 1 to 63 + // * Must contain from 1 to 63 // alphanumeric characters or hyphens. // - // * First character must be a letter. + // * First character must be a letter. // - // - // * Can't end with a hyphen or contain two consecutive hyphens. + // * + // Can't end with a hyphen or contain two consecutive hyphens. // // Example: // my-cluster1 @@ -54,9 +54,9 @@ type DescribeDBClusterBacktracksInput struct { // If specified, this value is the backtrack identifier of the backtrack to be // described. Constraints: // - // * Must contain a valid universally unique - // identifier (UUID). For more information about UUIDs, see A Universally Unique - // Identifier (UUID) URN Namespace (http://www.ietf.org/rfc/rfc4122.txt). + // * Must contain a valid universally unique identifier + // (UUID). 
For more information about UUIDs, see A Universally Unique Identifier + // (UUID) URN Namespace (http://www.ietf.org/rfc/rfc4122.txt). // // Example: // 123e4567-e89b-12d3-a456-426655440000 @@ -65,24 +65,24 @@ type DescribeDBClusterBacktracksInput struct { // A filter that specifies one or more DB clusters to describe. Supported filters // include the following: // - // * db-cluster-backtrack-id - Accepts backtrack + // * db-cluster-backtrack-id - Accepts backtrack // identifiers. The results list includes information about only the backtracks // identified by these identifiers. // - // * db-cluster-backtrack-status - Accepts - // any of the following backtrack status values: + // * db-cluster-backtrack-status - Accepts any of + // the following backtrack status values: // - // * applying + // * applying // - // * - // completed + // * completed // - // * failed + // * failed // - // * pending + // * + // pending // - // The results list includes - // information about only the backtracks identified by these values. + // The results list includes information about only the backtracks + // identified by these values. Filters []*types.Filter // An optional pagination token provided by a previous DescribeDBClusterBacktracks diff --git a/service/rds/api_op_DescribeDBClusterParameterGroups.go b/service/rds/api_op_DescribeDBClusterParameterGroups.go index 40344a9330e..e3d23d35459 100644 --- a/service/rds/api_op_DescribeDBClusterParameterGroups.go +++ b/service/rds/api_op_DescribeDBClusterParameterGroups.go @@ -38,7 +38,7 @@ type DescribeDBClusterParameterGroupsInput struct { // The name of a specific DB cluster parameter group to return details for. // Constraints: // - // * If supplied, must match the name of an existing + // * If supplied, must match the name of an existing // DBClusterParameterGroup. DBClusterParameterGroupName *string diff --git a/service/rds/api_op_DescribeDBClusterParameters.go b/service/rds/api_op_DescribeDBClusterParameters.go index f749e2b379a..392d7c296cd 100644 --- a/service/rds/api_op_DescribeDBClusterParameters.go +++ b/service/rds/api_op_DescribeDBClusterParameters.go @@ -36,7 +36,7 @@ type DescribeDBClusterParametersInput struct { // The name of a specific DB cluster parameter group to return parameter details // for. Constraints: // - // * If supplied, must match the name of an existing + // * If supplied, must match the name of an existing // DBClusterParameterGroup. // // This member is required. diff --git a/service/rds/api_op_DescribeDBClusterSnapshots.go b/service/rds/api_op_DescribeDBClusterSnapshots.go index 5c25d83fdc3..b35d5f7b2fd 100644 --- a/service/rds/api_op_DescribeDBClusterSnapshots.go +++ b/service/rds/api_op_DescribeDBClusterSnapshots.go @@ -37,7 +37,7 @@ type DescribeDBClusterSnapshotsInput struct { // parameter can't be used in conjunction with the DBClusterSnapshotIdentifier // parameter. This parameter isn't case-sensitive. Constraints: // - // * If supplied, + // * If supplied, // must match the identifier of an existing DBCluster. DBClusterIdentifier *string @@ -45,26 +45,26 @@ type DescribeDBClusterSnapshotsInput struct { // used in conjunction with the DBClusterIdentifier parameter. This value is stored // as a lowercase string. Constraints: // - // * If supplied, must match the - // identifier of an existing DBClusterSnapshot. + // * If supplied, must match the identifier of + // an existing DBClusterSnapshot. // - // * If this identifier is for an - // automated snapshot, the SnapshotType parameter must also be specified. 
+ // * If this identifier is for an automated + // snapshot, the SnapshotType parameter must also be specified. DBClusterSnapshotIdentifier *string // A filter that specifies one or more DB cluster snapshots to describe. Supported // filters: // - // * db-cluster-id - Accepts DB cluster identifiers and DB cluster - // Amazon Resource Names (ARNs). + // * db-cluster-id - Accepts DB cluster identifiers and DB cluster Amazon + // Resource Names (ARNs). // - // * db-cluster-snapshot-id - Accepts DB cluster - // snapshot identifiers. + // * db-cluster-snapshot-id - Accepts DB cluster snapshot + // identifiers. // - // * snapshot-type - Accepts types of DB cluster - // snapshots. + // * snapshot-type - Accepts types of DB cluster snapshots. // - // * engine - Accepts names of database engines. + // * engine + // - Accepts names of database engines. Filters []*types.Filter // A value that indicates whether to include manual DB cluster snapshots that are @@ -94,27 +94,27 @@ type DescribeDBClusterSnapshotsInput struct { // The type of DB cluster snapshots to be returned. You can specify one of the // following values: // - // * automated - Return all DB cluster snapshots that have - // been automatically taken by Amazon RDS for my AWS account. + // * automated - Return all DB cluster snapshots that have been + // automatically taken by Amazon RDS for my AWS account. // - // * manual - - // Return all DB cluster snapshots that have been taken by my AWS account. + // * manual - Return all DB + // cluster snapshots that have been taken by my AWS account. // - // * - // shared - Return all manual DB cluster snapshots that have been shared to my AWS - // account. + // * shared - Return all + // manual DB cluster snapshots that have been shared to my AWS account. // - // * public - Return all DB cluster snapshots that have been marked - // as public. + // * public - + // Return all DB cluster snapshots that have been marked as public. // - // If you don't specify a SnapshotType value, then both automated and - // manual DB cluster snapshots are returned. You can include shared DB cluster - // snapshots with these results by enabling the IncludeShared parameter. You can - // include public DB cluster snapshots with these results by enabling the - // IncludePublic parameter. The IncludeShared and IncludePublic parameters don't - // apply for SnapshotType values of manual or automated. The IncludePublic - // parameter doesn't apply when SnapshotType is set to shared. The IncludeShared - // parameter doesn't apply when SnapshotType is set to public. + // If you don't + // specify a SnapshotType value, then both automated and manual DB cluster + // snapshots are returned. You can include shared DB cluster snapshots with these + // results by enabling the IncludeShared parameter. You can include public DB + // cluster snapshots with these results by enabling the IncludePublic parameter. + // The IncludeShared and IncludePublic parameters don't apply for SnapshotType + // values of manual or automated. The IncludePublic parameter doesn't apply when + // SnapshotType is set to shared. The IncludeShared parameter doesn't apply when + // SnapshotType is set to public. 
SnapshotType *string } diff --git a/service/rds/api_op_DescribeDBClusters.go b/service/rds/api_op_DescribeDBClusters.go index 45b8781ba39..0803ec047cf 100644 --- a/service/rds/api_op_DescribeDBClusters.go +++ b/service/rds/api_op_DescribeDBClusters.go @@ -38,16 +38,16 @@ type DescribeDBClustersInput struct { // information from only the specific DB cluster is returned. This parameter isn't // case-sensitive. Constraints: // - // * If supplied, must match an existing + // * If supplied, must match an existing // DBClusterIdentifier. DBClusterIdentifier *string // A filter that specifies one or more DB clusters to describe. Supported // filters: // - // * db-cluster-id - Accepts DB cluster identifiers and DB cluster - // Amazon Resource Names (ARNs). The results list will only include information - // about the DB clusters identified by these ARNs. + // * db-cluster-id - Accepts DB cluster identifiers and DB cluster Amazon + // Resource Names (ARNs). The results list will only include information about the + // DB clusters identified by these ARNs. Filters []*types.Filter // Optional Boolean parameter that specifies whether the output includes diff --git a/service/rds/api_op_DescribeDBEngineVersions.go b/service/rds/api_op_DescribeDBEngineVersions.go index 72d724dfbe8..0a60f96b200 100644 --- a/service/rds/api_op_DescribeDBEngineVersions.go +++ b/service/rds/api_op_DescribeDBEngineVersions.go @@ -32,7 +32,7 @@ type DescribeDBEngineVersionsInput struct { // The name of a specific DB parameter group family to return details for. // Constraints: // - // * If supplied, must match an existing DBParameterGroupFamily. + // * If supplied, must match an existing DBParameterGroupFamily. DBParameterGroupFamily *string // A value that indicates whether only the default version of the specified engine diff --git a/service/rds/api_op_DescribeDBInstanceAutomatedBackups.go b/service/rds/api_op_DescribeDBInstanceAutomatedBackups.go index e404d977dcb..2c8a7fd4c39 100644 --- a/service/rds/api_op_DescribeDBInstanceAutomatedBackups.go +++ b/service/rds/api_op_DescribeDBInstanceAutomatedBackups.go @@ -47,29 +47,29 @@ type DescribeDBInstanceAutomatedBackupsInput struct { // A filter that specifies which resources to return based on status. Supported // filters are the following: // - // * status + // * status // - // * active - automated backups - // for current instances - // - // * retained - automated backups for deleted + // * active - automated backups for current // instances // - // * creating - automated backups that are waiting for the first - // automated snapshot to be available + // * retained - automated backups for deleted instances // - // * db-instance-id - Accepts DB instance - // identifiers and Amazon Resource Names (ARNs) for DB instances. The results list - // includes only information about the DB instance automated backupss identified by - // these ARNs. + // * creating - + // automated backups that are waiting for the first automated snapshot to be + // available // - // * dbi-resource-id - Accepts DB instance resource identifiers - // and DB Amazon Resource Names (ARNs) for DB instances. The results list includes - // only information about the DB instance resources identified by these + // * db-instance-id - Accepts DB instance identifiers and Amazon + // Resource Names (ARNs) for DB instances. The results list includes only + // information about the DB instance automated backupss identified by these // ARNs. // - // Returns all resources by default. 
The status for each resource is - // specified in the response. + // * dbi-resource-id - Accepts DB instance resource identifiers and DB + // Amazon Resource Names (ARNs) for DB instances. The results list includes only + // information about the DB instance resources identified by these ARNs. + // + // Returns + // all resources by default. The status for each resource is specified in the + // response. Filters []*types.Filter // The pagination token provided in the previous request. If this parameter is diff --git a/service/rds/api_op_DescribeDBInstances.go b/service/rds/api_op_DescribeDBInstances.go index ed559535f7d..1c5dae8e170 100644 --- a/service/rds/api_op_DescribeDBInstances.go +++ b/service/rds/api_op_DescribeDBInstances.go @@ -36,33 +36,33 @@ type DescribeDBInstancesInput struct { // information from only the specific DB instance is returned. This parameter isn't // case-sensitive. Constraints: // - // * If supplied, must match the identifier of an + // * If supplied, must match the identifier of an // existing DBInstance. DBInstanceIdentifier *string // A filter that specifies one or more DB instances to describe. Supported // filters: // - // * db-cluster-id - Accepts DB cluster identifiers and DB cluster - // Amazon Resource Names (ARNs). The results list will only include information - // about the DB instances associated with the DB clusters identified by these - // ARNs. + // * db-cluster-id - Accepts DB cluster identifiers and DB cluster Amazon + // Resource Names (ARNs). The results list will only include information about the + // DB instances associated with the DB clusters identified by these ARNs. // - // * db-instance-id - Accepts DB instance identifiers and DB instance - // Amazon Resource Names (ARNs). The results list will only include information - // about the DB instances identified by these ARNs. + // * + // db-instance-id - Accepts DB instance identifiers and DB instance Amazon Resource + // Names (ARNs). The results list will only include information about the DB + // instances identified by these ARNs. // - // * dbi-resource-id - - // Accepts DB instance resource identifiers. The results list will only include - // information about the DB instances identified by these DB instance resource - // identifiers. + // * dbi-resource-id - Accepts DB instance + // resource identifiers. The results list will only include information about the + // DB instances identified by these DB instance resource identifiers. // - // * domain - Accepts Active Directory directory IDs. The results - // list will only include information about the DB instances associated with these - // domains. + // * domain - + // Accepts Active Directory directory IDs. The results list will only include + // information about the DB instances associated with these domains. // - // * engine - Accepts engine names. The results list will only - // include information about the DB instances for these engines. + // * engine - + // Accepts engine names. The results list will only include information about the + // DB instances for these engines. Filters []*types.Filter // An optional pagination token provided by a previous DescribeDBInstances request. 
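The Filters documentation for DescribeDBInstances above maps directly onto the generated input type. The following is a minimal sketch (not part of the generated patch) of how the "engine" filter might be populated. It assumes the pointer-slice shapes visible in this changeset (Filters []*types.Filter, []*string values) and that types.Filter exposes Name and Values fields; the names strPtr and describeByEngine are illustrative helpers, and later SDK releases switched to value slices, so the exact shapes may differ.

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/rds"
	"github.com/aws/aws-sdk-go-v2/service/rds/types"
)

// strPtr is a small illustrative helper for building *string fields.
func strPtr(s string) *string { return &s }

// describeByEngine lists the identifiers of DB instances running the named
// engine, using the "engine" filter described in DescribeDBInstancesInput.Filters.
func describeByEngine(ctx context.Context, client *rds.Client, engine string) error {
	out, err := client.DescribeDBInstances(ctx, &rds.DescribeDBInstancesInput{
		Filters: []*types.Filter{
			{Name: strPtr("engine"), Values: []*string{strPtr(engine)}},
		},
	})
	if err != nil {
		return err
	}
	for _, db := range out.DBInstances {
		if db != nil && db.DBInstanceIdentifier != nil {
			fmt.Println(*db.DBInstanceIdentifier)
		}
	}
	return nil
}

Client construction is omitted on purpose, since it depends on which pre-release snapshot of the SDK this patch targets.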
diff --git a/service/rds/api_op_DescribeDBLogFiles.go b/service/rds/api_op_DescribeDBLogFiles.go index f1d2231a375..244434b548f 100644 --- a/service/rds/api_op_DescribeDBLogFiles.go +++ b/service/rds/api_op_DescribeDBLogFiles.go @@ -33,7 +33,7 @@ type DescribeDBLogFilesInput struct { // The customer-assigned name of the DB instance that contains the log files you // want to list. Constraints: // - // * Must match the identifier of an existing + // * Must match the identifier of an existing // DBInstance. // // This member is required. diff --git a/service/rds/api_op_DescribeDBParameterGroups.go b/service/rds/api_op_DescribeDBParameterGroups.go index d78359d8d7c..560ba2bf53d 100644 --- a/service/rds/api_op_DescribeDBParameterGroups.go +++ b/service/rds/api_op_DescribeDBParameterGroups.go @@ -34,8 +34,8 @@ type DescribeDBParameterGroupsInput struct { // The name of a specific DB parameter group to return details for. Constraints: // - // - // * If supplied, must match the name of an existing DBClusterParameterGroup. + // * + // If supplied, must match the name of an existing DBClusterParameterGroup. DBParameterGroupName *string // This parameter isn't currently supported. diff --git a/service/rds/api_op_DescribeDBParameters.go b/service/rds/api_op_DescribeDBParameters.go index 46c3fb07f02..f2f1dfceb88 100644 --- a/service/rds/api_op_DescribeDBParameters.go +++ b/service/rds/api_op_DescribeDBParameters.go @@ -31,8 +31,8 @@ type DescribeDBParametersInput struct { // The name of a specific DB parameter group to return details for. Constraints: // - // - // * If supplied, must match the name of an existing DBParameterGroup. + // * + // If supplied, must match the name of an existing DBParameterGroup. // // This member is required. DBParameterGroupName *string diff --git a/service/rds/api_op_DescribeDBSnapshots.go b/service/rds/api_op_DescribeDBSnapshots.go index 8c3f3d2285b..5d2b5e7633c 100644 --- a/service/rds/api_op_DescribeDBSnapshots.go +++ b/service/rds/api_op_DescribeDBSnapshots.go @@ -34,19 +34,19 @@ type DescribeDBSnapshotsInput struct { // parameter can't be used in conjunction with DBSnapshotIdentifier. This parameter // isn't case-sensitive. Constraints: // - // * If supplied, must match the identifier - // of an existing DBInstance. + // * If supplied, must match the identifier of + // an existing DBInstance. DBInstanceIdentifier *string // A specific DB snapshot identifier to describe. This parameter can't be used in // conjunction with DBInstanceIdentifier. This value is stored as a lowercase // string. Constraints: // - // * If supplied, must match the identifier of an - // existing DBSnapshot. + // * If supplied, must match the identifier of an existing + // DBSnapshot. // - // * If this identifier is for an automated snapshot, the - // SnapshotType parameter must also be specified. + // * If this identifier is for an automated snapshot, the SnapshotType + // parameter must also be specified. DBSnapshotIdentifier *string // A specific DB resource ID to describe. @@ -55,19 +55,19 @@ type DescribeDBSnapshotsInput struct { // A filter that specifies one or more DB snapshots to describe. Supported // filters: // - // * db-instance-id - Accepts DB instance identifiers and DB instance + // * db-instance-id - Accepts DB instance identifiers and DB instance // Amazon Resource Names (ARNs). // - // * db-snapshot-id - Accepts DB snapshot + // * db-snapshot-id - Accepts DB snapshot // identifiers. // - // * dbi-resource-id - Accepts identifiers of source DB - // instances. 
+ // * dbi-resource-id - Accepts identifiers of source DB instances. // - // * snapshot-type - Accepts types of DB snapshots. + // * + // snapshot-type - Accepts types of DB snapshots. // - // * engine - - // Accepts names of database engines. + // * engine - Accepts names of + // database engines. Filters []*types.Filter // A value that indicates whether to include manual DB cluster snapshots that are @@ -97,21 +97,21 @@ type DescribeDBSnapshotsInput struct { // The type of snapshots to be returned. You can specify one of the following // values: // - // * automated - Return all DB snapshots that have been automatically + // * automated - Return all DB snapshots that have been automatically // taken by Amazon RDS for my AWS account. // - // * manual - Return all DB snapshots - // that have been taken by my AWS account. + // * manual - Return all DB snapshots that + // have been taken by my AWS account. // - // * shared - Return all manual DB - // snapshots that have been shared to my AWS account. + // * shared - Return all manual DB snapshots + // that have been shared to my AWS account. // - // * public - Return all DB - // snapshots that have been marked as public. + // * public - Return all DB snapshots + // that have been marked as public. // - // * awsbackup - Return the DB - // snapshots managed by the AWS Backup service. For information about AWS Backup, - // see the AWS Backup Developer Guide. + // * awsbackup - Return the DB snapshots managed + // by the AWS Backup service. For information about AWS Backup, see the AWS Backup + // Developer Guide. // (https://docs.aws.amazon.com/aws-backup/latest/devguide/whatisbackup.html) The // awsbackup type does not apply to Aurora. // diff --git a/service/rds/api_op_DescribeEvents.go b/service/rds/api_op_DescribeEvents.go index 79151f7534f..ca60555e4d7 100644 --- a/service/rds/api_op_DescribeEvents.go +++ b/service/rds/api_op_DescribeEvents.go @@ -65,31 +65,30 @@ type DescribeEventsInput struct { // The identifier of the event source for which events are returned. If not // specified, then all sources are included in the response. Constraints: // - // * If + // * If // SourceIdentifier is supplied, SourceType must also be provided. // - // * If the - // source type is a DB instance, a DBInstanceIdentifier value must be supplied. + // * If the source + // type is a DB instance, a DBInstanceIdentifier value must be supplied. // + // * If the + // source type is a DB cluster, a DBClusterIdentifier value must be supplied. // - // * If the source type is a DB cluster, a DBClusterIdentifier value must be + // * If + // the source type is a DB parameter group, a DBParameterGroupName value must be // supplied. // - // * If the source type is a DB parameter group, a - // DBParameterGroupName value must be supplied. - // - // * If the source type is a DB - // security group, a DBSecurityGroupName value must be supplied. - // - // * If the - // source type is a DB snapshot, a DBSnapshotIdentifier value must be supplied. + // * If the source type is a DB security group, a DBSecurityGroupName + // value must be supplied. // + // * If the source type is a DB snapshot, a + // DBSnapshotIdentifier value must be supplied. // - // * If the source type is a DB cluster snapshot, a DBClusterSnapshotIdentifier - // value must be supplied. + // * If the source type is a DB + // cluster snapshot, a DBClusterSnapshotIdentifier value must be supplied. // - // * Can't end with a hyphen or contain two - // consecutive hyphens. 
+ // * Can't + // end with a hyphen or contain two consecutive hyphens. SourceIdentifier *string // The event source to retrieve events for. If no value is specified, all events diff --git a/service/rds/api_op_DescribeExportTasks.go b/service/rds/api_op_DescribeExportTasks.go index c53324366a3..af8f30e1d71 100644 --- a/service/rds/api_op_DescribeExportTasks.go +++ b/service/rds/api_op_DescribeExportTasks.go @@ -37,18 +37,17 @@ type DescribeExportTasksInput struct { // specified as name-value pairs that define what to include in the output. Filter // names and values are case-sensitive. Supported filters include the following: // + // * + // export-task-identifier - An identifier for the snapshot export task. // - // * export-task-identifier - An identifier for the snapshot export task. - // - // * + // * // s3-bucket - The Amazon S3 bucket the snapshot is exported to. // - // * source-arn - // - The Amazon Resource Name (ARN) of the snapshot exported to Amazon S3 + // * source-arn - + // The Amazon Resource Name (ARN) of the snapshot exported to Amazon S3 // - // * - // status - The status of the export task. Must be lowercase, for example, - // complete. + // * status - + // The status of the export task. Must be lowercase, for example, complete. Filters []*types.Filter // An optional pagination token provided by a previous DescribeExportTasks request. diff --git a/service/rds/api_op_DescribeGlobalClusters.go b/service/rds/api_op_DescribeGlobalClusters.go index 802733f9bde..1b35ef0bd10 100644 --- a/service/rds/api_op_DescribeGlobalClusters.go +++ b/service/rds/api_op_DescribeGlobalClusters.go @@ -35,16 +35,16 @@ type DescribeGlobalClustersInput struct { // A filter that specifies one or more global DB clusters to describe. Supported // filters: // - // * db-cluster-id - Accepts DB cluster identifiers and DB cluster - // Amazon Resource Names (ARNs). The results list will only include information - // about the DB clusters identified by these ARNs. + // * db-cluster-id - Accepts DB cluster identifiers and DB cluster Amazon + // Resource Names (ARNs). The results list will only include information about the + // DB clusters identified by these ARNs. Filters []*types.Filter // The user-supplied DB cluster identifier. If this parameter is specified, // information from only the specific DB cluster is returned. This parameter isn't // case-sensitive. Constraints: // - // * If supplied, must match an existing + // * If supplied, must match an existing // DBClusterIdentifier. GlobalClusterIdentifier *string diff --git a/service/rds/api_op_DescribeInstallationMedia.go b/service/rds/api_op_DescribeInstallationMedia.go index 39a507f70ff..08841835531 100644 --- a/service/rds/api_op_DescribeInstallationMedia.go +++ b/service/rds/api_op_DescribeInstallationMedia.go @@ -33,14 +33,14 @@ type DescribeInstallationMediaInput struct { // A filter that specifies one or more installation media to describe. Supported // filters include the following: // - // * custom-availability-zone-id - Accepts - // custom Availability Zone (AZ) identifiers. The results list includes information - // about only the custom AZs identified by these identifiers. + // * custom-availability-zone-id - Accepts custom + // Availability Zone (AZ) identifiers. The results list includes information about + // only the custom AZs identified by these identifiers. // - // * engine - - // Accepts database engines. The results list includes information about only the - // database engines identified by these identifiers. 
For more information about the - // valid engines for installation media, see ImportInstallationMedia. + // * engine - Accepts + // database engines. The results list includes information about only the database + // engines identified by these identifiers. For more information about the valid + // engines for installation media, see ImportInstallationMedia. Filters []*types.Filter // The installation medium ID. diff --git a/service/rds/api_op_DescribePendingMaintenanceActions.go b/service/rds/api_op_DescribePendingMaintenanceActions.go index 963ffb9b13b..8d1f6031380 100644 --- a/service/rds/api_op_DescribePendingMaintenanceActions.go +++ b/service/rds/api_op_DescribePendingMaintenanceActions.go @@ -34,13 +34,13 @@ type DescribePendingMaintenanceActionsInput struct { // A filter that specifies one or more resources to return pending maintenance // actions for. Supported filters: // - // * db-cluster-id - Accepts DB cluster + // * db-cluster-id - Accepts DB cluster // identifiers and DB cluster Amazon Resource Names (ARNs). The results list will // only include pending maintenance actions for the DB clusters identified by these // ARNs. // - // * db-instance-id - Accepts DB instance identifiers and DB instance - // ARNs. The results list will only include pending maintenance actions for the DB + // * db-instance-id - Accepts DB instance identifiers and DB instance ARNs. + // The results list will only include pending maintenance actions for the DB // instances identified by these ARNs. Filters []*types.Filter diff --git a/service/rds/api_op_DescribeSourceRegions.go b/service/rds/api_op_DescribeSourceRegions.go index 3b50c77ac88..27e2c5c7df1 100644 --- a/service/rds/api_op_DescribeSourceRegions.go +++ b/service/rds/api_op_DescribeSourceRegions.go @@ -47,8 +47,8 @@ type DescribeSourceRegionsInput struct { // The source AWS Region name. For example, us-east-1. Constraints: // - // * Must - // specify a valid AWS Region name. + // * Must specify + // a valid AWS Region name. RegionName *string } diff --git a/service/rds/api_op_DownloadDBLogFilePortion.go b/service/rds/api_op_DownloadDBLogFilePortion.go index a7686f2592a..5a0a04186ca 100644 --- a/service/rds/api_op_DownloadDBLogFilePortion.go +++ b/service/rds/api_op_DownloadDBLogFilePortion.go @@ -32,7 +32,7 @@ type DownloadDBLogFilePortionInput struct { // The customer-assigned name of the DB instance that contains the log files you // want to list. Constraints: // - // * Must match the identifier of an existing + // * Must match the identifier of an existing // DBInstance. // // This member is required. @@ -54,19 +54,19 @@ type DownloadDBLogFilePortionInput struct { // from the beginning or the end of the log file, depending on the value of the // Marker parameter. // - // * If neither Marker or NumberOfLines are specified, the + // * If neither Marker or NumberOfLines are specified, the // entire log file is returned up to a maximum of 10000 lines, starting with the // most recent log entries first. // - // * If NumberOfLines is specified and Marker - // isn't specified, then the most recent lines from the end of the log file are + // * If NumberOfLines is specified and Marker isn't + // specified, then the most recent lines from the end of the log file are // returned. // - // * If Marker is specified as "0", then the specified number of - // lines from the beginning of the log file are returned. + // * If Marker is specified as "0", then the specified number of lines + // from the beginning of the log file are returned. 
// - // * You can download - // the log file in blocks of lines by specifying the size of the block using the + // * You can download the log + // file in blocks of lines by specifying the size of the block using the // NumberOfLines parameter, and by specifying a value of "0" for the Marker // parameter in your first request. Include the Marker value returned in the // response as the Marker value for the next request, continuing until the diff --git a/service/rds/api_op_FailoverDBCluster.go b/service/rds/api_op_FailoverDBCluster.go index 96beb784baf..c2436c1cc46 100644 --- a/service/rds/api_op_FailoverDBCluster.go +++ b/service/rds/api_op_FailoverDBCluster.go @@ -43,7 +43,7 @@ type FailoverDBClusterInput struct { // A DB cluster identifier to force a failover for. This parameter isn't // case-sensitive. Constraints: // - // * Must match the identifier of an existing + // * Must match the identifier of an existing // DBCluster. // // This member is required. diff --git a/service/rds/api_op_ImportInstallationMedia.go b/service/rds/api_op_ImportInstallationMedia.go index b444af1b8df..577eef455cd 100644 --- a/service/rds/api_op_ImportInstallationMedia.go +++ b/service/rds/api_op_ImportInstallationMedia.go @@ -40,14 +40,14 @@ type ImportInstallationMediaInput struct { // includes supported DB engines that require an on-premises customer provided // license. Valid Values: // - // * sqlserver-ee + // * sqlserver-ee // - // * sqlserver-se + // * sqlserver-se // - // * - // sqlserver-ex + // * sqlserver-ex // - // * sqlserver-web + // * + // sqlserver-web // // This member is required. Engine *string diff --git a/service/rds/api_op_ModifyCertificates.go b/service/rds/api_op_ModifyCertificates.go index 2106537360c..364584521c9 100644 --- a/service/rds/api_op_ModifyCertificates.go +++ b/service/rds/api_op_ModifyCertificates.go @@ -19,12 +19,12 @@ import ( // new DB instances use the default certificate provided by RDS. You might need to // override the default certificate in the following situations: // -// * You already +// * You already // migrated your applications to support the latest certificate authority (CA) // certificate, but the new CA certificate is not yet the RDS default CA // certificate for the specified AWS Region. // -// * RDS has already moved to a new +// * RDS has already moved to a new // default CA certificate for the specified AWS Region, but you are still in the // process of supporting the new CA certificate. In this case, you temporarily need // additional time to finish your application changes. diff --git a/service/rds/api_op_ModifyCurrentDBClusterCapacity.go b/service/rds/api_op_ModifyCurrentDBClusterCapacity.go index 310f7312ab2..a14d162a11a 100644 --- a/service/rds/api_op_ModifyCurrentDBClusterCapacity.go +++ b/service/rds/api_op_ModifyCurrentDBClusterCapacity.go @@ -46,7 +46,7 @@ type ModifyCurrentDBClusterCapacityInput struct { // The DB cluster identifier for the cluster being modified. This parameter isn't // case-sensitive. Constraints: // - // * Must match the identifier of an existing DB + // * Must match the identifier of an existing DB // cluster. // // This member is required. @@ -55,19 +55,18 @@ type ModifyCurrentDBClusterCapacityInput struct { // The DB cluster capacity. When you change the capacity of a paused Aurora // Serverless DB cluster, it automatically resumes. Constraints: // - // * For Aurora + // * For Aurora // MySQL, valid capacity values are 1, 2, 4, 8, 16, 32, 64, 128, and 256. 
// - // * - // For Aurora PostgreSQL, valid capacity values are 2, 4, 8, 16, 32, 64, 192, and - // 384. + // * For + // Aurora PostgreSQL, valid capacity values are 2, 4, 8, 16, 32, 64, 192, and 384. Capacity *int32 // The amount of time, in seconds, that Aurora Serverless tries to find a scaling // point to perform seamless scaling before enforcing the timeout action. The // default is 300. // - // * Value must be from 10 through 600. + // * Value must be from 10 through 600. SecondsBeforeTimeout *int32 // The action to take when the timeout is reached, either ForceApplyCapacityChange diff --git a/service/rds/api_op_ModifyDBCluster.go b/service/rds/api_op_ModifyDBCluster.go index dbae8a98cf6..da9fe5c37e6 100644 --- a/service/rds/api_op_ModifyDBCluster.go +++ b/service/rds/api_op_ModifyDBCluster.go @@ -65,14 +65,14 @@ type ModifyDBClusterInput struct { // to 0. Currently, Backtrack is only supported for Aurora MySQL DB clusters. // Default: 0 Constraints: // - // * If specified, this value must be set to a number - // from 0 to 259,200 (72 hours). + // * If specified, this value must be set to a number from + // 0 to 259,200 (72 hours). BacktrackWindow *int64 // The number of days for which automated backups are retained. You must specify a // minimum value of 1. Default: 1 Constraints: // - // * Must be a value from 1 to 35 + // * Must be a value from 1 to 35 BackupRetentionPeriod *int32 // The configuration setting for the log types to be enabled for export to @@ -92,12 +92,12 @@ type ModifyDBClusterInput struct { // aren't applied during the next maintenance window but instead are applied // immediately. Default: The existing name setting Constraints: // - // * The DB - // parameter group must be in the same DB parameter group family as this DB - // cluster. + // * The DB parameter + // group must be in the same DB parameter group family as this DB cluster. // - // * The DBInstanceParameterGroupName parameter is only valid in - // combination with the AllowMajorVersionUpgrade parameter. + // * The + // DBInstanceParameterGroupName parameter is only valid in combination with the + // AllowMajorVersionUpgrade parameter. DBInstanceParameterGroupName *string // A value that indicates whether the DB cluster has deletion protection enabled. @@ -162,14 +162,13 @@ type ModifyDBClusterInput struct { // The new DB cluster identifier for the DB cluster when renaming a DB cluster. // This value is stored as a lowercase string. Constraints: // - // * Must contain - // from 1 to 63 letters, numbers, or hyphens + // * Must contain from 1 + // to 63 letters, numbers, or hyphens // - // * The first character must be a - // letter + // * The first character must be a letter // - // * Can't end with a hyphen or contain two consecutive - // hyphens + // * + // Can't end with a hyphen or contain two consecutive hyphens // // Example: my-cluster2 NewDBClusterIdentifier *string @@ -197,16 +196,15 @@ type ModifyDBClusterInput struct { // (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_UpgradeDBInstance.Maintenance.html#AdjustingTheMaintenanceWindow.Aurora) // in the Amazon Aurora User Guide. Constraints: // - // * Must be in the format + // * Must be in the format // hh24:mi-hh24:mi. // - // * Must be in Universal Coordinated Time (UTC). + // * Must be in Universal Coordinated Time (UTC). // - // * Must - // not conflict with the preferred maintenance window. + // * Must not + // conflict with the preferred maintenance window. // - // * Must be at least 30 - // minutes. 
+ // * Must be at least 30 minutes. PreferredBackupWindow *string // The weekly time range during which system maintenance can occur, in Universal diff --git a/service/rds/api_op_ModifyDBClusterEndpoint.go b/service/rds/api_op_ModifyDBClusterEndpoint.go index 612c75cad1e..4159da6a3e8 100644 --- a/service/rds/api_op_ModifyDBClusterEndpoint.go +++ b/service/rds/api_op_ModifyDBClusterEndpoint.go @@ -51,17 +51,17 @@ type ModifyDBClusterEndpointInput struct { // Aurora DB cluster. This data type is used as a response element in the following // actions: // -// * CreateDBClusterEndpoint +// * CreateDBClusterEndpoint // -// * DescribeDBClusterEndpoints +// * DescribeDBClusterEndpoints // -// * +// * // ModifyDBClusterEndpoint // -// * DeleteDBClusterEndpoint +// * DeleteDBClusterEndpoint // -// For the data structure -// that represents Amazon RDS DB instance endpoints, see Endpoint. +// For the data structure that +// represents Amazon RDS DB instance endpoints, see Endpoint. type ModifyDBClusterEndpointOutput struct { // The type associated with a custom endpoint. One of: READER, WRITER, ANY. diff --git a/service/rds/api_op_ModifyDBClusterParameterGroup.go b/service/rds/api_op_ModifyDBClusterParameterGroup.go index 32a448bc1fe..ee7d99b978e 100644 --- a/service/rds/api_op_ModifyDBClusterParameterGroup.go +++ b/service/rds/api_op_ModifyDBClusterParameterGroup.go @@ -68,16 +68,16 @@ type ModifyDBClusterParameterGroupOutput struct { // The name of the DB cluster parameter group. Constraints: // - // * Must be 1 to 255 + // * Must be 1 to 255 // letters or numbers. // - // * First character must be a letter + // * First character must be a letter // - // * Can't end - // with a hyphen or contain two consecutive hyphens + // * Can't end with a + // hyphen or contain two consecutive hyphens // - // This value is stored as a - // lowercase string. + // This value is stored as a lowercase + // string. DBClusterParameterGroupName *string // Metadata pertaining to the operation's result. diff --git a/service/rds/api_op_ModifyDBInstance.go b/service/rds/api_op_ModifyDBInstance.go index 4b20ee0fa12..3d00037b3bd 100644 --- a/service/rds/api_op_ModifyDBInstance.go +++ b/service/rds/api_op_ModifyDBInstance.go @@ -36,7 +36,7 @@ type ModifyDBInstanceInput struct { // The DB instance identifier. This value is stored as a lowercase string. // Constraints: // - // * Must match the identifier of an existing DBInstance. + // * Must match the identifier of an existing DBInstance. // // This member is required. DBInstanceIdentifier *string @@ -88,17 +88,17 @@ type ModifyDBInstanceInput struct { // the DB cluster. For more information, see ModifyDBCluster. Default: Uses // existing setting Constraints: // - // * Must be a value from 0 to 35 + // * Must be a value from 0 to 35 // - // * Can be + // * Can be // specified for a MySQL read replica only if the source is running MySQL 5.6 or // later // - // * Can be specified for a PostgreSQL read replica only if the source - // is running PostgreSQL 9.3.5 + // * Can be specified for a PostgreSQL read replica only if the source is + // running PostgreSQL 9.3.5 // - // * Can't be set to 0 if the DB instance is a - // source to read replicas + // * Can't be set to 0 if the DB instance is a source to + // read replicas BackupRetentionPeriod *int32 // Indicates the certificate that needs to be associated with the instance. 
@@ -112,12 +112,12 @@ type ModifyDBInstanceInput struct { // the appropriate instructions for your DB engine to rotate your SSL/TLS // certificate: // - // * For more information about rotating your SSL/TLS certificate - // for RDS DB engines, see Rotating Your SSL/TLS Certificate. + // * For more information about rotating your SSL/TLS certificate for + // RDS DB engines, see Rotating Your SSL/TLS Certificate. // (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.SSL-certificate-rotation.html) // in the Amazon RDS User Guide. // - // * For more information about rotating your + // * For more information about rotating your // SSL/TLS certificate for Aurora DB engines, see Rotating Your SSL/TLS // Certificate // (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.SSL-certificate-rotation.html) @@ -173,7 +173,7 @@ type ModifyDBInstanceInput struct { // setting doesn't result in an outage and the change is asynchronously applied as // soon as possible. Constraints: // - // * If supplied, must match existing + // * If supplied, must match existing // DBSecurityGroups. DBSecurityGroups []*string @@ -309,13 +309,13 @@ type ModifyDBInstanceInput struct { // maintenance window if you disable Apply Immediately. This value is stored as a // lowercase string. Constraints: // - // * Must contain from 1 to 63 letters, - // numbers, or hyphens. + // * Must contain from 1 to 63 letters, numbers, or + // hyphens. // - // * The first character must be a letter. + // * The first character must be a letter. // - // * Can't - // end with a hyphen or contain two consecutive hyphens. + // * Can't end with a hyphen or + // contain two consecutive hyphens. // // Example: mydbinstance NewDBInstanceIdentifier *string @@ -351,16 +351,15 @@ type ModifyDBInstanceInput struct { // daily time range for creating automated backups is managed by the DB cluster. // For more information, see ModifyDBCluster. Constraints: // - // * Must be in the - // format hh24:mi-hh24:mi + // * Must be in the format + // hh24:mi-hh24:mi // - // * Must be in Universal Time Coordinated (UTC) + // * Must be in Universal Time Coordinated (UTC) // - // * - // Must not conflict with the preferred maintenance window + // * Must not + // conflict with the preferred maintenance window // - // * Must be at least - // 30 minutes + // * Must be at least 30 minutes PreferredBackupWindow *string // The weekly time range (in UTC) during which system maintenance can occur, which @@ -446,8 +445,8 @@ type ModifyDBInstanceInput struct { // associated list of EC2 VPC security groups is managed by the DB cluster. For // more information, see ModifyDBCluster. Constraints: // - // * If supplied, must - // match existing VpcSecurityGroupIds. + // * If supplied, must match + // existing VpcSecurityGroupIds. VpcSecurityGroupIds []*string } diff --git a/service/rds/api_op_ModifyDBParameterGroup.go b/service/rds/api_op_ModifyDBParameterGroup.go index fe672e7cf7b..3dddd23d019 100644 --- a/service/rds/api_op_ModifyDBParameterGroup.go +++ b/service/rds/api_op_ModifyDBParameterGroup.go @@ -47,8 +47,8 @@ type ModifyDBParameterGroupInput struct { // The name of the DB parameter group. Constraints: // - // * If supplied, must match - // the name of an existing DBParameterGroup. + // * If supplied, must match the + // name of an existing DBParameterGroup. // // This member is required. 
DBParameterGroupName *string diff --git a/service/rds/api_op_ModifyDBSnapshot.go b/service/rds/api_op_ModifyDBSnapshot.go index 86799184222..60ad38213cc 100644 --- a/service/rds/api_op_ModifyDBSnapshot.go +++ b/service/rds/api_op_ModifyDBSnapshot.go @@ -40,21 +40,21 @@ type ModifyDBSnapshotInput struct { // engines and engine versions that are available when you upgrade a DB snapshot. // MySQL // - // * 5.5.46 (supported for 5.1 DB snapshots) + // * 5.5.46 (supported for 5.1 DB snapshots) // // Oracle // - // * 12.1.0.2.v8 + // * 12.1.0.2.v8 // (supported for 12.1.0.1 DB snapshots) // - // * 11.2.0.4.v12 (supported for - // 11.2.0.2 DB snapshots) - // - // * 11.2.0.4.v11 (supported for 11.2.0.3 DB + // * 11.2.0.4.v12 (supported for 11.2.0.2 DB // snapshots) // - // PostgreSQL For the list of engine versions that are available for - // upgrading a DB snapshot, see Upgrading the PostgreSQL DB Engine for Amazon RDS + // * 11.2.0.4.v11 (supported for 11.2.0.3 DB snapshots) + // + // PostgreSQL For + // the list of engine versions that are available for upgrading a DB snapshot, see + // Upgrading the PostgreSQL DB Engine for Amazon RDS // (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_UpgradeDBInstance.PostgreSQL.html#USER_UpgradeDBInstance.PostgreSQL.MajorVersion). EngineVersion *string diff --git a/service/rds/api_op_ModifyGlobalCluster.go b/service/rds/api_op_ModifyGlobalCluster.go index 7944e009f94..3cb1c915b06 100644 --- a/service/rds/api_op_ModifyGlobalCluster.go +++ b/service/rds/api_op_ModifyGlobalCluster.go @@ -41,21 +41,21 @@ type ModifyGlobalClusterInput struct { // The DB cluster identifier for the global cluster being modified. This parameter // isn't case-sensitive. Constraints: // - // * Must match the identifier of an - // existing global database cluster. + // * Must match the identifier of an existing + // global database cluster. GlobalClusterIdentifier *string // The new cluster identifier for the global database cluster when modifying a // global database cluster. This value is stored as a lowercase string. // Constraints: // - // * Must contain from 1 to 63 letters, numbers, or hyphens + // * Must contain from 1 to 63 letters, numbers, or hyphens // + // * The + // first character must be a letter // - // * The first character must be a letter - // - // * Can't end with a hyphen or contain - // two consecutive hyphens + // * Can't end with a hyphen or contain two + // consecutive hyphens // // Example: my-cluster2 NewGlobalClusterIdentifier *string diff --git a/service/rds/api_op_PromoteReadReplica.go b/service/rds/api_op_PromoteReadReplica.go index 3c25eb335a3..447f82aefa0 100644 --- a/service/rds/api_op_PromoteReadReplica.go +++ b/service/rds/api_op_PromoteReadReplica.go @@ -13,7 +13,7 @@ import ( // Promotes a read replica DB instance to a standalone DB instance. // -// * Backup +// * Backup // duration is a function of the amount of changes to the database since the // previous backup. If you plan to promote a read replica to a standalone instance, // we recommend that you enable backups and complete at least one backup prior to @@ -22,8 +22,8 @@ import ( // your read replica, configure the automated backup window so that daily backups // do not interfere with read replica promotion. // -// * This command doesn't apply -// to Aurora MySQL and Aurora PostgreSQL. +// * This command doesn't apply to +// Aurora MySQL and Aurora PostgreSQL. 
func (c *Client) PromoteReadReplica(ctx context.Context, params *PromoteReadReplicaInput, optFns ...func(*Options)) (*PromoteReadReplicaOutput, error) { if params == nil { params = &PromoteReadReplicaInput{} @@ -45,7 +45,7 @@ type PromoteReadReplicaInput struct { // The DB instance identifier. This value is stored as a lowercase string. // Constraints: // - // * Must match the identifier of an existing read replica DB + // * Must match the identifier of an existing read replica DB // instance. // // Example: mydbinstance @@ -57,11 +57,10 @@ type PromoteReadReplicaInput struct { // parameter to a positive number enables backups. Setting this parameter to 0 // disables automated backups. Default: 1 Constraints: // - // * Must be a value from - // 0 to 35. + // * Must be a value from 0 to + // 35. // - // * Can't be set to 0 if the DB instance is a source to read - // replicas. + // * Can't be set to 0 if the DB instance is a source to read replicas. BackupRetentionPeriod *int32 // The daily time range during which automated backups are created if automated @@ -72,16 +71,15 @@ type PromoteReadReplicaInput struct { // (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/AdjustingTheMaintenanceWindow.html) // in the Amazon RDS User Guide. Constraints: // - // * Must be in the format + // * Must be in the format // hh24:mi-hh24:mi. // - // * Must be in Universal Coordinated Time (UTC). + // * Must be in Universal Coordinated Time (UTC). // - // * Must - // not conflict with the preferred maintenance window. + // * Must not + // conflict with the preferred maintenance window. // - // * Must be at least 30 - // minutes. + // * Must be at least 30 minutes. PreferredBackupWindow *string } diff --git a/service/rds/api_op_PromoteReadReplicaDBCluster.go b/service/rds/api_op_PromoteReadReplicaDBCluster.go index 7cc649c77c9..3546a8329a1 100644 --- a/service/rds/api_op_PromoteReadReplicaDBCluster.go +++ b/service/rds/api_op_PromoteReadReplicaDBCluster.go @@ -34,7 +34,7 @@ type PromoteReadReplicaDBClusterInput struct { // The identifier of the DB cluster read replica to promote. This parameter isn't // case-sensitive. Constraints: // - // * Must match the identifier of an existing DB + // * Must match the identifier of an existing DB // cluster read replica. // // Example: my-cluster-replica1 diff --git a/service/rds/api_op_RebootDBInstance.go b/service/rds/api_op_RebootDBInstance.go index b0d9a2e7014..4d5cf739dbe 100644 --- a/service/rds/api_op_RebootDBInstance.go +++ b/service/rds/api_op_RebootDBInstance.go @@ -41,7 +41,7 @@ type RebootDBInstanceInput struct { // The DB instance identifier. This parameter is stored as a lowercase string. // Constraints: // - // * Must match the identifier of an existing DBInstance. + // * Must match the identifier of an existing DBInstance. // // This member is required. DBInstanceIdentifier *string diff --git a/service/rds/api_op_ResetDBClusterParameterGroup.go b/service/rds/api_op_ResetDBClusterParameterGroup.go index b4945e41ec5..20f358a49a3 100644 --- a/service/rds/api_op_ResetDBClusterParameterGroup.go +++ b/service/rds/api_op_ResetDBClusterParameterGroup.go @@ -63,16 +63,16 @@ type ResetDBClusterParameterGroupOutput struct { // The name of the DB cluster parameter group. Constraints: // - // * Must be 1 to 255 + // * Must be 1 to 255 // letters or numbers. 
// - // * First character must be a letter + // * First character must be a letter // - // * Can't end - // with a hyphen or contain two consecutive hyphens + // * Can't end with a + // hyphen or contain two consecutive hyphens // - // This value is stored as a - // lowercase string. + // This value is stored as a lowercase + // string. DBClusterParameterGroupName *string // Metadata pertaining to the operation's result. diff --git a/service/rds/api_op_ResetDBParameterGroup.go b/service/rds/api_op_ResetDBParameterGroup.go index 527fe2b17b1..48ebf210dcb 100644 --- a/service/rds/api_op_ResetDBParameterGroup.go +++ b/service/rds/api_op_ResetDBParameterGroup.go @@ -38,8 +38,8 @@ type ResetDBParameterGroupInput struct { // The name of the DB parameter group. Constraints: // - // * Must match the name of - // an existing DBParameterGroup. + // * Must match the name of an + // existing DBParameterGroup. // // This member is required. DBParameterGroupName *string diff --git a/service/rds/api_op_RestoreDBClusterFromS3.go b/service/rds/api_op_RestoreDBClusterFromS3.go index 28b9654c0c2..e1892f6e49b 100644 --- a/service/rds/api_op_RestoreDBClusterFromS3.go +++ b/service/rds/api_op_RestoreDBClusterFromS3.go @@ -46,16 +46,16 @@ type RestoreDBClusterFromS3Input struct { // The name of the DB cluster to create from the source data in the Amazon S3 // bucket. This parameter isn't case-sensitive. Constraints: // - // * Must contain - // from 1 to 63 letters, numbers, or hyphens. + // * Must contain from 1 + // to 63 letters, numbers, or hyphens. // - // * First character must be a - // letter. + // * First character must be a letter. // - // * Can't end with a hyphen or contain two consecutive - // hyphens. + // * + // Can't end with a hyphen or contain two consecutive hyphens. // - // Example: my-cluster1 + // Example: + // my-cluster1 // // This member is required. DBClusterIdentifier *string @@ -76,13 +76,13 @@ type RestoreDBClusterFromS3Input struct { // The name of the master user for the restored DB cluster. Constraints: // - // * - // Must be 1 to 16 letters or numbers. + // * Must be + // 1 to 16 letters or numbers. // - // * First character must be a letter. + // * First character must be a letter. // - // - // * Can't be a reserved word for the chosen database engine. + // * Can't be a + // reserved word for the chosen database engine. // // This member is required. MasterUsername *string @@ -119,15 +119,15 @@ type RestoreDBClusterFromS3Input struct { // to 0. Currently, Backtrack is only supported for Aurora MySQL DB clusters. // Default: 0 Constraints: // - // * If specified, this value must be set to a number - // from 0 to 259,200 (72 hours). + // * If specified, this value must be set to a number from + // 0 to 259,200 (72 hours). BacktrackWindow *int64 // The number of days for which automated backups of the restored DB cluster are // retained. You must specify a minimum value of 1. Default: 1 Constraints: // - // * - // Must be a value from 1 to 35 + // * Must + // be a value from 1 to 35 BackupRetentionPeriod *int32 // A value that indicates that the restored DB cluster should be associated with @@ -141,8 +141,8 @@ type RestoreDBClusterFromS3Input struct { // The name of the DB cluster parameter group to associate with the restored DB // cluster. If this argument is omitted, default.aurora5.6 is used. Constraints: // - // - // * If supplied, must match the name of an existing DBClusterParameterGroup. + // * + // If supplied, must match the name of an existing DBClusterParameterGroup. 
DBClusterParameterGroupName *string // A DB subnet group to associate with the restored DB cluster. Constraints: If @@ -227,16 +227,15 @@ type RestoreDBClusterFromS3Input struct { // (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_UpgradeDBInstance.Maintenance.html#AdjustingTheMaintenanceWindow.Aurora) // in the Amazon Aurora User Guide. Constraints: // - // * Must be in the format + // * Must be in the format // hh24:mi-hh24:mi. // - // * Must be in Universal Coordinated Time (UTC). + // * Must be in Universal Coordinated Time (UTC). // - // * Must - // not conflict with the preferred maintenance window. + // * Must not + // conflict with the preferred maintenance window. // - // * Must be at least 30 - // minutes. + // * Must be at least 30 minutes. PreferredBackupWindow *string // The weekly time range during which system maintenance can occur, in Universal diff --git a/service/rds/api_op_RestoreDBClusterFromSnapshot.go b/service/rds/api_op_RestoreDBClusterFromSnapshot.go index 8e530c7d24a..e7a48f495b8 100644 --- a/service/rds/api_op_RestoreDBClusterFromSnapshot.go +++ b/service/rds/api_op_RestoreDBClusterFromSnapshot.go @@ -44,16 +44,16 @@ type RestoreDBClusterFromSnapshotInput struct { // The name of the DB cluster to create from the DB snapshot or DB cluster // snapshot. This parameter isn't case-sensitive. Constraints: // - // * Must contain - // from 1 to 63 letters, numbers, or hyphens + // * Must contain from + // 1 to 63 letters, numbers, or hyphens // - // * First character must be a - // letter + // * First character must be a letter // - // * Can't end with a hyphen or contain two consecutive - // hyphens + // * + // Can't end with a hyphen or contain two consecutive hyphens // - // Example: my-snapshot-id + // Example: + // my-snapshot-id // // This member is required. DBClusterIdentifier *string @@ -69,7 +69,7 @@ type RestoreDBClusterFromSnapshotInput struct { // cluster snapshot. However, you can use only the ARN to specify a DB snapshot. // Constraints: // - // * Must match the identifier of an existing Snapshot. + // * Must match the identifier of an existing Snapshot. // // This member is required. SnapshotIdentifier *string @@ -82,8 +82,8 @@ type RestoreDBClusterFromSnapshotInput struct { // to 0. Currently, Backtrack is only supported for Aurora MySQL DB clusters. // Default: 0 Constraints: // - // * If specified, this value must be set to a number - // from 0 to 259,200 (72 hours). + // * If specified, this value must be set to a number from + // 0 to 259,200 (72 hours). BacktrackWindow *int64 // A value that indicates whether to copy all tags from the restored DB cluster to @@ -94,16 +94,16 @@ type RestoreDBClusterFromSnapshotInput struct { // this argument is omitted, the default DB cluster parameter group for the // specified engine is used. Constraints: // - // * If supplied, must match the name - // of an existing default DB cluster parameter group. + // * If supplied, must match the name of an + // existing default DB cluster parameter group. // - // * Must be 1 to 255 - // letters, numbers, or hyphens. + // * Must be 1 to 255 letters, + // numbers, or hyphens. // - // * First character must be a letter. + // * First character must be a letter. // - // * - // Can't end with a hyphen or contain two consecutive hyphens. + // * Can't end with a + // hyphen or contain two consecutive hyphens. DBClusterParameterGroupName *string // The name of the DB subnet group to use for the new DB cluster. 
Constraints: If @@ -174,12 +174,12 @@ type RestoreDBClusterFromSnapshotInput struct { // ARN for the KMS encryption key. If you don't specify a value for the KmsKeyId // parameter, then the following occurs: // - // * If the DB snapshot or DB cluster + // * If the DB snapshot or DB cluster // snapshot in SnapshotIdentifier is encrypted, then the restored DB cluster is // encrypted using the KMS key that was used to encrypt the DB snapshot or DB // cluster snapshot. // - // * If the DB snapshot or DB cluster snapshot in + // * If the DB snapshot or DB cluster snapshot in // SnapshotIdentifier isn't encrypted, then the restored DB cluster isn't // encrypted. KmsKeyId *string diff --git a/service/rds/api_op_RestoreDBClusterToPointInTime.go b/service/rds/api_op_RestoreDBClusterToPointInTime.go index e26c912a14d..cecef3872df 100644 --- a/service/rds/api_op_RestoreDBClusterToPointInTime.go +++ b/service/rds/api_op_RestoreDBClusterToPointInTime.go @@ -45,21 +45,21 @@ type RestoreDBClusterToPointInTimeInput struct { // The name of the new DB cluster to be created. Constraints: // - // * Must contain - // from 1 to 63 letters, numbers, or hyphens + // * Must contain from + // 1 to 63 letters, numbers, or hyphens // - // * First character must be a - // letter + // * First character must be a letter // - // * Can't end with a hyphen or contain two consecutive hyphens + // * + // Can't end with a hyphen or contain two consecutive hyphens // // This member is required. DBClusterIdentifier *string // The identifier of the source DB cluster from which to restore. Constraints: // - // - // * Must match the identifier of an existing DBCluster. + // * + // Must match the identifier of an existing DBCluster. // // This member is required. SourceDBClusterIdentifier *string @@ -68,8 +68,8 @@ type RestoreDBClusterToPointInTimeInput struct { // to 0. Currently, Backtrack is only supported for Aurora MySQL DB clusters. // Default: 0 Constraints: // - // * If specified, this value must be set to a number - // from 0 to 259,200 (72 hours). + // * If specified, this value must be set to a number from + // 0 to 259,200 (72 hours). BacktrackWindow *int64 // A value that indicates whether to copy all tags from the restored DB cluster to @@ -80,16 +80,16 @@ type RestoreDBClusterToPointInTimeInput struct { // this argument is omitted, the default DB cluster parameter group for the // specified engine is used. Constraints: // - // * If supplied, must match the name - // of an existing DB cluster parameter group. + // * If supplied, must match the name of an + // existing DB cluster parameter group. // - // * Must be 1 to 255 letters, - // numbers, or hyphens. + // * Must be 1 to 255 letters, numbers, or + // hyphens. // - // * First character must be a letter. + // * First character must be a letter. // - // * Can't end - // with a hyphen or contain two consecutive hyphens. + // * Can't end with a hyphen or + // contain two consecutive hyphens. DBClusterParameterGroupName *string // The DB subnet group name to use for the new DB cluster. Constraints: If @@ -139,15 +139,15 @@ type RestoreDBClusterToPointInTimeInput struct { // KmsKeyId parameter. If you don't specify a value for the KmsKeyId parameter, // then the following occurs: // - // * If the DB cluster is encrypted, then the - // restored DB cluster is encrypted using the KMS key that was used to encrypt the - // source DB cluster. 
+ // * If the DB cluster is encrypted, then the restored + // DB cluster is encrypted using the KMS key that was used to encrypt the source DB + // cluster. // - // * If the DB cluster isn't encrypted, then the restored - // DB cluster isn't encrypted. + // * If the DB cluster isn't encrypted, then the restored DB cluster + // isn't encrypted. // - // If DBClusterIdentifier refers to a DB cluster that - // isn't encrypted, then the restore request is rejected. + // If DBClusterIdentifier refers to a DB cluster that isn't + // encrypted, then the restore request is rejected. KmsKeyId *string // The name of the option group for the new DB cluster. @@ -160,17 +160,17 @@ type RestoreDBClusterToPointInTimeInput struct { // The date and time to restore the DB cluster to. Valid Values: Value must be a // time in Universal Coordinated Time (UTC) format Constraints: // - // * Must be - // before the latest restorable time for the DB instance + // * Must be before + // the latest restorable time for the DB instance // - // * Must be specified - // if UseLatestRestorableTime parameter isn't provided + // * Must be specified if + // UseLatestRestorableTime parameter isn't provided // - // * Can't be specified if - // the UseLatestRestorableTime parameter is enabled + // * Can't be specified if the + // UseLatestRestorableTime parameter is enabled // - // * Can't be specified if - // the RestoreType parameter is copy-on-write + // * Can't be specified if the + // RestoreType parameter is copy-on-write // // Example: 2015-03-07T23:45:00Z RestoreToTime *time.Time @@ -178,14 +178,14 @@ type RestoreDBClusterToPointInTimeInput struct { // The type of restore to be performed. You can specify one of the following // values: // - // * full-copy - The new DB cluster is restored as a full copy of the + // * full-copy - The new DB cluster is restored as a full copy of the // source DB cluster. // - // * copy-on-write - The new DB cluster is restored as a - // clone of the source DB cluster. + // * copy-on-write - The new DB cluster is restored as a clone + // of the source DB cluster. // - // Constraints: You can't specify copy-on-write if - // the engine version of the source DB cluster is earlier than 1.11. If you don't + // Constraints: You can't specify copy-on-write if the + // engine version of the source DB cluster is earlier than 1.11. If you don't // specify a RestoreType value, then the new DB cluster is restored as a full copy // of the source DB cluster. RestoreType *string diff --git a/service/rds/api_op_RestoreDBInstanceFromDBSnapshot.go b/service/rds/api_op_RestoreDBInstanceFromDBSnapshot.go index 221e70309f2..637c2254fcc 100644 --- a/service/rds/api_op_RestoreDBInstanceFromDBSnapshot.go +++ b/service/rds/api_op_RestoreDBInstanceFromDBSnapshot.go @@ -49,13 +49,13 @@ type RestoreDBInstanceFromDBSnapshotInput struct { // Name of the DB instance to create from the DB snapshot. This parameter isn't // case-sensitive. Constraints: // - // * Must contain from 1 to 63 numbers, letters, - // or hyphens + // * Must contain from 1 to 63 numbers, letters, or + // hyphens // - // * First character must be a letter + // * First character must be a letter // - // * Can't end with a - // hyphen or contain two consecutive hyphens + // * Can't end with a hyphen or + // contain two consecutive hyphens // // Example: my-snapshot-id // @@ -64,12 +64,12 @@ type RestoreDBInstanceFromDBSnapshotInput struct { // The identifier for the DB snapshot to restore from. 
Constraints: // - // * Must - // match the identifier of an existing DBSnapshot. + // * Must match + // the identifier of an existing DBSnapshot. // - // * If you are restoring from - // a shared manual DB snapshot, the DBSnapshotIdentifier must be the ARN of the - // shared DB snapshot. + // * If you are restoring from a shared + // manual DB snapshot, the DBSnapshotIdentifier must be the ARN of the shared DB + // snapshot. // // This member is required. DBSnapshotIdentifier *string @@ -105,16 +105,16 @@ type RestoreDBInstanceFromDBSnapshotInput struct { // not specify a value for DBParameterGroupName, then the default DBParameterGroup // for the specified DB engine is used. Constraints: // - // * If supplied, must match - // the name of an existing DBParameterGroup. + // * If supplied, must match the + // name of an existing DBParameterGroup. // - // * Must be 1 to 255 letters, - // numbers, or hyphens. + // * Must be 1 to 255 letters, numbers, or + // hyphens. // - // * First character must be a letter. + // * First character must be a letter. // - // * Can't end - // with a hyphen or contain two consecutive hyphens. + // * Can't end with a hyphen or + // contain two consecutive hyphens. DBParameterGroupName *string // The DB subnet group name to use for the new instance. Constraints: If supplied, @@ -161,29 +161,29 @@ type RestoreDBInstanceFromDBSnapshotInput struct { // can restore a MariaDB 10.1 DB instance from a MySQL 5.6 snapshot. Valid // Values: // - // * mariadb + // * mariadb // - // * mysql + // * mysql // - // * oracle-ee + // * oracle-ee // - // * oracle-se2 + // * oracle-se2 // - // * - // oracle-se1 + // * oracle-se1 // - // * oracle-se + // * + // oracle-se // - // * postgres + // * postgres // - // * sqlserver-ee + // * sqlserver-ee // - // * - // sqlserver-se + // * sqlserver-se // - // * sqlserver-ex + // * sqlserver-ex // - // * sqlserver-web + // * + // sqlserver-web Engine *string // Specifies the amount of provisioned IOPS for the DB instance, expressed in I/O diff --git a/service/rds/api_op_RestoreDBInstanceFromS3.go b/service/rds/api_op_RestoreDBInstanceFromS3.go index a0870683d87..682c711df1a 100644 --- a/service/rds/api_op_RestoreDBInstanceFromS3.go +++ b/service/rds/api_op_RestoreDBInstanceFromS3.go @@ -49,13 +49,13 @@ type RestoreDBInstanceFromS3Input struct { // The DB instance identifier. This parameter is stored as a lowercase string. // Constraints: // - // * Must contain from 1 to 63 letters, numbers, or hyphens. + // * Must contain from 1 to 63 letters, numbers, or hyphens. // + // * First + // character must be a letter. // - // * First character must be a letter. - // - // * Can't end with a hyphen or contain - // two consecutive hyphens. + // * Can't end with a hyphen or contain two + // consecutive hyphens. // // Example: mydbinstance // @@ -198,13 +198,13 @@ type RestoreDBInstanceFromS3Input struct { // The name for the master user. Constraints: // - // * Must be 1 to 16 letters or + // * Must be 1 to 16 letters or // numbers. // - // * First character must be a letter. + // * First character must be a letter. // - // * Can't be a reserved - // word for the chosen database engine. + // * Can't be a reserved word for + // the chosen database engine. 
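A sketch of the identifier, master user, and window formats documented for RestoreDBInstanceFromS3Input above. Only the fields discussed in this excerpt appear; the other members required for an actual S3 restore are intentionally omitted, and all values are illustrative.

package examples

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/rds"
)

// restoreFromS3Input shows only the members whose constraints are quoted above.
func restoreFromS3Input() *rds.RestoreDBInstanceFromS3Input {
	return &rds.RestoreDBInstanceFromS3Input{
		DBInstanceIdentifier:       aws.String("mydbinstance"),        // 1-63 letters, numbers, or hyphens; stored lowercase
		MasterUsername:             aws.String("awsuser"),             // 1-16 letters or numbers, starts with a letter
		PreferredBackupWindow:      aws.String("04:00-04:30"),         // hh24:mi-hh24:mi, UTC, >= 30 minutes
		PreferredMaintenanceWindow: aws.String("sun:06:00-sun:06:30"), // ddd:hh24:mi-ddd:hh24:mi; must not overlap the backup window
	}
}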
MasterUsername *string // The upper limit to which Amazon RDS can automatically scale the storage of the @@ -257,16 +257,15 @@ type RestoreDBInstanceFromS3Input struct { // (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_WorkingWithAutomatedBackups.html#USER_WorkingWithAutomatedBackups.BackupWindow) // in the Amazon RDS User Guide. Constraints: // - // * Must be in the format + // * Must be in the format // hh24:mi-hh24:mi. // - // * Must be in Universal Coordinated Time (UTC). + // * Must be in Universal Coordinated Time (UTC). // - // * Must - // not conflict with the preferred maintenance window. + // * Must not + // conflict with the preferred maintenance window. // - // * Must be at least 30 - // minutes. + // * Must be at least 30 minutes. PreferredBackupWindow *string // The time range each week during which system maintenance can occur, in Universal @@ -274,18 +273,18 @@ type RestoreDBInstanceFromS3Input struct { // (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_UpgradeDBInstance.Maintenance.html#Concepts.DBMaintenance) // in the Amazon RDS User Guide. Constraints: // - // * Must be in the format + // * Must be in the format // ddd:hh24:mi-ddd:hh24:mi. // - // * Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun. - // + // * Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun. // - // * Must be in Universal Coordinated Time (UTC). + // * + // Must be in Universal Coordinated Time (UTC). // - // * Must not conflict with the + // * Must not conflict with the // preferred backup window. // - // * Must be at least 30 minutes. + // * Must be at least 30 minutes. PreferredMaintenanceWindow *string // The number of CPU cores and the number of threads per core for the DB instance diff --git a/service/rds/api_op_RestoreDBInstanceToPointInTime.go b/service/rds/api_op_RestoreDBInstanceToPointInTime.go index 3fe990396ae..2d3629e7a61 100644 --- a/service/rds/api_op_RestoreDBInstanceToPointInTime.go +++ b/service/rds/api_op_RestoreDBInstanceToPointInTime.go @@ -43,13 +43,13 @@ type RestoreDBInstanceToPointInTimeInput struct { // The name of the new DB instance to be created. Constraints: // - // * Must contain - // from 1 to 63 letters, numbers, or hyphens + // * Must contain from + // 1 to 63 letters, numbers, or hyphens // - // * First character must be a - // letter + // * First character must be a letter // - // * Can't end with a hyphen or contain two consecutive hyphens + // * + // Can't end with a hyphen or contain two consecutive hyphens // // This member is required. TargetDBInstanceIdentifier *string @@ -85,16 +85,16 @@ type RestoreDBInstanceToPointInTimeInput struct { // not specify a value for DBParameterGroupName, then the default DBParameterGroup // for the specified DB engine is used. Constraints: // - // * If supplied, must match - // the name of an existing DBParameterGroup. + // * If supplied, must match the + // name of an existing DBParameterGroup. // - // * Must be 1 to 255 letters, - // numbers, or hyphens. + // * Must be 1 to 255 letters, numbers, or + // hyphens. // - // * First character must be a letter. + // * First character must be a letter. // - // * Can't end - // with a hyphen or contain two consecutive hyphens. + // * Can't end with a hyphen or + // contain two consecutive hyphens. DBParameterGroupName *string // The DB subnet group name to use for the new instance. Constraints: If supplied, @@ -139,30 +139,29 @@ type RestoreDBInstanceToPointInTimeInput struct { // The database engine to use for the new instance. 
Default: The same as source // Constraint: Must be compatible with the engine of the source Valid Values: // + // * + // mariadb // - // * mariadb + // * mysql // - // * mysql + // * oracle-ee // - // * oracle-ee - // - // * oracle-se2 - // - // * oracle-se1 + // * oracle-se2 // + // * oracle-se1 // // * oracle-se // - // * postgres + // * + // postgres // - // * sqlserver-ee + // * sqlserver-ee // - // * sqlserver-se + // * sqlserver-se // - // * - // sqlserver-ex + // * sqlserver-ex // - // * sqlserver-web + // * sqlserver-web Engine *string // The amount of Provisioned IOPS (input/output operations per second) to be @@ -212,10 +211,10 @@ type RestoreDBInstanceToPointInTimeInput struct { // The date and time to restore from. Valid Values: Value must be a time in // Universal Coordinated Time (UTC) format Constraints: // - // * Must be before the + // * Must be before the // latest restorable time for the DB instance // - // * Can't be specified if the + // * Can't be specified if the // UseLatestRestorableTime parameter is enabled // // Example: 2009-09-07T23:45:00Z @@ -223,8 +222,8 @@ type RestoreDBInstanceToPointInTimeInput struct { // The identifier of the source DB instance from which to restore. Constraints: // - // - // * Must match the identifier of an existing DB instance. + // * + // Must match the identifier of an existing DB instance. SourceDBInstanceIdentifier *string // The resource ID of the source DB instance from which to restore. diff --git a/service/rds/api_op_StartExportTask.go b/service/rds/api_op_StartExportTask.go index 32c3e2d43ba..768ccbb2587 100644 --- a/service/rds/api_op_StartExportTask.go +++ b/service/rds/api_op_StartExportTask.go @@ -48,27 +48,27 @@ type StartExportTaskInput struct { // authorized to execute the following operations. These can be set in the KMS key // policy: // - // * GrantOperation.Encrypt + // * GrantOperation.Encrypt // - // * GrantOperation.Decrypt + // * GrantOperation.Decrypt // - // * + // * // GrantOperation.GenerateDataKey // - // * + // * // GrantOperation.GenerateDataKeyWithoutPlaintext // - // * + // * // GrantOperation.ReEncryptFrom // - // * GrantOperation.ReEncryptTo + // * GrantOperation.ReEncryptTo // - // * + // * // GrantOperation.CreateGrant // - // * GrantOperation.DescribeKey + // * GrantOperation.DescribeKey // - // * + // * // GrantOperation.RetireGrant // // This member is required. @@ -87,20 +87,20 @@ type StartExportTaskInput struct { // The data to be exported from the snapshot. If this parameter is not provided, // all the snapshot data is exported. Valid values are the following: // - // * - // database - Export all the data from a specified database. + // * database - + // Export all the data from a specified database. // - // * database.table - // table-name - Export a table of the snapshot. This format is valid only for RDS - // for MySQL, RDS for MariaDB, and Aurora MySQL. + // * database.table table-name - + // Export a table of the snapshot. This format is valid only for RDS for MySQL, RDS + // for MariaDB, and Aurora MySQL. // - // * database.schema schema-name - // - Export a database schema of the snapshot. This format is valid only for RDS - // for PostgreSQL and Aurora PostgreSQL. - // - // * database.schema.table table-name - - // Export a table of the database schema. This format is valid only for RDS for + // * database.schema schema-name - Export a + // database schema of the snapshot. This format is valid only for RDS for // PostgreSQL and Aurora PostgreSQL. 
+ // + // * database.schema.table table-name - Export a + // table of the database schema. This format is valid only for RDS for PostgreSQL + // and Aurora PostgreSQL. ExportOnly []*string // The Amazon S3 bucket prefix to use as the file name and path of the exported @@ -114,20 +114,20 @@ type StartExportTaskOutput struct { // The data exported from the snapshot. Valid values are the following: // - // * - // database - Export all the data from a specified database. + // * database + // - Export all the data from a specified database. // - // * database.table - // table-name - Export a table of the snapshot. This format is valid only for RDS - // for MySQL, RDS for MariaDB, and Aurora MySQL. + // * database.table table-name - + // Export a table of the snapshot. This format is valid only for RDS for MySQL, RDS + // for MariaDB, and Aurora MySQL. // - // * database.schema schema-name - // - Export a database schema of the snapshot. This format is valid only for RDS - // for PostgreSQL and Aurora PostgreSQL. - // - // * database.schema.table table-name - - // Export a table of the database schema. This format is valid only for RDS for + // * database.schema schema-name - Export a + // database schema of the snapshot. This format is valid only for RDS for // PostgreSQL and Aurora PostgreSQL. + // + // * database.schema.table table-name - Export a + // table of the database schema. This format is valid only for RDS for PostgreSQL + // and Aurora PostgreSQL. ExportOnly []*string // A unique identifier for the snapshot export task. This ID isn't an identifier diff --git a/service/rds/doc.go b/service/rds/doc.go index 0be3fc79f81..ca3171d3c7c 100644 --- a/service/rds/doc.go +++ b/service/rds/doc.go @@ -26,30 +26,30 @@ // maintenance window. The reference structure is as follows, and we list following // some related topics from the user guide. Amazon RDS API Reference // -// * For the +// * For the // alphabetical list of API actions, see API Actions // (https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_Operations.html). // -// -// * For the alphabetical list of data types, see Data Types +// * +// For the alphabetical list of data types, see Data Types // (https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_Types.html). // -// -// * For a list of common query parameters, see Common Parameters +// * +// For a list of common query parameters, see Common Parameters // (https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/CommonParameters.html). // -// -// * For descriptions of the error codes, see Common Errors +// * +// For descriptions of the error codes, see Common Errors // (https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/CommonErrors.html). // // Amazon // RDS User Guide // -// * For a summary of the Amazon RDS interfaces, see Available -// RDS Interfaces +// * For a summary of the Amazon RDS interfaces, see Available RDS +// Interfaces // (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Welcome.html#Welcome.Interfaces). // -// -// * For more information about how to use the Query API, see Using the Query API +// * +// For more information about how to use the Query API, see Using the Query API // (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Using_the_Query_API.html). 
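A sketch of the ExportOnly filter and KMS key field described for StartExportTask above. The key ARN is a placeholder, the database/table names are illustrative of the documented database and database.table formats, and the remaining required members of StartExportTaskInput are omitted here.

package examples

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/rds"
)

// exportTaskInput limits a snapshot export using the name formats listed above.
func exportTaskInput() *rds.StartExportTaskInput {
	return &rds.StartExportTaskInput{
		// The key policy must allow the grant operations listed above.
		KmsKeyId: aws.String("arn:aws:kms:us-east-1:111122223333:key/example-key-id"),
		ExportOnly: []*string{
			aws.String("mydb"),         // database: export everything in mydb
			aws.String("mydb.mytable"), // database.table: MySQL, MariaDB, Aurora MySQL only
		},
	}
}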
package rds diff --git a/service/rds/types/enums.go b/service/rds/types/enums.go index fdbf768433b..838254c594b 100644 --- a/service/rds/types/enums.go +++ b/service/rds/types/enums.go @@ -80,15 +80,15 @@ type DBProxyStatus string // Enum values for DBProxyStatus const ( - DBProxyStatusAvailable DBProxyStatus = "available" - DBProxyStatusModifying DBProxyStatus = "modifying" - DBProxyStatusIncompatible_network DBProxyStatus = "incompatible-network" - DBProxyStatusInsufficient_resource_limits DBProxyStatus = "insufficient-resource-limits" - DBProxyStatusCreating DBProxyStatus = "creating" - DBProxyStatusDeleting DBProxyStatus = "deleting" - DBProxyStatusSuspended DBProxyStatus = "suspended" - DBProxyStatusSuspending DBProxyStatus = "suspending" - DBProxyStatusReactivating DBProxyStatus = "reactivating" + DBProxyStatusAvailable DBProxyStatus = "available" + DBProxyStatusModifying DBProxyStatus = "modifying" + DBProxyStatusIncompatibleNetwork DBProxyStatus = "incompatible-network" + DBProxyStatusInsufficientResourceLimits DBProxyStatus = "insufficient-resource-limits" + DBProxyStatusCreating DBProxyStatus = "creating" + DBProxyStatusDeleting DBProxyStatus = "deleting" + DBProxyStatusSuspended DBProxyStatus = "suspended" + DBProxyStatusSuspending DBProxyStatus = "suspending" + DBProxyStatusReactivating DBProxyStatus = "reactivating" ) // Values returns all known values for DBProxyStatus. Note that this can be @@ -148,8 +148,8 @@ type ReplicaMode string // Enum values for ReplicaMode const ( - ReplicaModeOpen_read_only ReplicaMode = "open-read-only" - ReplicaModeMounted ReplicaMode = "mounted" + ReplicaModeOpenReadOnly ReplicaMode = "open-read-only" + ReplicaModeMounted ReplicaMode = "mounted" ) // Values returns all known values for ReplicaMode. Note that this can be expanded @@ -192,10 +192,10 @@ type TargetHealthReason string // Enum values for TargetHealthReason const ( - TargetHealthReasonUnreachable TargetHealthReason = "UNREACHABLE" - TargetHealthReasonConnection_failed TargetHealthReason = "CONNECTION_FAILED" - TargetHealthReasonAuth_failure TargetHealthReason = "AUTH_FAILURE" - TargetHealthReasonPending_proxy_capacity TargetHealthReason = "PENDING_PROXY_CAPACITY" + TargetHealthReasonUnreachable TargetHealthReason = "UNREACHABLE" + TargetHealthReasonConnectionFailed TargetHealthReason = "CONNECTION_FAILED" + TargetHealthReasonAuthFailure TargetHealthReason = "AUTH_FAILURE" + TargetHealthReasonPendingProxyCapacity TargetHealthReason = "PENDING_PROXY_CAPACITY" ) // Values returns all known values for TargetHealthReason. Note that this can be @@ -234,9 +234,9 @@ type TargetType string // Enum values for TargetType const ( - TargetTypeRds_instance TargetType = "RDS_INSTANCE" - TargetTypeRds_serverless_endpoint TargetType = "RDS_SERVERLESS_ENDPOINT" - TargetTypeTracked_cluster TargetType = "TRACKED_CLUSTER" + TargetTypeRdsInstance TargetType = "RDS_INSTANCE" + TargetTypeRdsServerlessEndpoint TargetType = "RDS_SERVERLESS_ENDPOINT" + TargetTypeTrackedCluster TargetType = "TRACKED_CLUSTER" ) // Values returns all known values for TargetType. Note that this can be expanded diff --git a/service/rds/types/types.go b/service/rds/types/types.go index 857bbfbd860..07bad65748d 100644 --- a/service/rds/types/types.go +++ b/service/rds/types/types.go @@ -8,90 +8,90 @@ import ( // Describes a quota for an AWS account. The following are account quotas: // -// * +// * // AllocatedStorage - The total allocated storage per account, in GiB. 
The used // value is the total allocated storage in the account, in GiB. // -// * +// * // AuthorizationsPerDBSecurityGroup - The number of ingress rules per DB security // group. The used value is the highest number of ingress rules in a DB security // group in the account. Other DB security groups in the account might have a lower // number of ingress rules. // -// * CustomEndpointsPerDBCluster - The number of -// custom endpoints per DB cluster. The used value is the highest number of custom +// * CustomEndpointsPerDBCluster - The number of custom +// endpoints per DB cluster. The used value is the highest number of custom // endpoints in a DB clusters in the account. Other DB clusters in the account // might have a lower number of custom endpoints. // -// * DBClusterParameterGroups - -// The number of DB cluster parameter groups per account, excluding default -// parameter groups. The used value is the count of nondefault DB cluster parameter -// groups in the account. +// * DBClusterParameterGroups - The +// number of DB cluster parameter groups per account, excluding default parameter +// groups. The used value is the count of nondefault DB cluster parameter groups in +// the account. // -// * DBClusterRoles - The number of associated AWS -// Identity and Access Management (IAM) roles per DB cluster. The used value is the -// highest number of associated IAM roles for a DB cluster in the account. Other DB +// * DBClusterRoles - The number of associated AWS Identity and +// Access Management (IAM) roles per DB cluster. The used value is the highest +// number of associated IAM roles for a DB cluster in the account. Other DB // clusters in the account might have a lower number of associated IAM roles. // +// * +// DBClusters - The number of DB clusters per account. The used value is the count +// of DB clusters in the account. // -// * DBClusters - The number of DB clusters per account. The used value is the -// count of DB clusters in the account. -// -// * DBInstanceRoles - The number of -// associated IAM roles per DB instance. The used value is the highest number of -// associated IAM roles for a DB instance in the account. Other DB instances in the -// account might have a lower number of associated IAM roles. +// * DBInstanceRoles - The number of associated IAM +// roles per DB instance. The used value is the highest number of associated IAM +// roles for a DB instance in the account. Other DB instances in the account might +// have a lower number of associated IAM roles. // -// * DBInstances - -// The number of DB instances per account. The used value is the count of the DB -// instances in the account. Amazon RDS DB instances, Amazon Aurora DB instances, -// Amazon Neptune instances, and Amazon DocumentDB instances apply to this quota. +// * DBInstances - The number of DB +// instances per account. The used value is the count of the DB instances in the +// account. Amazon RDS DB instances, Amazon Aurora DB instances, Amazon Neptune +// instances, and Amazon DocumentDB instances apply to this quota. // -// -// * DBParameterGroups - The number of DB parameter groups per account, excluding +// * +// DBParameterGroups - The number of DB parameter groups per account, excluding // default parameter groups. The used value is the count of nondefault DB parameter // groups in the account. // -// * DBSecurityGroups - The number of DB security -// groups (not VPC security groups) per account, excluding the default security -// group. 
The used value is the count of nondefault DB security groups in the -// account. -// -// * DBSubnetGroups - The number of DB subnet groups per account. The -// used value is the count of the DB subnet groups in the account. +// * DBSecurityGroups - The number of DB security groups +// (not VPC security groups) per account, excluding the default security group. The +// used value is the count of nondefault DB security groups in the account. // -// * -// EventSubscriptions - The number of event subscriptions per account. The used -// value is the count of the event subscriptions in the account. +// * +// DBSubnetGroups - The number of DB subnet groups per account. The used value is +// the count of the DB subnet groups in the account. // -// * -// ManualClusterSnapshots - The number of manual DB cluster snapshots per account. -// The used value is the count of the manual DB cluster snapshots in the account. +// * EventSubscriptions - The +// number of event subscriptions per account. The used value is the count of the +// event subscriptions in the account. // +// * ManualClusterSnapshots - The number of +// manual DB cluster snapshots per account. The used value is the count of the +// manual DB cluster snapshots in the account. // -// * ManualSnapshots - The number of manual DB instance snapshots per account. The -// used value is the count of the manual DB instance snapshots in the account. +// * ManualSnapshots - The number of +// manual DB instance snapshots per account. The used value is the count of the +// manual DB instance snapshots in the account. // +// * OptionGroups - The number of DB +// option groups per account, excluding default option groups. The used value is +// the count of nondefault DB option groups in the account. // -// * OptionGroups - The number of DB option groups per account, excluding default -// option groups. The used value is the count of nondefault DB option groups in the -// account. +// * +// ReadReplicasPerMaster - The number of read replicas per DB instance. The used +// value is the highest number of read replicas for a DB instance in the account. +// Other DB instances in the account might have a lower number of read replicas. // -// * ReadReplicasPerMaster - The number of read replicas per DB -// instance. The used value is the highest number of read replicas for a DB -// instance in the account. Other DB instances in the account might have a lower -// number of read replicas. +// * +// ReservedDBInstances - The number of reserved DB instances per account. The used +// value is the count of the active reserved DB instances in the account. // -// * ReservedDBInstances - The number of reserved DB -// instances per account. The used value is the count of the active reserved DB -// instances in the account. +// * +// SubnetsPerDBSubnetGroup - The number of subnets per DB subnet group. The used +// value is highest number of subnets for a DB subnet group in the account. Other +// DB subnet groups in the account might have a lower number of subnets. // -// * SubnetsPerDBSubnetGroup - The number of subnets -// per DB subnet group. The used value is highest number of subnets for a DB subnet -// group in the account. Other DB subnet groups in the account might have a lower -// number of subnets. 
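A small sketch using the renamed DBProxyStatus constants from service/rds/types/enums.go earlier in this patch. Callers written against the old underscore names (for example DBProxyStatusIncompatible_network) need to move to these mixed-case identifiers; the helper itself is illustrative only.

package examples

import "github.com/aws/aws-sdk-go-v2/service/rds/types"

// proxyNeedsAttention reports whether a proxy is in one of the renamed
// problem states.
func proxyNeedsAttention(status types.DBProxyStatus) bool {
	switch status {
	case types.DBProxyStatusIncompatibleNetwork, types.DBProxyStatusInsufficientResourceLimits:
		return true
	default:
		return false
	}
}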
-// -// For more information, see Quotas for Amazon RDS +// For more +// information, see Quotas for Amazon RDS // (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Limits.html) in the // Amazon RDS User Guide and Quotas for Amazon Aurora // (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_Limits.html) @@ -552,18 +552,17 @@ type DBClusterBacktrack struct { // The status of the backtrack. This property returns one of the following // values: // - // * applying - The backtrack is currently being applied to or rolled - // back from the DB cluster. + // * applying - The backtrack is currently being applied to or rolled back + // from the DB cluster. // - // * completed - The backtrack has successfully been - // applied to or rolled back from the DB cluster. + // * completed - The backtrack has successfully been applied + // to or rolled back from the DB cluster. // - // * failed - An error occurred - // while the backtrack was applied to or rolled back from the DB cluster. + // * failed - An error occurred while the + // backtrack was applied to or rolled back from the DB cluster. // - // * - // pending - The backtrack is currently pending application to or rollback from the - // DB cluster. + // * pending - The + // backtrack is currently pending application to or rollback from the DB cluster. Status *string } @@ -571,17 +570,17 @@ type DBClusterBacktrack struct { // Aurora DB cluster. This data type is used as a response element in the following // actions: // -// * CreateDBClusterEndpoint +// * CreateDBClusterEndpoint // -// * DescribeDBClusterEndpoints +// * DescribeDBClusterEndpoints // -// * +// * // ModifyDBClusterEndpoint // -// * DeleteDBClusterEndpoint +// * DeleteDBClusterEndpoint // -// For the data structure -// that represents Amazon RDS DB instance endpoints, see Endpoint. +// For the data structure that +// represents Amazon RDS DB instance endpoints, see Endpoint. type DBClusterEndpoint struct { // The type associated with a custom endpoint. One of: READER, WRITER, ANY. @@ -688,16 +687,16 @@ type DBClusterRole struct { // Describes the state of association between the IAM role and the DB cluster. The // Status property returns one of the following values: // - // * ACTIVE - the IAM - // role ARN is associated with the DB cluster and can be used to access other AWS + // * ACTIVE - the IAM role + // ARN is associated with the DB cluster and can be used to access other AWS // services on your behalf. // - // * PENDING - the IAM role ARN is being associated - // with the DB cluster. + // * PENDING - the IAM role ARN is being associated with + // the DB cluster. // - // * INVALID - the IAM role ARN is associated with the DB - // cluster, but the DB cluster is unable to assume the IAM role in order to access - // other AWS services on your behalf. + // * INVALID - the IAM role ARN is associated with the DB cluster, + // but the DB cluster is unable to assume the IAM role in order to access other AWS + // services on your behalf. Status *string } @@ -854,7 +853,7 @@ type DBEngineVersion struct { // A list of features supported by the DB engine. Supported feature names include // the following. // - // * s3Import + // * s3Import SupportedFeatureNames []*string // A list of the character sets supported by the Oracle DB engine for the @@ -999,14 +998,13 @@ type DBInstance struct { // accounts is enabled, and otherwise false. 
IAM database authentication can be // enabled for the following database engines // - // * For MySQL 5.6, minor version + // * For MySQL 5.6, minor version // 5.6.34 or higher // - // * For MySQL 5.7, minor version 5.7.16 or higher + // * For MySQL 5.7, minor version 5.7.16 or higher // - // * - // Aurora 5.6 or higher. To enable IAM database authentication for Aurora, see - // DBCluster Type. + // * Aurora 5.6 + // or higher. To enable IAM database authentication for Aurora, see DBCluster Type. IAMDatabaseAuthenticationEnabled *bool // Provides the date and time the DB instance was created. @@ -1227,14 +1225,14 @@ type DBInstanceAutomatedBackup struct { // Provides a list of status information for an automated backup: // - // * active - + // * active - // automated backups for current instances // - // * retained - automated backups for + // * retained - automated backups for // deleted instances // - // * creating - automated backups that are waiting for the - // first automated snapshot to be available. + // * creating - automated backups that are waiting for the first + // automated snapshot to be available. Status *string // Specifies the storage type associated with the automated backup. @@ -1268,15 +1266,15 @@ type DBInstanceRole struct { // Describes the state of association between the IAM role and the DB instance. The // Status property returns one of the following values: // - // * ACTIVE - the IAM - // role ARN is associated with the DB instance and can be used to access other AWS + // * ACTIVE - the IAM role + // ARN is associated with the DB instance and can be used to access other AWS // services on your behalf. // - // * PENDING - the IAM role ARN is being associated - // with the DB instance. + // * PENDING - the IAM role ARN is being associated with + // the DB instance. // - // * INVALID - the IAM role ARN is associated with the - // DB instance, but the DB instance is unable to assume the IAM role in order to + // * INVALID - the IAM role ARN is associated with the DB + // instance, but the DB instance is unable to assume the IAM role in order to // access other AWS services on your behalf. Status *string } @@ -1322,19 +1320,19 @@ type DBParameterGroup struct { // The status of the DB parameter group. This data type is used as a response // element in the following actions: // -// * CreateDBInstance +// * CreateDBInstance // -// * +// * // CreateDBInstanceReadReplica // -// * DeleteDBInstance -// -// * ModifyDBInstance +// * DeleteDBInstance // +// * ModifyDBInstance // -// * RebootDBInstance +// * +// RebootDBInstance // -// * RestoreDBInstanceFromDBSnapshot +// * RestoreDBInstanceFromDBSnapshot type DBParameterGroupStatus struct { // The name of the DB parameter group. @@ -1505,15 +1503,15 @@ type DBSecurityGroup struct { // This data type is used as a response element in the following actions: // -// * +// * // ModifyDBInstance // -// * RebootDBInstance +// * RebootDBInstance // -// * -// RestoreDBInstanceFromDBSnapshot +// * RestoreDBInstanceFromDBSnapshot // -// * RestoreDBInstanceToPointInTime +// * +// RestoreDBInstanceToPointInTime type DBSecurityGroupMembership struct { // The name of the DB security group. 
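A sketch built on the IAM role association status values documented for DBClusterRole and DBInstanceRole above (ACTIVE, PENDING, INVALID): only an ACTIVE association lets the instance use the role to access other AWS services on your behalf. The helper is illustrative, not part of the generated code.

package examples

import "github.com/aws/aws-sdk-go-v2/service/rds/types"

// roleIsUsable reports whether an associated IAM role can currently be used.
func roleIsUsable(role types.DBInstanceRole) bool {
	return role.Status != nil && *role.Status == "ACTIVE"
}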
@@ -1728,12 +1726,12 @@ type DoubleRange struct { // This data type is used as a response element in the following actions: // -// * +// * // AuthorizeDBSecurityGroupIngress // -// * DescribeDBSecurityGroups +// * DescribeDBSecurityGroups // -// * +// * // RevokeDBSecurityGroupIngress type EC2SecurityGroup struct { @@ -1756,15 +1754,15 @@ type EC2SecurityGroup struct { // DB instance. This data type is used as a response element in the following // actions: // -// * CreateDBInstance +// * CreateDBInstance // -// * DescribeDBInstances +// * DescribeDBInstances // -// * -// DeleteDBInstance +// * DeleteDBInstance // -// For the data structure that represents Amazon Aurora DB -// cluster endpoints, see DBClusterEndpoint. +// For the +// data structure that represents Amazon Aurora DB cluster endpoints, see +// DBClusterEndpoint. type Endpoint struct { // Specifies the DNS address of the DB instance. @@ -1874,20 +1872,20 @@ type ExportTask struct { // The data exported from the snapshot. Valid values are the following: // - // * - // database - Export all the data from a specified database. + // * database + // - Export all the data from a specified database. // - // * database.table - // table-name - Export a table of the snapshot. This format is valid only for RDS - // for MySQL, RDS for MariaDB, and Aurora MySQL. + // * database.table table-name - + // Export a table of the snapshot. This format is valid only for RDS for MySQL, RDS + // for MariaDB, and Aurora MySQL. // - // * database.schema schema-name - // - Export a database schema of the snapshot. This format is valid only for RDS - // for PostgreSQL and Aurora PostgreSQL. - // - // * database.schema.table table-name - - // Export a table of the database schema. This format is valid only for RDS for + // * database.schema schema-name - Export a + // database schema of the snapshot. This format is valid only for RDS for // PostgreSQL and Aurora PostgreSQL. + // + // * database.schema.table table-name - Export a + // table of the database schema. This format is valid only for RDS for PostgreSQL + // and Aurora PostgreSQL. ExportOnly []*string // A unique identifier for the snapshot export task. This ID isn't an identifier @@ -1946,18 +1944,17 @@ type ExportTask struct { // operation are documented with the describe operation. Currently, wildcards are // not supported in filters. The following actions can be filtered: // -// * +// * // DescribeDBClusterBacktracks // -// * DescribeDBClusterEndpoints +// * DescribeDBClusterEndpoints // -// * +// * // DescribeDBClusters // -// * DescribeDBInstances +// * DescribeDBInstances // -// * -// DescribePendingMaintenanceActions +// * DescribePendingMaintenanceActions type Filter struct { // The name of the filter. Filter names are case-sensitive. @@ -2587,17 +2584,17 @@ type PendingModifiedValues struct { // parameter. You can set the processor features of the DB instance class for a DB // instance when you call one of the following actions: // -// * CreateDBInstance +// * CreateDBInstance // +// * +// ModifyDBInstance // -// * ModifyDBInstance -// -// * RestoreDBInstanceFromDBSnapshot +// * RestoreDBInstanceFromDBSnapshot // -// * +// * // RestoreDBInstanceFromS3 // -// * RestoreDBInstanceToPointInTime +// * RestoreDBInstanceToPointInTime // // You can view the // valid processor values for a particular instance class by calling the @@ -2605,24 +2602,24 @@ type PendingModifiedValues struct { // the DBInstanceClass parameter. 
In addition, you can use the following actions // for DB instance class processor information: // -// * DescribeDBInstances +// * DescribeDBInstances // -// * +// * // DescribeDBSnapshots // -// * DescribeValidDBInstanceModifications +// * DescribeValidDBInstanceModifications // // If you call // DescribeDBInstances, ProcessorFeature returns non-null values only if the // following conditions are met: // -// * You are accessing an Oracle DB instance. -// +// * You are accessing an Oracle DB instance. // -// * Your Oracle DB instance class supports configuring the number of CPU cores and +// * +// Your Oracle DB instance class supports configuring the number of CPU cores and // threads per core. // -// * The current number CPU cores and threads is set to a +// * The current number CPU cores and threads is set to a // non-default value. // // For more information, see Configuring the Processor of the diff --git a/service/rdsdata/api_op_BatchExecuteStatement.go b/service/rdsdata/api_op_BatchExecuteStatement.go index ef45a3bf46b..7c84dd30f07 100644 --- a/service/rdsdata/api_op_BatchExecuteStatement.go +++ b/service/rdsdata/api_op_BatchExecuteStatement.go @@ -58,10 +58,10 @@ type BatchExecuteStatementInput struct { // times as the number of parameter sets provided. To execute a SQL statement with // no parameters, use one of the following options: // - // * Specify one or more - // empty parameter sets. + // * Specify one or more empty + // parameter sets. // - // * Use the ExecuteStatement operation instead of the + // * Use the ExecuteStatement operation instead of the // BatchExecuteStatement operation. // // Array parameters are not supported. diff --git a/service/rdsdata/types/enums.go b/service/rdsdata/types/enums.go index 21be599d8db..805576639e0 100644 --- a/service/rdsdata/types/enums.go +++ b/service/rdsdata/types/enums.go @@ -6,8 +6,8 @@ type DecimalReturnType string // Enum values for DecimalReturnType const ( - DecimalReturnTypeString DecimalReturnType = "STRING" - DecimalReturnTypeDouble_or_long DecimalReturnType = "DOUBLE_OR_LONG" + DecimalReturnTypeString DecimalReturnType = "STRING" + DecimalReturnTypeDoubleOrLong DecimalReturnType = "DOUBLE_OR_LONG" ) // Values returns all known values for DecimalReturnType. Note that this can be diff --git a/service/rdsdata/types/types.go b/service/rdsdata/types/types.go index 68bb2d6b906..76cd7d5e432 100644 --- a/service/rdsdata/types/types.go +++ b/service/rdsdata/types/types.go @@ -189,21 +189,21 @@ type SqlParameter struct { // A hint that specifies the correct object type for data type mapping. Values: // - // - // * DECIMAL - The corresponding String parameter value is sent as an object of + // * + // DECIMAL - The corresponding String parameter value is sent as an object of // DECIMAL type to the database. // - // * TIMESTAMP - The corresponding String - // parameter value is sent as an object of TIMESTAMP type to the database. The - // accepted format is YYYY-MM-DD HH:MM:SS[.FFF]. + // * TIMESTAMP - The corresponding String parameter + // value is sent as an object of TIMESTAMP type to the database. The accepted + // format is YYYY-MM-DD HH:MM:SS[.FFF]. // - // * TIME - The corresponding - // String parameter value is sent as an object of TIME type to the database. The - // accepted format is HH:MM:SS[.FFF]. + // * TIME - The corresponding String + // parameter value is sent as an object of TIME type to the database. The accepted + // format is HH:MM:SS[.FFF]. 
// - // * DATE - The corresponding String - // parameter value is sent as an object of DATE type to the database. The accepted - // format is YYYY-MM-DD. + // * DATE - The corresponding String parameter value is + // sent as an object of DATE type to the database. The accepted format is + // YYYY-MM-DD. TypeHint TypeHint // The value of the parameter. diff --git a/service/redshift/api_op_CopyClusterSnapshot.go b/service/redshift/api_op_CopyClusterSnapshot.go index 547846dbb77..573ba251a2e 100644 --- a/service/redshift/api_op_CopyClusterSnapshot.go +++ b/service/redshift/api_op_CopyClusterSnapshot.go @@ -41,27 +41,27 @@ type CopyClusterSnapshotInput struct { // The identifier for the source snapshot. Constraints: // - // * Must be the - // identifier for a valid automated snapshot whose state is available. + // * Must be the identifier + // for a valid automated snapshot whose state is available. // // This member is required. SourceSnapshotIdentifier *string // The identifier given to the new manual snapshot. Constraints: // - // * Cannot be - // null, empty, or blank. + // * Cannot be null, + // empty, or blank. // - // * Must contain from 1 to 255 alphanumeric characters - // or hyphens. + // * Must contain from 1 to 255 alphanumeric characters or + // hyphens. // - // * First character must be a letter. + // * First character must be a letter. // - // * Cannot end with a - // hyphen or contain two consecutive hyphens. + // * Cannot end with a hyphen or + // contain two consecutive hyphens. // - // * Must be unique for the AWS - // account that is making the request. + // * Must be unique for the AWS account that is + // making the request. // // This member is required. TargetSnapshotIdentifier *string @@ -76,7 +76,7 @@ type CopyClusterSnapshotInput struct { // resource element that specifies anything other than * for the cluster name. // Constraints: // - // * Must be the identifier for a valid cluster. + // * Must be the identifier for a valid cluster. SourceSnapshotClusterIdentifier *string } diff --git a/service/redshift/api_op_CreateCluster.go b/service/redshift/api_op_CreateCluster.go index 117cc1c20dc..9f75edc67e2 100644 --- a/service/redshift/api_op_CreateCluster.go +++ b/service/redshift/api_op_CreateCluster.go @@ -40,19 +40,19 @@ type CreateClusterInput struct { // cluster for any subsequent cluster operations such as deleting or modifying. The // identifier also appears in the Amazon Redshift console. Constraints: // - // * Must + // * Must // contain from 1 to 63 alphanumeric characters or hyphens. // - // * Alphabetic + // * Alphabetic // characters must be lowercase. // - // * First character must be a letter. + // * First character must be a letter. // - // * - // Cannot end with a hyphen or contain two consecutive hyphens. + // * Cannot end + // with a hyphen or contain two consecutive hyphens. // - // * Must be - // unique for all clusters within an AWS account. + // * Must be unique for all + // clusters within an AWS account. // // Example: myexamplecluster // @@ -62,19 +62,19 @@ type CreateClusterInput struct { // The password associated with the master user account for the cluster that is // being created. Constraints: // - // * Must be between 8 and 64 characters in - // length. + // * Must be between 8 and 64 characters in length. // - // * Must contain at least one uppercase letter. + // * + // Must contain at least one uppercase letter. // - // * Must contain - // at least one lowercase letter. + // * Must contain at least one + // lowercase letter. 
// - // * Must contain one number. + // * Must contain one number. // - // * Can be any - // printable ASCII character (ASCII code 33 to 126) except ' (single quote), " - // (double quote), \, /, @, or space. + // * Can be any printable ASCII + // character (ASCII code 33 to 126) except ' (single quote), " (double quote), \, + // /, @, or space. // // This member is required. MasterUserPassword *string @@ -82,14 +82,14 @@ type CreateClusterInput struct { // The user name associated with the master user account for the cluster that is // being created. Constraints: // - // * Must be 1 - 128 alphanumeric characters. The - // user name can't be PUBLIC. + // * Must be 1 - 128 alphanumeric characters. The user + // name can't be PUBLIC. // - // * First character must be a letter. + // * First character must be a letter. // - // * - // Cannot be a reserved word. A list of reserved words can be found in Reserved - // Words (https://docs.aws.amazon.com/redshift/latest/dg/r_pg_keywords.html) in the + // * Cannot be a + // reserved word. A list of reserved words can be found in Reserved Words + // (https://docs.aws.amazon.com/redshift/latest/dg/r_pg_keywords.html) in the // Amazon Redshift Database Developer Guide. // // This member is required. @@ -136,12 +136,12 @@ type CreateClusterInput struct { // (https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html) // Constraints: // - // * Must be 1 to 255 alphanumeric characters or hyphens. + // * Must be 1 to 255 alphanumeric characters or hyphens. // - // * - // First character must be a letter. + // * First + // character must be a letter. // - // * Cannot end with a hyphen or contain two + // * Cannot end with a hyphen or contain two // consecutive hyphens. ClusterParameterGroupName *string @@ -156,14 +156,14 @@ type CreateClusterInput struct { // The type of the cluster. When cluster type is specified as // - // * single-node, - // the NumberOfNodes parameter is not required. + // * single-node, the + // NumberOfNodes parameter is not required. // - // * multi-node, the - // NumberOfNodes parameter is required. + // * multi-node, the NumberOfNodes + // parameter is required. // - // Valid Values: multi-node | single-node - // Default: multi-node + // Valid Values: multi-node | single-node Default: + // multi-node ClusterType *string // The version of the Amazon Redshift engine software that you want to deploy on @@ -178,14 +178,14 @@ type CreateClusterInput struct { // (https://docs.aws.amazon.com/redshift/latest/dg/t_creating_database.html) in the // Amazon Redshift Database Developer Guide. Default: dev Constraints: // - // * Must + // * Must // contain 1 to 64 alphanumeric characters. // - // * Must contain only lowercase + // * Must contain only lowercase // letters. // - // * Cannot be a word that is reserved by the service. A list of - // reserved words can be found in Reserved Words + // * Cannot be a word that is reserved by the service. A list of reserved + // words can be found in Reserved Words // (https://docs.aws.amazon.com/redshift/latest/dg/r_pg_keywords.html) in the // Amazon Redshift Database Developer Guide. 
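A sketch of a Redshift CreateClusterInput whose values satisfy the identifier, master user, password, cluster type, and DBName constraints quoted above. Only the members shown in this excerpt appear; other members such as the node type are omitted, and all values are illustrative placeholders.

package examples

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/redshift"
)

// createClusterInput shows only the members documented in this excerpt.
func createClusterInput() *redshift.CreateClusterInput {
	return &redshift.CreateClusterInput{
		ClusterIdentifier:  aws.String("myexamplecluster"), // 1-63 lowercase alphanumerics or hyphens, unique per account
		MasterUsername:     aws.String("awsuser"),          // 1-128 alphanumerics, not PUBLIC or a reserved word
		MasterUserPassword: aws.String("ChangeMe12345"),    // 8-64 chars with upper, lower, and a number; no ' " \ / @ or space
		ClusterType:        aws.String("single-node"),      // single-node does not require NumberOfNodes
		DBName:             aws.String("dev"),              // default is dev; lowercase, 1-64 alphanumerics
	}
}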
DBName *string diff --git a/service/redshift/api_op_CreateClusterParameterGroup.go b/service/redshift/api_op_CreateClusterParameterGroup.go index a7f6ee8bb8d..7bc86a49004 100644 --- a/service/redshift/api_op_CreateClusterParameterGroup.go +++ b/service/redshift/api_op_CreateClusterParameterGroup.go @@ -57,16 +57,16 @@ type CreateClusterParameterGroupInput struct { // The name of the cluster parameter group. Constraints: // - // * Must be 1 to 255 + // * Must be 1 to 255 // alphanumeric characters or hyphens // - // * First character must be a letter. + // * First character must be a letter. // + // * + // Cannot end with a hyphen or contain two consecutive hyphens. // - // * Cannot end with a hyphen or contain two consecutive hyphens. - // - // * Must be - // unique withing your AWS account. + // * Must be unique + // withing your AWS account. // // This value is stored as a lower-case string. // diff --git a/service/redshift/api_op_CreateClusterSecurityGroup.go b/service/redshift/api_op_CreateClusterSecurityGroup.go index cf35619d85a..80ccac97d74 100644 --- a/service/redshift/api_op_CreateClusterSecurityGroup.go +++ b/service/redshift/api_op_CreateClusterSecurityGroup.go @@ -37,16 +37,15 @@ type CreateClusterSecurityGroupInput struct { // The name for the security group. Amazon Redshift stores the value as a lowercase // string. Constraints: // - // * Must contain no more than 255 alphanumeric - // characters or hyphens. + // * Must contain no more than 255 alphanumeric characters or + // hyphens. // - // * Must not be "Default". + // * Must not be "Default". // - // * Must be unique for - // all security groups that are created by your AWS account. + // * Must be unique for all security groups + // that are created by your AWS account. // - // Example: - // examplesecuritygroup + // Example: examplesecuritygroup // // This member is required. ClusterSecurityGroupName *string diff --git a/service/redshift/api_op_CreateClusterSnapshot.go b/service/redshift/api_op_CreateClusterSnapshot.go index 66db7a134ff..573027ac17b 100644 --- a/service/redshift/api_op_CreateClusterSnapshot.go +++ b/service/redshift/api_op_CreateClusterSnapshot.go @@ -42,16 +42,16 @@ type CreateClusterSnapshotInput struct { // A unique identifier for the snapshot that you are requesting. This identifier // must be unique for all snapshots within the AWS account. Constraints: // - // * - // Cannot be null, empty, or blank + // * Cannot + // be null, empty, or blank // - // * Must contain from 1 to 255 alphanumeric - // characters or hyphens + // * Must contain from 1 to 255 alphanumeric characters + // or hyphens // - // * First character must be a letter + // * First character must be a letter // - // * Cannot end - // with a hyphen or contain two consecutive hyphens + // * Cannot end with a hyphen or + // contain two consecutive hyphens // // Example: my-snapshot-id // diff --git a/service/redshift/api_op_CreateClusterSubnetGroup.go b/service/redshift/api_op_CreateClusterSubnetGroup.go index 5855e945eaf..c46a486fd34 100644 --- a/service/redshift/api_op_CreateClusterSubnetGroup.go +++ b/service/redshift/api_op_CreateClusterSubnetGroup.go @@ -38,16 +38,15 @@ type CreateClusterSubnetGroupInput struct { // The name for the subnet group. Amazon Redshift stores the value as a lowercase // string. Constraints: // - // * Must contain no more than 255 alphanumeric - // characters or hyphens. + // * Must contain no more than 255 alphanumeric characters or + // hyphens. // - // * Must not be "Default". 
+ // * Must not be "Default". // - // * Must be unique for - // all subnet groups that are created by your AWS account. + // * Must be unique for all subnet groups that + // are created by your AWS account. // - // Example: - // examplesubnetgroup + // Example: examplesubnetgroup // // This member is required. ClusterSubnetGroupName *string diff --git a/service/redshift/api_op_CreateEventSubscription.go b/service/redshift/api_op_CreateEventSubscription.go index adf1427d78a..f4955dffc75 100644 --- a/service/redshift/api_op_CreateEventSubscription.go +++ b/service/redshift/api_op_CreateEventSubscription.go @@ -57,16 +57,16 @@ type CreateEventSubscriptionInput struct { // The name of the event subscription to be created. Constraints: // - // * Cannot be + // * Cannot be // null, empty, or blank. // - // * Must contain from 1 to 255 alphanumeric characters - // or hyphens. + // * Must contain from 1 to 255 alphanumeric characters or + // hyphens. // - // * First character must be a letter. + // * First character must be a letter. // - // * Cannot end with a - // hyphen or contain two consecutive hyphens. + // * Cannot end with a hyphen or + // contain two consecutive hyphens. // // This member is required. SubscriptionName *string diff --git a/service/redshift/api_op_CreateSnapshotCopyGrant.go b/service/redshift/api_op_CreateSnapshotCopyGrant.go index f02e7501df4..a95f706ce50 100644 --- a/service/redshift/api_op_CreateSnapshotCopyGrant.go +++ b/service/redshift/api_op_CreateSnapshotCopyGrant.go @@ -38,19 +38,18 @@ type CreateSnapshotCopyGrantInput struct { // The name of the snapshot copy grant. This name must be unique in the region for // the AWS account. Constraints: // - // * Must contain from 1 to 63 alphanumeric + // * Must contain from 1 to 63 alphanumeric // characters or hyphens. // - // * Alphabetic characters must be lowercase. + // * Alphabetic characters must be lowercase. // - // * - // First character must be a letter. + // * First + // character must be a letter. // - // * Cannot end with a hyphen or contain two + // * Cannot end with a hyphen or contain two // consecutive hyphens. // - // * Must be unique for all clusters within an AWS - // account. + // * Must be unique for all clusters within an AWS account. // // This member is required. SnapshotCopyGrantName *string diff --git a/service/redshift/api_op_CreateUsageLimit.go b/service/redshift/api_op_CreateUsageLimit.go index 9c50af6fdc4..e2fa0ceabf0 100644 --- a/service/redshift/api_op_CreateUsageLimit.go +++ b/service/redshift/api_op_CreateUsageLimit.go @@ -76,13 +76,13 @@ type CreateUsageLimitOutput struct { // The action that Amazon Redshift takes when the limit is reached. Possible values // are: // - // * log - To log an event in a system table. The default is log. + // * log - To log an event in a system table. The default is log. // - // * + // * // emit-metric - To emit CloudWatch metrics. // - // * disable - To disable the - // feature until the next usage period begins. + // * disable - To disable the feature + // until the next usage period begins. BreachAction types.UsageLimitBreachAction // The identifier of the cluster with a usage limit. diff --git a/service/redshift/api_op_DeleteCluster.go b/service/redshift/api_op_DeleteCluster.go index ee66eb7c308..f11802681cb 100644 --- a/service/redshift/api_op_DeleteCluster.go +++ b/service/redshift/api_op_DeleteCluster.go @@ -46,16 +46,16 @@ type DeleteClusterInput struct { // The identifier of the cluster to be deleted. 
Constraints: // - // * Must contain + // * Must contain // lowercase characters. // - // * Must contain from 1 to 63 alphanumeric characters - // or hyphens. + // * Must contain from 1 to 63 alphanumeric characters or + // hyphens. // - // * First character must be a letter. + // * First character must be a letter. // - // * Cannot end with a - // hyphen or contain two consecutive hyphens. + // * Cannot end with a hyphen or + // contain two consecutive hyphens. // // This member is required. ClusterIdentifier *string @@ -64,13 +64,13 @@ type DeleteClusterInput struct { // deleting the cluster. If this parameter is provided, SkipFinalClusterSnapshot // must be false. Constraints: // - // * Must be 1 to 255 alphanumeric characters. - // + // * Must be 1 to 255 alphanumeric characters. // - // * First character must be a letter. + // * + // First character must be a letter. // - // * Cannot end with a hyphen or contain - // two consecutive hyphens. + // * Cannot end with a hyphen or contain two + // consecutive hyphens. FinalClusterSnapshotIdentifier *string // The number of days that a manual snapshot is retained. If the value is -1, the diff --git a/service/redshift/api_op_DeleteClusterParameterGroup.go b/service/redshift/api_op_DeleteClusterParameterGroup.go index deb4973c813..422626218a5 100644 --- a/service/redshift/api_op_DeleteClusterParameterGroup.go +++ b/service/redshift/api_op_DeleteClusterParameterGroup.go @@ -32,11 +32,11 @@ type DeleteClusterParameterGroupInput struct { // The name of the parameter group to be deleted. Constraints: // - // * Must be the - // name of an existing cluster parameter group. + // * Must be the name + // of an existing cluster parameter group. // - // * Cannot delete a default - // cluster parameter group. + // * Cannot delete a default cluster + // parameter group. // // This member is required. ParameterGroupName *string diff --git a/service/redshift/api_op_DescribeClusterSnapshots.go b/service/redshift/api_op_DescribeClusterSnapshots.go index 5bb222e3966..c40014f5d7e 100644 --- a/service/redshift/api_op_DescribeClusterSnapshots.go +++ b/service/redshift/api_op_DescribeClusterSnapshots.go @@ -47,19 +47,19 @@ type DescribeClusterSnapshotsInput struct { // cluster, that is, a cluster that has not been deleted. Values for this parameter // work as follows: // - // * If ClusterExists is set to true, ClusterIdentifier is + // * If ClusterExists is set to true, ClusterIdentifier is // required. // - // * If ClusterExists is set to false and ClusterIdentifier isn't + // * If ClusterExists is set to false and ClusterIdentifier isn't // specified, all snapshots associated with deleted clusters (orphaned snapshots) // are returned. // - // * If ClusterExists is set to false and ClusterIdentifier is + // * If ClusterExists is set to false and ClusterIdentifier is // specified for a deleted cluster, snapshots associated with that cluster are // returned. // - // * If ClusterExists is set to false and ClusterIdentifier is - // specified for an existing cluster, no snapshots are returned. + // * If ClusterExists is set to false and ClusterIdentifier is specified + // for an existing cluster, no snapshots are returned. ClusterExists *bool // The identifier of the cluster which generated the requested snapshots. 
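// Illustrative sketch (not generated code) of the semantics documented in the
// re-wrapped comments above: DeleteCluster takes a final snapshot only when
// SkipFinalClusterSnapshot is false and the snapshot identifier satisfies the
// listed constraints, and DescribeClusterSnapshots with ClusterExists=false and
// no ClusterIdentifier returns only orphaned snapshots. The identifiers below
// are placeholders, and config.LoadDefaultConfig is shown with its GA
// signature, which may differ slightly in the preview SDK built from this
// repository.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/redshift"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := redshift.NewFromConfig(cfg)

	// Delete a cluster and keep a final snapshot. The snapshot identifier is
	// 1 to 255 alphanumeric characters or hyphens, starts with a letter, and
	// does not end with a hyphen or contain two consecutive hyphens.
	if _, err := client.DeleteCluster(ctx, &redshift.DeleteClusterInput{
		ClusterIdentifier:              aws.String("examplecluster"),
		SkipFinalClusterSnapshot:       aws.Bool(false),
		FinalClusterSnapshotIdentifier: aws.String("examplecluster-final-snapshot"),
	}); err != nil {
		log.Fatal(err)
	}

	// List orphaned snapshots: ClusterExists is false and no ClusterIdentifier
	// is supplied, so only snapshots of deleted clusters are returned.
	out, err := client.DescribeClusterSnapshots(ctx, &redshift.DescribeClusterSnapshotsInput{
		ClusterExists: aws.Bool(false),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("orphaned snapshots:", len(out.Snapshots))
}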
diff --git a/service/redshift/api_op_DescribeClusterVersions.go b/service/redshift/api_op_DescribeClusterVersions.go index ebcc2af6b47..f894e83eebb 100644 --- a/service/redshift/api_op_DescribeClusterVersions.go +++ b/service/redshift/api_op_DescribeClusterVersions.go @@ -38,13 +38,12 @@ type DescribeClusterVersionsInput struct { // The name of a specific cluster parameter group family to return details for. // Constraints: // - // * Must be 1 to 255 alphanumeric characters + // * Must be 1 to 255 alphanumeric characters // - // * First - // character must be a letter + // * First character must + // be a letter // - // * Cannot end with a hyphen or contain two - // consecutive hyphens + // * Cannot end with a hyphen or contain two consecutive hyphens ClusterParameterGroupFamily *string // The specific cluster version to return. Example: 1.0 diff --git a/service/redshift/api_op_DescribeEvents.go b/service/redshift/api_op_DescribeEvents.go index 2d4f7181760..2325cb581b1 100644 --- a/service/redshift/api_op_DescribeEvents.go +++ b/service/redshift/api_op_DescribeEvents.go @@ -64,34 +64,34 @@ type DescribeEventsInput struct { // Constraints: If SourceIdentifier is supplied, SourceType must also be // provided. // - // * Specify a cluster identifier when SourceType is cluster. + // * Specify a cluster identifier when SourceType is cluster. // - // * - // Specify a cluster security group name when SourceType is - // cluster-security-group. + // * Specify + // a cluster security group name when SourceType is cluster-security-group. // - // * Specify a cluster parameter group name when - // SourceType is cluster-parameter-group. + // * + // Specify a cluster parameter group name when SourceType is + // cluster-parameter-group. // - // * Specify a cluster snapshot - // identifier when SourceType is cluster-snapshot. + // * Specify a cluster snapshot identifier when + // SourceType is cluster-snapshot. SourceIdentifier *string // The event source to retrieve events for. If no value is specified, all events // are returned. Constraints: If SourceType is supplied, SourceIdentifier must also // be provided. // - // * Specify cluster when SourceIdentifier is a cluster + // * Specify cluster when SourceIdentifier is a cluster // identifier. // - // * Specify cluster-security-group when SourceIdentifier is a - // cluster security group name. + // * Specify cluster-security-group when SourceIdentifier is a cluster + // security group name. // - // * Specify cluster-parameter-group when - // SourceIdentifier is a cluster parameter group name. + // * Specify cluster-parameter-group when SourceIdentifier is + // a cluster parameter group name. // - // * Specify - // cluster-snapshot when SourceIdentifier is a cluster snapshot identifier. + // * Specify cluster-snapshot when + // SourceIdentifier is a cluster snapshot identifier. SourceType types.SourceType // The beginning of the time interval to retrieve events for, specified in ISO 8601 diff --git a/service/redshift/api_op_DescribeTags.go b/service/redshift/api_op_DescribeTags.go index 57ab285b757..e17b299cdf0 100644 --- a/service/redshift/api_op_DescribeTags.go +++ b/service/redshift/api_op_DescribeTags.go @@ -16,23 +16,22 @@ import ( // as clusters, snapshots, and so on. The following are limitations for // DescribeTags: // -// * You cannot specify an ARN and a resource-type value -// together in the same request. +// * You cannot specify an ARN and a resource-type value together in +// the same request. 
// -// * You cannot use the MaxRecords and Marker -// parameters together with the ARN parameter. +// * You cannot use the MaxRecords and Marker parameters +// together with the ARN parameter. // -// * The MaxRecords parameter can -// be a range from 10 to 50 results to return in a request. +// * The MaxRecords parameter can be a range from +// 10 to 50 results to return in a request. // -// If you specify both -// tag keys and tag values in the same request, Amazon Redshift returns all -// resources that match any combination of the specified keys and values. For -// example, if you have owner and environment for tag keys, and admin and test for -// tag values, all resources that have any combination of those values are -// returned. If both tag keys and values are omitted from the request, resources -// are returned regardless of whether they have tag keys or values associated with -// them. +// If you specify both tag keys and tag +// values in the same request, Amazon Redshift returns all resources that match any +// combination of the specified keys and values. For example, if you have owner and +// environment for tag keys, and admin and test for tag values, all resources that +// have any combination of those values are returned. If both tag keys and values +// are omitted from the request, resources are returned regardless of whether they +// have tag keys or values associated with them. func (c *Client) DescribeTags(ctx context.Context, params *DescribeTagsInput, optFns ...func(*Options)) (*DescribeTagsOutput, error) { if params == nil { params = &DescribeTagsInput{} @@ -71,31 +70,31 @@ type DescribeTagsInput struct { // The type of resource with which you want to view tags. Valid resource types // are: // - // * Cluster + // * Cluster // - // * CIDR/IP + // * CIDR/IP // - // * EC2 security group + // * EC2 security group // - // * Snapshot + // * Snapshot // + // * Cluster security + // group // - // * Cluster security group + // * Subnet group // - // * Subnet group + // * HSM connection // - // * HSM connection + // * HSM certificate // - // * HSM - // certificate + // * Parameter group // - // * Parameter group + // * + // Snapshot copy grant // - // * Snapshot copy grant - // - // For more - // information about Amazon Redshift resource types and constructing ARNs, go to - // Specifying Policy Elements: Actions, Effects, Resources, and Principals + // For more information about Amazon Redshift resource types + // and constructing ARNs, go to Specifying Policy Elements: Actions, Effects, + // Resources, and Principals // (https://docs.aws.amazon.com/redshift/latest/mgmt/redshift-iam-access-control-overview.html#redshift-iam-access-control-specify-actions) // in the Amazon Redshift Cluster Management Guide. ResourceType *string diff --git a/service/redshift/api_op_DescribeUsageLimits.go b/service/redshift/api_op_DescribeUsageLimits.go index c9919da3547..abcab9fe031 100644 --- a/service/redshift/api_op_DescribeUsageLimits.go +++ b/service/redshift/api_op_DescribeUsageLimits.go @@ -15,19 +15,19 @@ import ( // of input usage limit identifier, cluster identifier, and feature type // parameters: // -// * If usage limit identifier, cluster identifier, and feature -// type are not provided, then all usage limit objects for the current account in -// the current region are returned. +// * If usage limit identifier, cluster identifier, and feature type +// are not provided, then all usage limit objects for the current account in the +// current region are returned. 
// -// * If usage limit identifier is provided, -// then the corresponding usage limit object is returned. +// * If usage limit identifier is provided, then the +// corresponding usage limit object is returned. // -// * If cluster -// identifier is provided, then all usage limit objects for the specified cluster -// are returned. +// * If cluster identifier is +// provided, then all usage limit objects for the specified cluster are +// returned. // -// * If cluster identifier and feature type are provided, then -// all usage limit objects for the combination of cluster and feature are returned. +// * If cluster identifier and feature type are provided, then all usage +// limit objects for the combination of cluster and feature are returned. func (c *Client) DescribeUsageLimits(ctx context.Context, params *DescribeUsageLimitsInput, optFns ...func(*Options)) (*DescribeUsageLimitsOutput, error) { if params == nil { params = &DescribeUsageLimitsInput{} diff --git a/service/redshift/api_op_EnableLogging.go b/service/redshift/api_op_EnableLogging.go index 285bd4929e2..8eebd895737 100644 --- a/service/redshift/api_op_EnableLogging.go +++ b/service/redshift/api_op_EnableLogging.go @@ -34,10 +34,10 @@ type EnableLoggingInput struct { // The name of an existing S3 bucket where the log files are to be stored. // Constraints: // - // * Must be in the same region as the cluster + // * Must be in the same region as the cluster // - // * The cluster - // must have read bucket and put object permissions + // * The cluster must + // have read bucket and put object permissions // // This member is required. BucketName *string @@ -50,23 +50,22 @@ type EnableLoggingInput struct { // The prefix applied to the log file names. Constraints: // - // * Cannot exceed 512 + // * Cannot exceed 512 // characters // - // * Cannot contain spaces( ), double quotes ("), single quotes - // ('), a backslash (\), or control characters. The hexadecimal codes for invalid + // * Cannot contain spaces( ), double quotes ("), single quotes ('), a + // backslash (\), or control characters. The hexadecimal codes for invalid // characters are: // - // * x00 to x20 + // * x00 to x20 // - // * x22 + // * x22 // - // * x27 + // * x27 // - // * - // x5c + // * x5c // - // * x7f or larger + // * x7f or larger S3KeyPrefix *string } diff --git a/service/redshift/api_op_GetClusterCredentials.go b/service/redshift/api_op_GetClusterCredentials.go index 9f6809ec379..69afe0f0942 100644 --- a/service/redshift/api_op_GetClusterCredentials.go +++ b/service/redshift/api_op_GetClusterCredentials.go @@ -66,20 +66,20 @@ type GetClusterCredentialsInput struct { // (https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_USER.html) in the // Amazon Redshift Database Developer Guide. Constraints: // - // * Must be 1 to 64 + // * Must be 1 to 64 // alphanumeric characters or hyphens. The user name can't be PUBLIC. // - // * Must + // * Must // contain only lowercase letters, numbers, underscore, plus sign, period (dot), at // symbol (@), or hyphen. // - // * First character must be a letter. + // * First character must be a letter. // - // * Must not - // contain a colon ( : ) or slash ( / ). + // * Must not contain + // a colon ( : ) or slash ( / ). // - // * Cannot be a reserved word. A list - // of reserved words can be found in Reserved Words + // * Cannot be a reserved word. 
A list of reserved + // words can be found in Reserved Words // (http://docs.aws.amazon.com/redshift/latest/dg/r_pg_keywords.html) in the Amazon // Redshift Database Developer Guide. // @@ -95,19 +95,19 @@ type GetClusterCredentialsInput struct { // existing user. If not specified, a new user is added only to PUBLIC. Database // group name constraints // - // * Must be 1 to 64 alphanumeric characters or - // hyphens + // * Must be 1 to 64 alphanumeric characters or hyphens // - // * Must contain only lowercase letters, numbers, underscore, plus - // sign, period (dot), at symbol (@), or hyphen. + // * + // Must contain only lowercase letters, numbers, underscore, plus sign, period + // (dot), at symbol (@), or hyphen. // - // * First character must be a - // letter. + // * First character must be a letter. // - // * Must not contain a colon ( : ) or slash ( / ). + // * Must + // not contain a colon ( : ) or slash ( / ). // - // * Cannot be a - // reserved word. A list of reserved words can be found in Reserved Words + // * Cannot be a reserved word. A list + // of reserved words can be found in Reserved Words // (http://docs.aws.amazon.com/redshift/latest/dg/r_pg_keywords.html) in the Amazon // Redshift Database Developer Guide. DbGroups []*string @@ -115,22 +115,21 @@ type GetClusterCredentialsInput struct { // The name of a database that DbUser is authorized to log on to. If DbName is not // specified, DbUser can log on to any existing database. Constraints: // - // * Must - // be 1 to 64 alphanumeric characters or hyphens + // * Must be 1 + // to 64 alphanumeric characters or hyphens // - // * Must contain only lowercase - // letters, numbers, underscore, plus sign, period (dot), at symbol (@), or - // hyphen. + // * Must contain only lowercase letters, + // numbers, underscore, plus sign, period (dot), at symbol (@), or hyphen. // - // * First character must be a letter. + // * First + // character must be a letter. // - // * Must not contain a colon - // ( : ) or slash ( / ). + // * Must not contain a colon ( : ) or slash ( / ). // - // * Cannot be a reserved word. A list of reserved words - // can be found in Reserved Words - // (http://docs.aws.amazon.com/redshift/latest/dg/r_pg_keywords.html) in the Amazon - // Redshift Database Developer Guide. + // * + // Cannot be a reserved word. A list of reserved words can be found in Reserved + // Words (http://docs.aws.amazon.com/redshift/latest/dg/r_pg_keywords.html) in the + // Amazon Redshift Database Developer Guide. DbName *string // The number of seconds until the returned temporary password expires. Constraint: diff --git a/service/redshift/api_op_ModifyCluster.go b/service/redshift/api_op_ModifyCluster.go index cf5edfe0d7d..92febf9a80b 100644 --- a/service/redshift/api_op_ModifyCluster.go +++ b/service/redshift/api_op_ModifyCluster.go @@ -69,13 +69,13 @@ type ModifyClusterInput struct { // associated with the cluster, and not in the list of groups to apply, will be // revoked from the cluster. Constraints: // - // * Must be 1 to 255 alphanumeric + // * Must be 1 to 255 alphanumeric // characters or hyphens // - // * First character must be a letter + // * First character must be a letter // - // * Cannot end - // with a hyphen or contain two consecutive hyphens + // * Cannot end with a + // hyphen or contain two consecutive hyphens ClusterSecurityGroups []*string // The new cluster type. 
When you submit your cluster resize request, your existing @@ -153,36 +153,36 @@ type ModifyClusterInput struct { // user account for a cluster if the password is lost. Default: Uses existing // setting. Constraints: // - // * Must be between 8 and 64 characters in length. + // * Must be between 8 and 64 characters in length. // + // * Must + // contain at least one uppercase letter. // - // * Must contain at least one uppercase letter. + // * Must contain at least one lowercase + // letter. // - // * Must contain at least one - // lowercase letter. + // * Must contain one number. // - // * Must contain one number. - // - // * Can be any printable - // ASCII character (ASCII code 33 to 126) except ' (single quote), " (double - // quote), \, /, @, or space. + // * Can be any printable ASCII character + // (ASCII code 33 to 126) except ' (single quote), " (double quote), \, /, @, or + // space. MasterUserPassword *string // The new identifier for the cluster. Constraints: // - // * Must contain from 1 to - // 63 alphanumeric characters or hyphens. + // * Must contain from 1 to 63 + // alphanumeric characters or hyphens. // - // * Alphabetic characters must be + // * Alphabetic characters must be // lowercase. // - // * First character must be a letter. + // * First character must be a letter. // - // * Cannot end with a - // hyphen or contain two consecutive hyphens. + // * Cannot end with a hyphen or + // contain two consecutive hyphens. // - // * Must be unique for all - // clusters within an AWS account. + // * Must be unique for all clusters within an + // AWS account. // // Example: examplecluster NewClusterIdentifier *string diff --git a/service/redshift/api_op_ModifyUsageLimit.go b/service/redshift/api_op_ModifyUsageLimit.go index c720139390f..bbdaafe4228 100644 --- a/service/redshift/api_op_ModifyUsageLimit.go +++ b/service/redshift/api_op_ModifyUsageLimit.go @@ -53,13 +53,13 @@ type ModifyUsageLimitOutput struct { // The action that Amazon Redshift takes when the limit is reached. Possible values // are: // - // * log - To log an event in a system table. The default is log. + // * log - To log an event in a system table. The default is log. // - // * + // * // emit-metric - To emit CloudWatch metrics. // - // * disable - To disable the - // feature until the next usage period begins. + // * disable - To disable the feature + // until the next usage period begins. BreachAction types.UsageLimitBreachAction // The identifier of the cluster with a usage limit. diff --git a/service/redshift/api_op_ResizeCluster.go b/service/redshift/api_op_ResizeCluster.go index 1efbe0bdb88..7772305aa00 100644 --- a/service/redshift/api_op_ResizeCluster.go +++ b/service/redshift/api_op_ResizeCluster.go @@ -17,30 +17,29 @@ import ( // operations more quickly than with the classic resize method. Elastic resize // operations have the following restrictions: // -// * You can only resize clusters -// of the following types: +// * You can only resize clusters of +// the following types: // -// * dc1.large (if your cluster is in a VPC) +// * dc1.large (if your cluster is in a VPC) // +// * dc1.8xlarge +// (if your cluster is in a VPC) // -// * dc1.8xlarge (if your cluster is in a VPC) +// * dc2.large // -// * dc2.large +// * dc2.8xlarge // -// * -// dc2.8xlarge +// * ds2.xlarge // -// * ds2.xlarge +// * +// ds2.8xlarge // -// * ds2.8xlarge +// * ra3.4xlarge // -// * -// ra3.4xlarge +// * ra3.16xlarge // -// * ra3.16xlarge -// -// * The type of nodes that you add must -// match the node type for the cluster. 
+// * The type of nodes that you add +// must match the node type for the cluster. func (c *Client) ResizeCluster(ctx context.Context, params *ResizeClusterInput, optFns ...func(*Options)) (*ResizeClusterOutput, error) { if params == nil { params = &ResizeClusterInput{} diff --git a/service/redshift/api_op_RestoreFromClusterSnapshot.go b/service/redshift/api_op_RestoreFromClusterSnapshot.go index e2d9b74ecc4..b9a8fe667c8 100644 --- a/service/redshift/api_op_RestoreFromClusterSnapshot.go +++ b/service/redshift/api_op_RestoreFromClusterSnapshot.go @@ -44,18 +44,18 @@ type RestoreFromClusterSnapshotInput struct { // The identifier of the cluster that will be created from restoring the snapshot. // Constraints: // - // * Must contain from 1 to 63 alphanumeric characters or - // hyphens. + // * Must contain from 1 to 63 alphanumeric characters or hyphens. // - // * Alphabetic characters must be lowercase. + // * + // Alphabetic characters must be lowercase. // - // * First character - // must be a letter. + // * First character must be a letter. // - // * Cannot end with a hyphen or contain two consecutive - // hyphens. + // * + // Cannot end with a hyphen or contain two consecutive hyphens. // - // * Must be unique for all clusters within an AWS account. + // * Must be unique + // for all clusters within an AWS account. // // This member is required. ClusterIdentifier *string @@ -90,12 +90,12 @@ type RestoreFromClusterSnapshotInput struct { // (https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html). // Constraints: // - // * Must be 1 to 255 alphanumeric characters or hyphens. + // * Must be 1 to 255 alphanumeric characters or hyphens. // - // * - // First character must be a letter. + // * First + // character must be a letter. // - // * Cannot end with a hyphen or contain two + // * Cannot end with a hyphen or contain two // consecutive hyphens. ClusterParameterGroupName *string diff --git a/service/redshift/types/enums.go b/service/redshift/types/enums.go index deb57c190c1..be782f764bb 100644 --- a/service/redshift/types/enums.go +++ b/service/redshift/types/enums.go @@ -6,9 +6,9 @@ type ActionType string // Enum values for ActionType const ( - ActionTypeRestore_cluster ActionType = "restore-cluster" - ActionTypeRecommend_node_config ActionType = "recommend-node-config" - ActionTypeResize_cluster ActionType = "resize-cluster" + ActionTypeRestoreCluster ActionType = "restore-cluster" + ActionTypeRecommendNodeConfig ActionType = "recommend-node-config" + ActionTypeResizeCluster ActionType = "resize-cluster" ) // Values returns all known values for ActionType. Note that this can be expanded @@ -26,8 +26,8 @@ type Mode string // Enum values for Mode const ( - ModeStandard Mode = "standard" - ModeHigh_performance Mode = "high-performance" + ModeStandard Mode = "standard" + ModeHighPerformance Mode = "high-performance" ) // Values returns all known values for Mode. 
Note that this can be expanded in the @@ -44,10 +44,10 @@ type NodeConfigurationOptionsFilterName string // Enum values for NodeConfigurationOptionsFilterName const ( - NodeConfigurationOptionsFilterNameNode_type NodeConfigurationOptionsFilterName = "NodeType" - NodeConfigurationOptionsFilterNameNum_nodes NodeConfigurationOptionsFilterName = "NumberOfNodes" - NodeConfigurationOptionsFilterNameEstimated_disk_utilization_percent NodeConfigurationOptionsFilterName = "EstimatedDiskUtilizationPercent" - NodeConfigurationOptionsFilterNameMode NodeConfigurationOptionsFilterName = "Mode" + NodeConfigurationOptionsFilterNameNodeType NodeConfigurationOptionsFilterName = "NodeType" + NodeConfigurationOptionsFilterNameNumNodes NodeConfigurationOptionsFilterName = "NumberOfNodes" + NodeConfigurationOptionsFilterNameEstimatedDiskUtilizationPercent NodeConfigurationOptionsFilterName = "EstimatedDiskUtilizationPercent" + NodeConfigurationOptionsFilterNameMode NodeConfigurationOptionsFilterName = "Mode" ) // Values returns all known values for NodeConfigurationOptionsFilterName. Note @@ -131,8 +131,8 @@ type ScheduledActionFilterName string // Enum values for ScheduledActionFilterName const ( - ScheduledActionFilterNameCluster_identifier ScheduledActionFilterName = "cluster-identifier" - ScheduledActionFilterNameIam_role ScheduledActionFilterName = "iam-role" + ScheduledActionFilterNameClusterIdentifier ScheduledActionFilterName = "cluster-identifier" + ScheduledActionFilterNameIamRole ScheduledActionFilterName = "iam-role" ) // Values returns all known values for ScheduledActionFilterName. Note that this @@ -167,9 +167,9 @@ type ScheduledActionTypeValues string // Enum values for ScheduledActionTypeValues const ( - ScheduledActionTypeValuesResize_cluster ScheduledActionTypeValues = "ResizeCluster" - ScheduledActionTypeValuesPause_cluster ScheduledActionTypeValues = "PauseCluster" - ScheduledActionTypeValuesResume_cluster ScheduledActionTypeValues = "ResumeCluster" + ScheduledActionTypeValuesResizeCluster ScheduledActionTypeValues = "ResizeCluster" + ScheduledActionTypeValuesPauseCluster ScheduledActionTypeValues = "PauseCluster" + ScheduledActionTypeValuesResumeCluster ScheduledActionTypeValues = "ResumeCluster" ) // Values returns all known values for ScheduledActionTypeValues. Note that this @@ -207,9 +207,9 @@ type SnapshotAttributeToSortBy string // Enum values for SnapshotAttributeToSortBy const ( - SnapshotAttributeToSortBySource_type SnapshotAttributeToSortBy = "SOURCE_TYPE" - SnapshotAttributeToSortByTotal_size SnapshotAttributeToSortBy = "TOTAL_SIZE" - SnapshotAttributeToSortByCreate_time SnapshotAttributeToSortBy = "CREATE_TIME" + SnapshotAttributeToSortBySourceType SnapshotAttributeToSortBy = "SOURCE_TYPE" + SnapshotAttributeToSortByTotalSize SnapshotAttributeToSortBy = "TOTAL_SIZE" + SnapshotAttributeToSortByCreateTime SnapshotAttributeToSortBy = "CREATE_TIME" ) // Values returns all known values for SnapshotAttributeToSortBy. 
Note that this @@ -269,11 +269,11 @@ type TableRestoreStatusType string // Enum values for TableRestoreStatusType const ( - TableRestoreStatusTypePending TableRestoreStatusType = "PENDING" - TableRestoreStatusTypeIn_progress TableRestoreStatusType = "IN_PROGRESS" - TableRestoreStatusTypeSucceeded TableRestoreStatusType = "SUCCEEDED" - TableRestoreStatusTypeFailed TableRestoreStatusType = "FAILED" - TableRestoreStatusTypeCanceled TableRestoreStatusType = "CANCELED" + TableRestoreStatusTypePending TableRestoreStatusType = "PENDING" + TableRestoreStatusTypeInProgress TableRestoreStatusType = "IN_PROGRESS" + TableRestoreStatusTypeSucceeded TableRestoreStatusType = "SUCCEEDED" + TableRestoreStatusTypeFailed TableRestoreStatusType = "FAILED" + TableRestoreStatusTypeCanceled TableRestoreStatusType = "CANCELED" ) // Values returns all known values for TableRestoreStatusType. Note that this can @@ -293,9 +293,9 @@ type UsageLimitBreachAction string // Enum values for UsageLimitBreachAction const ( - UsageLimitBreachActionLog UsageLimitBreachAction = "log" - UsageLimitBreachActionEmit_metric UsageLimitBreachAction = "emit-metric" - UsageLimitBreachActionDisable UsageLimitBreachAction = "disable" + UsageLimitBreachActionLog UsageLimitBreachAction = "log" + UsageLimitBreachActionEmitMetric UsageLimitBreachAction = "emit-metric" + UsageLimitBreachActionDisable UsageLimitBreachAction = "disable" ) // Values returns all known values for UsageLimitBreachAction. Note that this can @@ -313,8 +313,8 @@ type UsageLimitFeatureType string // Enum values for UsageLimitFeatureType const ( - UsageLimitFeatureTypeSpectrum UsageLimitFeatureType = "spectrum" - UsageLimitFeatureTypeConcurrency_scaling UsageLimitFeatureType = "concurrency-scaling" + UsageLimitFeatureTypeSpectrum UsageLimitFeatureType = "spectrum" + UsageLimitFeatureTypeConcurrencyScaling UsageLimitFeatureType = "concurrency-scaling" ) // Values returns all known values for UsageLimitFeatureType. Note that this can be @@ -331,8 +331,8 @@ type UsageLimitLimitType string // Enum values for UsageLimitLimitType const ( - UsageLimitLimitTypeTime UsageLimitLimitType = "time" - UsageLimitLimitTypeData_scanned UsageLimitLimitType = "data-scanned" + UsageLimitLimitTypeTime UsageLimitLimitType = "time" + UsageLimitLimitTypeDataScanned UsageLimitLimitType = "data-scanned" ) // Values returns all known values for UsageLimitLimitType. Note that this can be diff --git a/service/redshift/types/types.go b/service/redshift/types/types.go index 10b8cc4fe28..88647edc8e0 100644 --- a/service/redshift/types/types.go +++ b/service/redshift/types/types.go @@ -60,20 +60,19 @@ type Cluster struct { // The availability status of the cluster for queries. Possible values are the // following: // - // * Available - The cluster is available for queries. + // * Available - The cluster is available for queries. // - // * - // Unavailable - The cluster is not available for queries. + // * Unavailable - + // The cluster is not available for queries. // - // * Maintenance - The - // cluster is intermittently available for queries due to maintenance activities. + // * Maintenance - The cluster is + // intermittently available for queries due to maintenance activities. // + // * Modifying + // - The cluster is intermittently available for queries due to changes that modify + // the cluster. // - // * Modifying - The cluster is intermittently available for queries due to changes - // that modify the cluster. - // - // * Failed - The cluster failed and is not available - // for queries. 
+ // * Failed - The cluster failed and is not available for queries. ClusterAvailabilityStatus *string // The date and time that the cluster was created. @@ -109,51 +108,51 @@ type Cluster struct { // The current state of the cluster. Possible values are the following: // - // * + // * // available // - // * available, prep-for-resize - // - // * available, resize-cleanup + // * available, prep-for-resize // + // * available, resize-cleanup // - // * cancelling-resize + // * + // cancelling-resize // - // * creating + // * creating // - // * deleting + // * deleting // - // * final-snapshot + // * final-snapshot // - // * + // * // hardware-failure // - // * incompatible-hsm + // * incompatible-hsm // - // * incompatible-network + // * incompatible-network // - // * + // * // incompatible-parameters // - // * incompatible-restore - // - // * modifying + // * incompatible-restore // - // * - // paused + // * modifying // - // * rebooting + // * paused // - // * renaming + // * + // rebooting // - // * resizing + // * renaming // - // * rotating-keys + // * resizing // + // * rotating-keys // // * storage-full // - // * updating-hsm + // * + // updating-hsm ClusterStatus *string // The name of the subnet group that is associated with the cluster. This parameter @@ -203,10 +202,10 @@ type Cluster struct { // The status of next expected snapshot for clusters having a valid snapshot // schedule and backups enabled. Possible values are the following: // - // * OnTrack - // - The next snapshot is expected to be taken on time. + // * OnTrack - + // The next snapshot is expected to be taken on time. // - // * Pending - The next + // * Pending - The next // snapshot is pending to be taken. ExpectedNextSnapshotScheduleTimeStatus *string @@ -265,10 +264,10 @@ type Cluster struct { // Returns the following: // - // * AllowCancelResize: a boolean value indicating if - // the resize operation can be cancelled. + // * AllowCancelResize: a boolean value indicating if the + // resize operation can be cancelled. // - // * ResizeType: Returns ClassicResize + // * ResizeType: Returns ClassicResize ResizeInfo *ResizeInfo // A value that describes the status of a cluster restore action. This parameter @@ -327,14 +326,14 @@ type ClusterIamRole struct { // A value that describes the status of the IAM role's association with an Amazon // Redshift cluster. The following are possible statuses and descriptions. // - // * + // * // in-sync: The role is available for use by the cluster. // - // * adding: The role - // is in the process of being associated with the cluster. + // * adding: The role is in + // the process of being associated with the cluster. // - // * removing: The - // role is in the process of being disassociated with the cluster. + // * removing: The role is in + // the process of being disassociated with the cluster. ApplyStatus *string // The Amazon Resource Name (ARN) of the IAM role, for example, @@ -398,28 +397,28 @@ type ClusterParameterStatus struct { // the database, waiting for a cluster reboot, or encountered an error when being // applied. The following are possible statuses and descriptions. // - // * in-sync: - // The parameter value is in sync with the database. + // * in-sync: The + // parameter value is in sync with the database. // - // * pending-reboot: The - // parameter value will be applied after the cluster reboots. + // * pending-reboot: The parameter + // value will be applied after the cluster reboots. 
// - // * applying: The - // parameter value is being applied to the database. + // * applying: The parameter + // value is being applied to the database. // - // * invalid-parameter: - // Cannot apply the parameter value because it has an invalid value or syntax. + // * invalid-parameter: Cannot apply the + // parameter value because it has an invalid value or syntax. // + // * apply-deferred: + // The parameter contains static property changes. The changes are deferred until + // the cluster reboots. // - // * apply-deferred: The parameter contains static property changes. The changes - // are deferred until the cluster reboots. + // * apply-error: Cannot connect to the cluster. The + // parameter change will be applied after the cluster reboots. // - // * apply-error: Cannot connect to - // the cluster. The parameter change will be applied after the cluster reboots. - // - // - // * unknown-error: Cannot apply the parameter change right now. The change will be - // applied after the cluster reboots. + // * unknown-error: + // Cannot apply the parameter change right now. The change will be applied after + // the cluster reboots. ParameterApplyStatus *string // The name of the parameter. @@ -716,11 +715,11 @@ type EventSubscription struct { // The status of the Amazon Redshift event notification subscription. // Constraints: // - // * Can be one of the following: active | no-permission | + // * Can be one of the following: active | no-permission | // topic-not-exist // - // * The status "no-permission" indicates that Amazon Redshift - // no longer has permission to post to the Amazon SNS topic. The status + // * The status "no-permission" indicates that Amazon Redshift no + // longer has permission to post to the Amazon SNS topic. The status // "topic-not-exist" indicates that the topic was deleted after the subscription // was created. Status *string @@ -1011,21 +1010,20 @@ type ReservedNode struct { // The state of the reserved compute node. Possible Values: // - // * - // pending-payment-This reserved node has recently been purchased, and the sale has - // been approved, but payment has not yet been confirmed. + // * pending-payment-This + // reserved node has recently been purchased, and the sale has been approved, but + // payment has not yet been confirmed. // - // * active-This - // reserved node is owned by the caller and is available for use. + // * active-This reserved node is owned by the + // caller and is available for use. // - // * - // payment-failed-Payment failed for the purchase attempt. + // * payment-failed-Payment failed for the + // purchase attempt. // - // * retired-The - // reserved node is no longer available. + // * retired-The reserved node is no longer available. // - // * exchanging-The owner is exchanging - // the reserved node for another reserved node. + // * + // exchanging-The owner is exchanging the reserved node for another reserved node. State *string // The hourly rate Amazon Redshift charges you for this reserved node. @@ -1351,13 +1349,13 @@ type Snapshot struct { // The snapshot status. The value of the status depends on the API operation // used: // - // * CreateClusterSnapshot and CopyClusterSnapshot returns status as + // * CreateClusterSnapshot and CopyClusterSnapshot returns status as // "creating". // - // * DescribeClusterSnapshots returns status as "creating", + // * DescribeClusterSnapshots returns status as "creating", // "available", "final snapshot", or "failed". 
// - // * DeleteClusterSnapshot returns + // * DeleteClusterSnapshot returns // status as "deleted". Status *string @@ -1544,29 +1542,29 @@ type TaggedResource struct { // The type of resource with which the tag is associated. Valid resource types // are: // - // * Cluster - // - // * CIDR/IP + // * Cluster // - // * EC2 security group + // * CIDR/IP // - // * Snapshot + // * EC2 security group // + // * Snapshot // - // * Cluster security group + // * Cluster security + // group // - // * Subnet group + // * Subnet group // - // * HSM connection + // * HSM connection // - // * HSM - // certificate + // * HSM certificate // - // * Parameter group + // * Parameter + // group // - // For more information about Amazon Redshift - // resource types and constructing ARNs, go to Constructing an Amazon Redshift - // Amazon Resource Name (ARN) + // For more information about Amazon Redshift resource types and + // constructing ARNs, go to Constructing an Amazon Redshift Amazon Resource Name + // (ARN) // (https://docs.aws.amazon.com/redshift/latest/mgmt/redshift-iam-access-control-overview.html#redshift-iam-access-control-specify-actions) // in the Amazon Redshift Cluster Management Guide. ResourceType *string @@ -1598,13 +1596,13 @@ type UsageLimit struct { // The action that Amazon Redshift takes when the limit is reached. Possible values // are: // - // * log - To log an event in a system table. The default is log. + // * log - To log an event in a system table. The default is log. // - // * + // * // emit-metric - To emit CloudWatch metrics. // - // * disable - To disable the - // feature until the next usage period begins. + // * disable - To disable the feature + // until the next usage period begins. BreachAction UsageLimitBreachAction // The identifier of the cluster with a usage limit. diff --git a/service/redshiftdata/api_op_DescribeStatement.go b/service/redshiftdata/api_op_DescribeStatement.go index 998f5d6e72d..583580824dc 100644 --- a/service/redshiftdata/api_op_DescribeStatement.go +++ b/service/redshiftdata/api_op_DescribeStatement.go @@ -94,24 +94,23 @@ type DescribeStatementOutput struct { // The status of the SQL statement being described. Status values are defined as // follows: // - // * ABORTED - The query run was stopped by the user. + // * ABORTED - The query run was stopped by the user. // - // * ALL - A - // status value that includes all query statuses. This value can be used to filter + // * ALL - A status + // value that includes all query statuses. This value can be used to filter // results. // - // * FAILED - The query run failed. + // * FAILED - The query run failed. // - // * FINISHED - The query has - // finished running. + // * FINISHED - The query has finished + // running. // - // * PICKED - The query has been chosen to be run. + // * PICKED - The query has been chosen to be run. // - // * - // STARTED - The query run has started. + // * STARTED - The query + // run has started. // - // * SUBMITTED - The query was submitted, - // but not yet processed. + // * SUBMITTED - The query was submitted, but not yet processed. Status types.StatusString // The date and time (UTC) that the metadata for the SQL statement was last diff --git a/service/redshiftdata/api_op_DescribeTable.go b/service/redshiftdata/api_op_DescribeTable.go index 0f4091b7556..86550382d49 100644 --- a/service/redshiftdata/api_op_DescribeTable.go +++ b/service/redshiftdata/api_op_DescribeTable.go @@ -16,11 +16,11 @@ import ( // column list. 
Depending on the authorization method, use one of the following // combinations of request parameters: // -// * AWS Secrets Manager - specify the -// Amazon Resource Name (ARN) of the secret and the cluster identifier that matches -// the cluster in the secret. +// * AWS Secrets Manager - specify the Amazon +// Resource Name (ARN) of the secret and the cluster identifier that matches the +// cluster in the secret. // -// * Temporary credentials - specify the cluster +// * Temporary credentials - specify the cluster // identifier, the database name, and the database user name. Permission to call // the redshift:GetClusterCredentials operation is required to use this method. func (c *Client) DescribeTable(ctx context.Context, params *DescribeTableInput, optFns ...func(*Options)) (*DescribeTableOutput, error) { diff --git a/service/redshiftdata/api_op_ExecuteStatement.go b/service/redshiftdata/api_op_ExecuteStatement.go index 7c7f6c66426..98db8022253 100644 --- a/service/redshiftdata/api_op_ExecuteStatement.go +++ b/service/redshiftdata/api_op_ExecuteStatement.go @@ -16,12 +16,12 @@ import ( // Depending on the authorization method, use one of the following combinations of // request parameters: // -// * AWS Secrets Manager - specify the Amazon Resource -// Name (ARN) of the secret and the cluster identifier that matches the cluster in -// the secret. +// * AWS Secrets Manager - specify the Amazon Resource Name +// (ARN) of the secret and the cluster identifier that matches the cluster in the +// secret. // -// * Temporary credentials - specify the cluster identifier, the -// database name, and the database user name. Permission to call the +// * Temporary credentials - specify the cluster identifier, the database +// name, and the database user name. Permission to call the // redshift:GetClusterCredentials operation is required to use this method. func (c *Client) ExecuteStatement(ctx context.Context, params *ExecuteStatementInput, optFns ...func(*Options)) (*ExecuteStatementOutput, error) { if params == nil { diff --git a/service/redshiftdata/api_op_ListDatabases.go b/service/redshiftdata/api_op_ListDatabases.go index dbaa81db27a..595709870df 100644 --- a/service/redshiftdata/api_op_ListDatabases.go +++ b/service/redshiftdata/api_op_ListDatabases.go @@ -14,11 +14,11 @@ import ( // database list. Depending on the authorization method, use one of the following // combinations of request parameters: // -// * AWS Secrets Manager - specify the -// Amazon Resource Name (ARN) of the secret and the cluster identifier that matches -// the cluster in the secret. +// * AWS Secrets Manager - specify the Amazon +// Resource Name (ARN) of the secret and the cluster identifier that matches the +// cluster in the secret. // -// * Temporary credentials - specify the cluster +// * Temporary credentials - specify the cluster // identifier, the database name, and the database user name. Permission to call // the redshift:GetClusterCredentials operation is required to use this method. func (c *Client) ListDatabases(ctx context.Context, params *ListDatabasesInput, optFns ...func(*Options)) (*ListDatabasesOutput, error) { diff --git a/service/redshiftdata/api_op_ListSchemas.go b/service/redshiftdata/api_op_ListSchemas.go index d1581c472a7..a01fdd59bb6 100644 --- a/service/redshiftdata/api_op_ListSchemas.go +++ b/service/redshiftdata/api_op_ListSchemas.go @@ -14,11 +14,11 @@ import ( // list. 
Depending on the authorization method, use one of the following // combinations of request parameters: // -// * AWS Secrets Manager - specify the -// Amazon Resource Name (ARN) of the secret and the cluster identifier that matches -// the cluster in the secret. +// * AWS Secrets Manager - specify the Amazon +// Resource Name (ARN) of the secret and the cluster identifier that matches the +// cluster in the secret. // -// * Temporary credentials - specify the cluster +// * Temporary credentials - specify the cluster // identifier, the database name, and the database user name. Permission to call // the redshift:GetClusterCredentials operation is required to use this method. func (c *Client) ListSchemas(ctx context.Context, params *ListSchemasInput, optFns ...func(*Options)) (*ListSchemasOutput, error) { diff --git a/service/redshiftdata/api_op_ListStatements.go b/service/redshiftdata/api_op_ListStatements.go index c24e50feae0..dcef3ca3075 100644 --- a/service/redshiftdata/api_op_ListStatements.go +++ b/service/redshiftdata/api_op_ListStatements.go @@ -50,27 +50,26 @@ type ListStatementsInput struct { // you provide. StatementName *string - // The status of the SQL statement to list. Status values are defined as follows: - // + // The status of the SQL statement to list. Status values are defined as + // follows: // // * ABORTED - The query run was stopped by the user. // - // * ALL - A status value - // that includes all query statuses. This value can be used to filter results. - // + // * ALL - A status + // value that includes all query statuses. This value can be used to filter + // results. // // * FAILED - The query run failed. // - // * FINISHED - The query has finished + // * FINISHED - The query has finished // running. // - // * PICKED - The query has been chosen to be run. + // * PICKED - The query has been chosen to be run. // - // * STARTED - - // The query run has started. + // * STARTED - The query + // run has started. // - // * SUBMITTED - The query was submitted, but not - // yet processed. + // * SUBMITTED - The query was submitted, but not yet processed. Status types.StatusString } diff --git a/service/redshiftdata/api_op_ListTables.go b/service/redshiftdata/api_op_ListTables.go index 7ad40ee6f13..558ab98884d 100644 --- a/service/redshiftdata/api_op_ListTables.go +++ b/service/redshiftdata/api_op_ListTables.go @@ -16,14 +16,14 @@ import ( // page through the table list. Depending on the authorization method, use one of // the following combinations of request parameters: // -// * AWS Secrets Manager - +// * AWS Secrets Manager - // specify the Amazon Resource Name (ARN) of the secret and the cluster identifier // that matches the cluster in the secret. // -// * Temporary credentials - specify -// the cluster identifier, the database name, and the database user name. -// Permission to call the redshift:GetClusterCredentials operation is required to -// use this method. +// * Temporary credentials - specify the +// cluster identifier, the database name, and the database user name. Permission to +// call the redshift:GetClusterCredentials operation is required to use this +// method. 
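// Illustrative sketch (not generated code) of the two authorization modes
// documented in the redshiftdata comments above, using the
// temporary-credentials form (cluster identifier, database name, and database
// user); the Secrets Manager form would set SecretArn instead. The loop polls
// DescribeStatement for the FINISHED/FAILED/ABORTED statuses listed above.
// The helper name runStatement, the placeholder identifiers, and the
// StatusString constant names (assumed to follow the renamed-enum convention
// applied in this patch) are not taken from the generated code.
package example

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/redshiftdata"
	"github.com/aws/aws-sdk-go-v2/service/redshiftdata/types"
)

// runStatement submits a SQL statement through the Data API and waits for it
// to reach a terminal status.
func runStatement(ctx context.Context, client *redshiftdata.Client) error {
	out, err := client.ExecuteStatement(ctx, &redshiftdata.ExecuteStatementInput{
		ClusterIdentifier: aws.String("examplecluster"),
		Database:          aws.String("dev"),
		DbUser:            aws.String("adminuser"),
		Sql:               aws.String("SELECT 1"),
	})
	if err != nil {
		return err
	}
	for {
		desc, err := client.DescribeStatement(ctx, &redshiftdata.DescribeStatementInput{Id: out.Id})
		if err != nil {
			return err
		}
		switch desc.Status {
		case types.StatusStringFinished:
			return nil
		case types.StatusStringFailed, types.StatusStringAborted:
			return fmt.Errorf("statement %s ended with status %s", aws.ToString(out.Id), desc.Status)
		}
		time.Sleep(time.Second)
	}
}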
func (c *Client) ListTables(ctx context.Context, params *ListTablesInput, optFns ...func(*Options)) (*ListTablesOutput, error) { if params == nil { params = &ListTablesInput{} diff --git a/service/rekognition/api_op_DetectProtectiveEquipment.go b/service/rekognition/api_op_DetectProtectiveEquipment.go index 0d08403db9f..830e42b36a2 100644 --- a/service/rekognition/api_op_DetectProtectiveEquipment.go +++ b/service/rekognition/api_op_DetectProtectiveEquipment.go @@ -14,38 +14,38 @@ import ( // Detects Personal Protective Equipment (PPE) worn by people detected in an image. // Amazon Rekognition can detect the following types of PPE. // -// * Face cover +// * Face cover // +// * Hand +// cover // -// * Hand cover +// * Head cover // -// * Head cover +// You pass the input image as base64-encoded image bytes or +// as a reference to an image in an Amazon S3 bucket. The image must be either a +// PNG or JPG formatted file. DetectProtectiveEquipment detects PPE worn by up to +// 15 persons detected in an image. For each person detected in the image the API +// returns an array of body parts (face, head, left-hand, right-hand). For each +// body part, an array of detected items of PPE is returned, including an indicator +// of whether or not the PPE covers the body part. The API returns the confidence +// it has in each detection (person, PPE, body part and body part coverage). It +// also returns a bounding box (BoundingBox) for each detected person and each +// detected item of PPE. You can optionally request a summary of detected PPE items +// with the SummarizationAttributes input parameter. The summary provides the +// following information. // -// You pass the input image as base64-encoded image -// bytes or as a reference to an image in an Amazon S3 bucket. The image must be -// either a PNG or JPG formatted file. DetectProtectiveEquipment detects PPE worn -// by up to 15 persons detected in an image. For each person detected in the image -// the API returns an array of body parts (face, head, left-hand, right-hand). For -// each body part, an array of detected items of PPE is returned, including an -// indicator of whether or not the PPE covers the body part. The API returns the -// confidence it has in each detection (person, PPE, body part and body part -// coverage). It also returns a bounding box (BoundingBox) for each detected person -// and each detected item of PPE. You can optionally request a summary of detected -// PPE items with the SummarizationAttributes input parameter. The summary provides -// the following information. +// * The persons detected as wearing all of the types of +// PPE that you specify. // -// * The persons detected as wearing all of the -// types of PPE that you specify. +// * The persons detected as not wearing all of the types +// PPE that you specify. // -// * The persons detected as not wearing all of -// the types PPE that you specify. +// * The persons detected where PPE adornment could not be +// determined. // -// * The persons detected where PPE adornment -// could not be determined. -// -// This is a stateless API operation. That is, the -// operation does not persist any data. This operation requires permissions to -// perform the rekognition:DetectProtectiveEquipment action. +// This is a stateless API operation. That is, the operation does not +// persist any data. This operation requires permissions to perform the +// rekognition:DetectProtectiveEquipment action. 
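// Illustrative sketch (not generated code) of the DetectProtectiveEquipment
// call described above: the input image is referenced in an Amazon S3 bucket,
// and SummarizationAttributes requests a summary for the FACE_COVER and
// HEAD_COVER equipment types using the renamed constants from this patch.
// The helper name detectPPE and the bucket/object names are placeholders, and
// field shapes may differ slightly between preview releases of the SDK.
package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/rekognition"
	"github.com/aws/aws-sdk-go-v2/service/rekognition/types"
)

// detectPPE reports how many people were detected and how many wore all of the
// required equipment types.
func detectPPE(ctx context.Context, client *rekognition.Client) error {
	out, err := client.DetectProtectiveEquipment(ctx, &rekognition.DetectProtectiveEquipmentInput{
		Image: &types.Image{
			S3Object: &types.S3Object{
				Bucket: aws.String("example-bucket"),
				Name:   aws.String("worksite.jpg"),
			},
		},
		SummarizationAttributes: &types.ProtectiveEquipmentSummarizationAttributes{
			MinConfidence: aws.Float32(80),
			RequiredEquipmentTypes: []types.ProtectiveEquipmentType{
				types.ProtectiveEquipmentTypeFaceCover,
				types.ProtectiveEquipmentTypeHeadCover,
			},
		},
	})
	if err != nil {
		return err
	}
	fmt.Printf("detected %d person(s)\n", len(out.Persons))
	if out.Summary != nil {
		fmt.Printf("%d person(s) wore all required equipment\n",
			len(out.Summary.PersonsWithRequiredEquipment))
	}
	return nil
}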
func (c *Client) DetectProtectiveEquipment(ctx context.Context, params *DetectProtectiveEquipmentInput, optFns ...func(*Options)) (*DetectProtectiveEquipmentOutput, error) { if params == nil { params = &DetectProtectiveEquipmentInput{} diff --git a/service/rekognition/api_op_IndexFaces.go b/service/rekognition/api_op_IndexFaces.go index 871d2dc28f3..9bc3ed29c83 100644 --- a/service/rekognition/api_op_IndexFaces.go +++ b/service/rekognition/api_op_IndexFaces.go @@ -48,49 +48,48 @@ import ( // UnindexedFace objects, UnindexedFaces. Faces aren't indexed for reasons such // as: // -// * The number of faces detected exceeds the value of the MaxFaces -// request parameter. +// * The number of faces detected exceeds the value of the MaxFaces request +// parameter. // -// * The face is too small compared to the image -// dimensions. +// * The face is too small compared to the image dimensions. // -// * The face is too blurry. +// * The +// face is too blurry. // -// * The image is too dark. +// * The image is too dark. // -// * -// The face has an extreme pose. +// * The face has an extreme +// pose. // -// * The face doesn’t have enough detail to be -// suitable for face search. +// * The face doesn’t have enough detail to be suitable for face search. // -// In response, the IndexFaces operation returns an -// array of metadata for all detected faces, FaceRecords. This includes: +// In +// response, the IndexFaces operation returns an array of metadata for all detected +// faces, FaceRecords. This includes: // -// * The -// bounding box, BoundingBox, of the detected face. +// * The bounding box, BoundingBox, of the +// detected face. // -// * A confidence value, -// Confidence, which indicates the confidence that the bounding box contains a -// face. +// * A confidence value, Confidence, which indicates the confidence +// that the bounding box contains a face. // -// * A face ID, FaceId, assigned by the service for each face that's -// detected and stored. +// * A face ID, FaceId, assigned by the +// service for each face that's detected and stored. // -// * An image ID, ImageId, assigned by the service for -// the input image. +// * An image ID, ImageId, +// assigned by the service for the input image. // -// If you request all facial attributes (by using the -// detectionAttributes parameter), Amazon Rekognition returns detailed facial -// attributes, such as facial landmarks (for example, location of eye and mouth) -// and other facial attributes. If you provide the same image, specify the same -// collection, and use the same external ID in the IndexFaces operation, Amazon -// Rekognition doesn't save duplicate face metadata. The input image is passed -// either as base64-encoded image bytes, or as a reference to an image in an Amazon -// S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing -// image bytes isn't supported. The image must be formatted as a PNG or JPEG file. -// This operation requires permissions to perform the rekognition:IndexFaces -// action. +// If you request all facial +// attributes (by using the detectionAttributes parameter), Amazon Rekognition +// returns detailed facial attributes, such as facial landmarks (for example, +// location of eye and mouth) and other facial attributes. If you provide the same +// image, specify the same collection, and use the same external ID in the +// IndexFaces operation, Amazon Rekognition doesn't save duplicate face metadata. 
+// The input image is passed either as base64-encoded image bytes, or as a +// reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call +// Amazon Rekognition operations, passing image bytes isn't supported. The image +// must be formatted as a PNG or JPEG file. This operation requires permissions to +// perform the rekognition:IndexFaces action. func (c *Client) IndexFaces(ctx context.Context, params *IndexFacesInput, optFns ...func(*Options)) (*IndexFacesOutput, error) { if params == nil { params = &IndexFacesInput{} @@ -177,16 +176,16 @@ type IndexFacesOutput struct { // orientation information is returned. If your collection is associated with a // face detection model that's version 3.0 or earlier, the following applies: // - // - // * If the input image is in .jpeg format, it might contain exchangeable image - // file format (Exif) metadata that includes the image's orientation. Amazon - // Rekognition uses this orientation information to perform image correction - the - // bounding box coordinates are translated to represent object locations after the - // orientation information in the Exif metadata is used to correct the image - // orientation. Images in .png format don't contain Exif metadata. The value of + // * If + // the input image is in .jpeg format, it might contain exchangeable image file + // format (Exif) metadata that includes the image's orientation. Amazon Rekognition + // uses this orientation information to perform image correction - the bounding box + // coordinates are translated to represent object locations after the orientation + // information in the Exif metadata is used to correct the image orientation. + // Images in .png format don't contain Exif metadata. The value of // OrientationCorrection is null. // - // * If the image doesn't contain orientation + // * If the image doesn't contain orientation // information in its Exif metadata, Amazon Rekognition returns an estimated // orientation (ROTATE_0, ROTATE_90, ROTATE_180, ROTATE_270). Amazon Rekognition // doesn’t perform image correction for images. The bounding box coordinates aren't diff --git a/service/rekognition/types/enums.go b/service/rekognition/types/enums.go index f3f3a43eb3d..f4f272433c9 100644 --- a/service/rekognition/types/enums.go +++ b/service/rekognition/types/enums.go @@ -24,10 +24,10 @@ type BodyPart string // Enum values for BodyPart const ( - BodyPartFace BodyPart = "FACE" - BodyPartHead BodyPart = "HEAD" - BodyPartLeft_hand BodyPart = "LEFT_HAND" - BodyPartRight_hand BodyPart = "RIGHT_HAND" + BodyPartFace BodyPart = "FACE" + BodyPartHead BodyPart = "HEAD" + BodyPartLeftHand BodyPart = "LEFT_HAND" + BodyPartRightHand BodyPart = "RIGHT_HAND" ) // Values returns all known values for BodyPart. Note that this can be expanded in @@ -64,8 +64,8 @@ type ContentClassifier string // Enum values for ContentClassifier const ( - ContentClassifierFree_of_personally_identifiable_information ContentClassifier = "FreeOfPersonallyIdentifiableInformation" - ContentClassifierFree_of_adult_content ContentClassifier = "FreeOfAdultContent" + ContentClassifierFreeOfPersonallyIdentifiableInformation ContentClassifier = "FreeOfPersonallyIdentifiableInformation" + ContentClassifierFreeOfAdultContent ContentClassifier = "FreeOfAdultContent" ) // Values returns all known values for ContentClassifier. 
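A minimal, hypothetical sketch of the IndexFaces call whose documentation is rewrapped above. CollectionId, ExternalImageId, MaxFaces, FaceRecords, UnindexedFaces and Reasons are the names that documentation uses; the exact pointer shapes (for example aws.Int32 for MaxFaces) are assumptions about this preview SDK.

package examples

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/rekognition"
	"github.com/aws/aws-sdk-go-v2/service/rekognition/types"
)

// indexFromS3 adds faces from an S3-hosted image to an existing collection and
// reports which detected faces were skipped, and why.
func indexFromS3(ctx context.Context, client *rekognition.Client) error {
	out, err := client.IndexFaces(ctx, &rekognition.IndexFacesInput{
		CollectionId:    aws.String("my-collection"), // placeholder collection
		ExternalImageId: aws.String("visitor-42"),
		MaxFaces:        aws.Int32(5),
		Image: &types.Image{
			S3Object: &types.S3Object{
				Bucket: aws.String("my-bucket"),
				Name:   aws.String("visitor.jpg"),
			},
		},
	})
	if err != nil {
		return err
	}
	fmt.Printf("indexed %d face record(s)\n", len(out.FaceRecords))
	// UnindexedFaces carries the Reason values (EXCEEDS_MAX_FACES, EXTREME_POSE,
	// and so on) for faces that were detected but not added to the collection.
	for _, unindexed := range out.UnindexedFaces {
		fmt.Println("skipped:", unindexed.Reasons)
	}
	return nil
}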
Note that this can be @@ -278,10 +278,10 @@ type OrientationCorrection string // Enum values for OrientationCorrection const ( - OrientationCorrectionRotate_0 OrientationCorrection = "ROTATE_0" - OrientationCorrectionRotate_90 OrientationCorrection = "ROTATE_90" - OrientationCorrectionRotate_180 OrientationCorrection = "ROTATE_180" - OrientationCorrectionRotate_270 OrientationCorrection = "ROTATE_270" + OrientationCorrectionRotate0 OrientationCorrection = "ROTATE_0" + OrientationCorrectionRotate90 OrientationCorrection = "ROTATE_90" + OrientationCorrectionRotate180 OrientationCorrection = "ROTATE_180" + OrientationCorrectionRotate270 OrientationCorrection = "ROTATE_270" ) // Values returns all known values for OrientationCorrection. Note that this can be @@ -338,15 +338,15 @@ type ProjectVersionStatus string // Enum values for ProjectVersionStatus const ( - ProjectVersionStatusTraining_in_progress ProjectVersionStatus = "TRAINING_IN_PROGRESS" - ProjectVersionStatusTraining_completed ProjectVersionStatus = "TRAINING_COMPLETED" - ProjectVersionStatusTraining_failed ProjectVersionStatus = "TRAINING_FAILED" - ProjectVersionStatusStarting ProjectVersionStatus = "STARTING" - ProjectVersionStatusRunning ProjectVersionStatus = "RUNNING" - ProjectVersionStatusFailed ProjectVersionStatus = "FAILED" - ProjectVersionStatusStopping ProjectVersionStatus = "STOPPING" - ProjectVersionStatusStopped ProjectVersionStatus = "STOPPED" - ProjectVersionStatusDeleting ProjectVersionStatus = "DELETING" + ProjectVersionStatusTrainingInProgress ProjectVersionStatus = "TRAINING_IN_PROGRESS" + ProjectVersionStatusTrainingCompleted ProjectVersionStatus = "TRAINING_COMPLETED" + ProjectVersionStatusTrainingFailed ProjectVersionStatus = "TRAINING_FAILED" + ProjectVersionStatusStarting ProjectVersionStatus = "STARTING" + ProjectVersionStatusRunning ProjectVersionStatus = "RUNNING" + ProjectVersionStatusFailed ProjectVersionStatus = "FAILED" + ProjectVersionStatusStopping ProjectVersionStatus = "STOPPING" + ProjectVersionStatusStopped ProjectVersionStatus = "STOPPED" + ProjectVersionStatusDeleting ProjectVersionStatus = "DELETING" ) // Values returns all known values for ProjectVersionStatus. Note that this can be @@ -370,9 +370,9 @@ type ProtectiveEquipmentType string // Enum values for ProtectiveEquipmentType const ( - ProtectiveEquipmentTypeFace_cover ProtectiveEquipmentType = "FACE_COVER" - ProtectiveEquipmentTypeHand_cover ProtectiveEquipmentType = "HAND_COVER" - ProtectiveEquipmentTypeHead_cover ProtectiveEquipmentType = "HEAD_COVER" + ProtectiveEquipmentTypeFaceCover ProtectiveEquipmentType = "FACE_COVER" + ProtectiveEquipmentTypeHandCover ProtectiveEquipmentType = "HAND_COVER" + ProtectiveEquipmentTypeHeadCover ProtectiveEquipmentType = "HEAD_COVER" ) // Values returns all known values for ProtectiveEquipmentType. 
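The enum hunks above and below are the substance of this commit for the types packages: multi-word constants lose their embedded underscores (BodyPartLeft_hand becomes BodyPartLeftHand, ProjectVersionStatusTraining_completed becomes ProjectVersionStatusTrainingCompleted), while the string values on the right-hand side are unchanged. A short sketch of calling code written against the renamed constants, all of which are taken directly from this diff:

package examples

import "github.com/aws/aws-sdk-go-v2/service/rekognition/types"

// trainingFinished reports whether a Rekognition project version has completed
// or failed training, using the renamed ProjectVersionStatus constants.
func trainingFinished(status types.ProjectVersionStatus) bool {
	switch status {
	case types.ProjectVersionStatusTrainingCompleted, types.ProjectVersionStatusTrainingFailed:
		return true
	default:
		return false
	}
}

Because only the Go identifiers change, comparisons against the underlying wire values (for example "TRAINING_COMPLETED") keep working; code that referenced the old constant names needs the mechanical rename shown here.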
Note that this can @@ -414,13 +414,13 @@ type Reason string // Enum values for Reason const ( - ReasonExceeds_max_faces Reason = "EXCEEDS_MAX_FACES" - ReasonExtreme_pose Reason = "EXTREME_POSE" - ReasonLow_brightness Reason = "LOW_BRIGHTNESS" - ReasonLow_sharpness Reason = "LOW_SHARPNESS" - ReasonLow_confidence Reason = "LOW_CONFIDENCE" - ReasonSmall_bounding_box Reason = "SMALL_BOUNDING_BOX" - ReasonLow_face_quality Reason = "LOW_FACE_QUALITY" + ReasonExceedsMaxFaces Reason = "EXCEEDS_MAX_FACES" + ReasonExtremePose Reason = "EXTREME_POSE" + ReasonLowBrightness Reason = "LOW_BRIGHTNESS" + ReasonLowSharpness Reason = "LOW_SHARPNESS" + ReasonLowConfidence Reason = "LOW_CONFIDENCE" + ReasonSmallBoundingBox Reason = "SMALL_BOUNDING_BOX" + ReasonLowFaceQuality Reason = "LOW_FACE_QUALITY" ) // Values returns all known values for Reason. Note that this can be expanded in @@ -442,8 +442,8 @@ type SegmentType string // Enum values for SegmentType const ( - SegmentTypeTechnical_cue SegmentType = "TECHNICAL_CUE" - SegmentTypeShot SegmentType = "SHOT" + SegmentTypeTechnicalCue SegmentType = "TECHNICAL_CUE" + SegmentTypeShot SegmentType = "SHOT" ) // Values returns all known values for SegmentType. Note that this can be expanded @@ -484,9 +484,9 @@ type TechnicalCueType string // Enum values for TechnicalCueType const ( - TechnicalCueTypeColor_bars TechnicalCueType = "ColorBars" - TechnicalCueTypeEnd_credits TechnicalCueType = "EndCredits" - TechnicalCueTypeBlack_frames TechnicalCueType = "BlackFrames" + TechnicalCueTypeColorBars TechnicalCueType = "ColorBars" + TechnicalCueTypeEndCredits TechnicalCueType = "EndCredits" + TechnicalCueTypeBlackFrames TechnicalCueType = "BlackFrames" ) // Values returns all known values for TechnicalCueType. Note that this can be @@ -522,9 +522,9 @@ type VideoJobStatus string // Enum values for VideoJobStatus const ( - VideoJobStatusIn_progress VideoJobStatus = "IN_PROGRESS" - VideoJobStatusSucceeded VideoJobStatus = "SUCCEEDED" - VideoJobStatusFailed VideoJobStatus = "FAILED" + VideoJobStatusInProgress VideoJobStatus = "IN_PROGRESS" + VideoJobStatusSucceeded VideoJobStatus = "SUCCEEDED" + VideoJobStatusFailed VideoJobStatus = "FAILED" ) // Values returns all known values for VideoJobStatus. Note that this can be diff --git a/service/rekognition/types/types.go b/service/rekognition/types/types.go index 2030022376f..076b7ff2c26 100644 --- a/service/rekognition/types/types.go +++ b/service/rekognition/types/types.go @@ -366,15 +366,15 @@ type Face struct { // only the default attributes. The corresponding Start operations don't have a // FaceAttributes input parameter. // -// * GetCelebrityRecognition +// * GetCelebrityRecognition // -// * +// * // GetPersonTracking // -// * GetFaceSearch +// * GetFaceSearch // -// The Amazon Rekognition Image DetectFaces -// and IndexFaces operations can return all facial attributes. To specify which +// The Amazon Rekognition Image DetectFaces and +// IndexFaces operations can return all facial attributes. To specify which // attributes to return, use the Attributes input parameter for DetectFaces. For // IndexFaces, use the DetectAttributes input parameter. type FaceDetail struct { @@ -1348,24 +1348,24 @@ type UnindexedFace struct { // An array of reasons that specify why a face wasn't indexed. // - // * EXTREME_POSE - // - The face is at a pose that can't be detected. For example, the head is turned + // * EXTREME_POSE - + // The face is at a pose that can't be detected. For example, the head is turned // too far away from the camera. 
// - // * EXCEEDS_MAX_FACES - The number of faces + // * EXCEEDS_MAX_FACES - The number of faces // detected is already higher than that specified by the MaxFaces input parameter // for IndexFaces. // - // * LOW_BRIGHTNESS - The image is too dark. + // * LOW_BRIGHTNESS - The image is too dark. // - // * - // LOW_SHARPNESS - The image is too blurry. + // * LOW_SHARPNESS - + // The image is too blurry. // - // * LOW_CONFIDENCE - The face was - // detected with a low confidence. + // * LOW_CONFIDENCE - The face was detected with a low + // confidence. // - // * SMALL_BOUNDING_BOX - The bounding box - // around the face is too small. + // * SMALL_BOUNDING_BOX - The bounding box around the face is too + // small. Reasons []Reason } diff --git a/service/resourcegroups/api_op_CreateGroup.go b/service/resourcegroups/api_op_CreateGroup.go index 4fb85d02dd2..4b5493e01c7 100644 --- a/service/resourcegroups/api_op_CreateGroup.go +++ b/service/resourcegroups/api_op_CreateGroup.go @@ -66,9 +66,8 @@ type CreateGroupOutput struct { // Groups supports adding service configurations for the following resource group // types: // - // * AWS::EC2::CapacityReservationPool - Amazon EC2 capacity - // reservation pools. For more information, see Working with capacity reservation - // groups + // * AWS::EC2::CapacityReservationPool - Amazon EC2 capacity reservation + // pools. For more information, see Working with capacity reservation groups // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/capacity-reservations-using.html#create-cr-group) // in the EC2 Users Guide. GroupConfiguration *types.GroupConfiguration diff --git a/service/resourcegroups/api_op_GetGroupConfiguration.go b/service/resourcegroups/api_op_GetGroupConfiguration.go index 8676b0f0873..932965be5ae 100644 --- a/service/resourcegroups/api_op_GetGroupConfiguration.go +++ b/service/resourcegroups/api_op_GetGroupConfiguration.go @@ -15,9 +15,8 @@ import ( // AWS Resource Groups supports configurations for the following resource group // types: // -// * AWS::EC2::CapacityReservationPool - Amazon EC2 capacity -// reservation pools. For more information, see Working with capacity reservation -// groups +// * AWS::EC2::CapacityReservationPool - Amazon EC2 capacity reservation +// pools. For more information, see Working with capacity reservation groups // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/capacity-reservations-using.html#create-cr-group) // in the EC2 Users Guide. func (c *Client) GetGroupConfiguration(ctx context.Context, params *GetGroupConfigurationInput, optFns ...func(*Options)) (*GetGroupConfigurationOutput, error) { diff --git a/service/resourcegroups/api_op_ListGroupResources.go b/service/resourcegroups/api_op_ListGroupResources.go index 2be00617af4..7851f519b02 100644 --- a/service/resourcegroups/api_op_ListGroupResources.go +++ b/service/resourcegroups/api_op_ListGroupResources.go @@ -34,7 +34,7 @@ type ListGroupResourcesInput struct { // ListGroupResources operation. Filters the results to include only those of the // specified resource types. // - // * resource-type - Filter resources by their type. + // * resource-type - Filter resources by their type. // Specify up to five resource types in the format AWS::ServiceCode::ResourceType. // For example, AWS::EC2::Instance, or AWS::S3::Bucket. 
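A hypothetical sketch of the ListGroupResources resource-type filter described above. Only the operation, its input type, and the resource-type filter semantics come from this patch; the ResourceFilter shape, the ResourceFilterNameResourceType constant, and the GroupName field are assumptions about this preview SDK.

package examples

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/resourcegroups"
	"github.com/aws/aws-sdk-go-v2/service/resourcegroups/types"
)

// listGroupInstances lists only the EC2 instances that belong to a group.
func listGroupInstances(ctx context.Context, client *resourcegroups.Client) error {
	out, err := client.ListGroupResources(ctx, &resourcegroups.ListGroupResourcesInput{
		GroupName: aws.String("my-group"), // assumed field; placeholder group name
		Filters: []*types.ResourceFilter{{
			Name:   types.ResourceFilterNameResourceType, // assumed constant name
			Values: []*string{aws.String("AWS::EC2::Instance")},
		}},
	})
	if err != nil {
		return err
	}
	fmt.Printf("%+v\n", out)
	return nil
}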
// diff --git a/service/resourcegroups/api_op_ListGroups.go b/service/resourcegroups/api_op_ListGroups.go index 47967a8c43f..839d28df042 100644 --- a/service/resourcegroups/api_op_ListGroups.go +++ b/service/resourcegroups/api_op_ListGroups.go @@ -32,16 +32,16 @@ type ListGroupsInput struct { // Filters, formatted as GroupFilter objects, that you want to apply to a // ListGroups operation. // - // * resource-type - Filter the results to include only + // * resource-type - Filter the results to include only // those of the specified resource types. Specify up to five resource types in the // format AWS::ServiceCode::ResourceType . For example, AWS::EC2::Instance, or // AWS::S3::Bucket. // - // * configuration-type - Filter the results to include only + // * configuration-type - Filter the results to include only // those groups that have the specified configuration types attached. The current // supported values are: // - // * AWS:EC2::CapacityReservationPool + // * AWS:EC2::CapacityReservationPool Filters []*types.GroupFilter // The total number of results that you want included on each page of the response. diff --git a/service/resourcegroups/doc.go b/service/resourcegroups/doc.go index a4daeb82b14..4d715af206e 100644 --- a/service/resourcegroups/doc.go +++ b/service/resourcegroups/doc.go @@ -22,18 +22,17 @@ // Groups uses a REST-compliant API that you can use to perform the following types // of operations. // -// * Create, Read, Update, and Delete (CRUD) operations on -// resource groups and resource query entities +// * Create, Read, Update, and Delete (CRUD) operations on resource +// groups and resource query entities // -// * Applying, editing, and -// removing tags from resource groups +// * Applying, editing, and removing tags from +// resource groups // -// * Resolving resource group member ARNs -// so they can be returned as search results +// * Resolving resource group member ARNs so they can be returned +// as search results // -// * Getting data about resources -// that are members of a group +// * Getting data about resources that are members of a group // -// * Searching AWS resources based on a resource -// query +// * +// Searching AWS resources based on a resource query package resourcegroups diff --git a/service/resourcegroups/types/enums.go b/service/resourcegroups/types/enums.go index 4d290138db1..d2c1e6a80b1 100644 --- a/service/resourcegroups/types/enums.go +++ b/service/resourcegroups/types/enums.go @@ -6,9 +6,9 @@ type GroupConfigurationStatus string // Enum values for GroupConfigurationStatus const ( - GroupConfigurationStatusUpdating GroupConfigurationStatus = "UPDATING" - GroupConfigurationStatusUpdate_complete GroupConfigurationStatus = "UPDATE_COMPLETE" - GroupConfigurationStatusUpdate_failed GroupConfigurationStatus = "UPDATE_FAILED" + GroupConfigurationStatusUpdating GroupConfigurationStatus = "UPDATING" + GroupConfigurationStatusUpdateComplete GroupConfigurationStatus = "UPDATE_COMPLETE" + GroupConfigurationStatusUpdateFailed GroupConfigurationStatus = "UPDATE_FAILED" ) // Values returns all known values for GroupConfigurationStatus. 
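The ListGroups hunk above shows the Filters field ([]*types.GroupFilter) directly, so a sketch of the configuration-type filter is straightforward. The GroupFilterNameConfigurationType constant name is an assumption; note also that the doc text's AWS:EC2::CapacityReservationPool value appears to be missing a colon relative to the AWS::EC2::CapacityReservationPool form used elsewhere in this patch, and the full form is used here.

package examples

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/resourcegroups"
	"github.com/aws/aws-sdk-go-v2/service/resourcegroups/types"
)

// listCapacityReservationGroups finds groups that have a capacity reservation
// pool configuration attached.
func listCapacityReservationGroups(ctx context.Context, client *resourcegroups.Client) error {
	out, err := client.ListGroups(ctx, &resourcegroups.ListGroupsInput{
		Filters: []*types.GroupFilter{{
			Name:   types.GroupFilterNameConfigurationType, // assumed constant name
			Values: []*string{aws.String("AWS::EC2::CapacityReservationPool")},
		}},
	})
	if err != nil {
		return err
	}
	fmt.Printf("%+v\n", out)
	return nil
}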
Note that this can @@ -44,8 +44,8 @@ type QueryErrorCode string // Enum values for QueryErrorCode const ( - QueryErrorCodeCloudformation_stack_inactive QueryErrorCode = "CLOUDFORMATION_STACK_INACTIVE" - QueryErrorCodeCloudformation_stack_not_existing QueryErrorCode = "CLOUDFORMATION_STACK_NOT_EXISTING" + QueryErrorCodeCloudformationStackInactive QueryErrorCode = "CLOUDFORMATION_STACK_INACTIVE" + QueryErrorCodeCloudformationStackNotExisting QueryErrorCode = "CLOUDFORMATION_STACK_NOT_EXISTING" ) // Values returns all known values for QueryErrorCode. Note that this can be @@ -62,8 +62,8 @@ type QueryType string // Enum values for QueryType const ( - QueryTypeTag_filters_1_0 QueryType = "TAG_FILTERS_1_0" - QueryTypeCloudformation_stack_1_0 QueryType = "CLOUDFORMATION_STACK_1_0" + QueryTypeTagFilters10 QueryType = "TAG_FILTERS_1_0" + QueryTypeCloudformationStack10 QueryType = "CLOUDFORMATION_STACK_1_0" ) // Values returns all known values for QueryType. Note that this can be expanded in diff --git a/service/resourcegroups/types/types.go b/service/resourcegroups/types/types.go index c949acd192b..fa85e5978c5 100644 --- a/service/resourcegroups/types/types.go +++ b/service/resourcegroups/types/types.go @@ -18,13 +18,13 @@ type FailedResource struct { // A resource group that contains AWS resources. You can assign resources to the // group by associating either of the following elements with the group: // -// * +// * // ResourceQuery - Use a resource query to specify a set of tag keys and values. // All resources in the same AWS Region and AWS account that have those keys with // the same values are included in the group. You can add a resource query when you // create the group. // -// * GroupConfiguration - Use a service configuration to +// * GroupConfiguration - Use a service configuration to // associate the group with an AWS service. The configuration specifies which // resource types can be included in the group. type Group struct { @@ -70,14 +70,14 @@ type GroupConfigurationItem struct { // Specifies the type of group configuration item. Each item must have a unique // value for type. You can specify the following string values: // - // * + // * // AWS::EC2::CapacityReservationPool For more information about EC2 capacity // reservation groups, see Working with capacity reservation groups // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/capacity-reservations-using.html#create-cr-group) // in the EC2 Users Guide. // - // * AWS::ResourceGroups::Generic - Supports - // parameters that configure the behavior of resource groups of any type. + // * AWS::ResourceGroups::Generic - Supports parameters + // that configure the behavior of resource groups of any type. // // This member is required. Type *string @@ -92,21 +92,20 @@ type GroupConfigurationParameter struct { // The name of the group configuration parameter. You can specify the following // string values: // - // * For configuration item type - // AWS::ResourceGroups::Generic: + // * For configuration item type AWS::ResourceGroups::Generic: // - // * allowed-resource-types Specifies the - // types of resources that you can add to this group by using the GroupResources - // operation. + // * + // allowed-resource-types Specifies the types of resources that you can add to this + // group by using the GroupResources operation. // - // * For configuration item type + // * For configuration item type // AWS::EC2::CapacityReservationPool: // - // * None - This configuration item - // type doesn't support any parameters. 
+ // * None - This configuration item type + // doesn't support any parameters. // - // For more information about EC2 - // capacity reservation groups, see Working with capacity reservation groups + // For more information about EC2 capacity + // reservation groups, see Working with capacity reservation groups // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/capacity-reservations-using.html#create-cr-group) // in the EC2 Users Guide. // @@ -115,8 +114,8 @@ type GroupConfigurationParameter struct { // The values of for this parameter. You can specify the following string value: // - // - // * For item type allowed-resource-types: the only supported parameter value is + // * + // For item type allowed-resource-types: the only supported parameter value is // AWS::EC2::CapacityReservation. Values []*string } @@ -221,14 +220,14 @@ type ResourceIdentifier struct { // resource query JSON string that includes only resources that meet the following // criteria: // -// * The resource type must be either resource_type1 or +// * The resource type must be either resource_type1 or // resource_type2. // -// * The resource must have a tag Key1 with a value of either +// * The resource must have a tag Key1 with a value of either // ValueA or ValueB. // -// * The resource must have a tag Key2 with a value of -// either ValueC or ValueD. +// * The resource must have a tag Key2 with a value of either +// ValueC or ValueD. // // { "Type": "TAG_FILTERS_1_0", "Query": { // "ResourceTypeFilters": [ "resource_type1", "resource_type2"], "TagFilters": [ { @@ -260,11 +259,11 @@ type ResourceQuery struct { // The type of the query. You can use the following values: // - // * + // * // CLOUDFORMATION_STACK_1_0: Specifies that the Query contains an ARN for a // CloudFormation stack. // - // * TAG_FILTERS_1_0: Specifies that the Query parameter + // * TAG_FILTERS_1_0: Specifies that the Query parameter // contains a JSON string that represents a collection of simple tag filters for // resource types and tags. The JSON string uses a syntax similar to the // GetResources @@ -282,24 +281,24 @@ type ResourceQuery struct { // [{"Stage":["Test","Deploy"]},{"Version":["1","2"]}] The results of this query // could include the following. // - // * An EC2 instance that has the following - // two tags: {"Stage":"Deploy"}, and {"Version":"2"} + // * An EC2 instance that has the following two tags: + // {"Stage":"Deploy"}, and {"Version":"2"} // - // * An S3 bucket that - // has the following two tags: {"Stage":"Test"}, and {"Version":"1"} + // * An S3 bucket that has the following + // two tags: {"Stage":"Test"}, and {"Version":"1"} // - // The query - // would not include the following items in the results, however. + // The query would not include the + // following items in the results, however. // - // * An EC2 - // instance that has only the following tag: {"Stage":"Deploy"}. The instance does - // not have all of the tag keys specified in the filter, so it is excluded from the - // results. + // * An EC2 instance that has only the + // following tag: {"Stage":"Deploy"}. The instance does not have all of the tag + // keys specified in the filter, so it is excluded from the results. // - // * An RDS database that has the following two tags: - // {"Stage":"Archived"} and {"Version":"4"} The database has all of the tag keys, - // but none of those keys has an associated value that matches at least one of the - // specified values in the filter. 
+ // * An RDS + // database that has the following two tags: {"Stage":"Archived"} and + // {"Version":"4"} The database has all of the tag keys, but none of those keys has + // an associated value that matches at least one of the specified values in the + // filter. // // This member is required. Type QueryType diff --git a/service/resourcegroupstaggingapi/api_op_DescribeReportCreation.go b/service/resourcegroupstaggingapi/api_op_DescribeReportCreation.go index 3da889ccee2..6b770c08aac 100644 --- a/service/resourcegroupstaggingapi/api_op_DescribeReportCreation.go +++ b/service/resourcegroupstaggingapi/api_op_DescribeReportCreation.go @@ -45,17 +45,17 @@ type DescribeReportCreationOutput struct { // Reports the status of the operation. The operation status can be one of the // following: // - // * RUNNING - Report creation is in progress. + // * RUNNING - Report creation is in progress. // - // * SUCCEEDED - - // Report creation is complete. You can open the report from the Amazon S3 bucket - // that you specified when you ran StartReportCreation. + // * SUCCEEDED - Report + // creation is complete. You can open the report from the Amazon S3 bucket that you + // specified when you ran StartReportCreation. // - // * FAILED - Report - // creation timed out or the Amazon S3 bucket is not accessible. + // * FAILED - Report creation timed + // out or the Amazon S3 bucket is not accessible. // - // * NO REPORT - - // No report was generated in the last 90 days. + // * NO REPORT - No report was + // generated in the last 90 days. Status *string // Metadata pertaining to the operation's result. diff --git a/service/resourcegroupstaggingapi/api_op_GetComplianceSummary.go b/service/resourcegroupstaggingapi/api_op_GetComplianceSummary.go index 8c90101a603..0732e0fdec8 100644 --- a/service/resourcegroupstaggingapi/api_op_GetComplianceSummary.go +++ b/service/resourcegroupstaggingapi/api_op_GetComplianceSummary.go @@ -58,15 +58,15 @@ type GetComplianceSummaryInput struct { // resource's Amazon Resource Name (ARN). Consult the AWS General Reference for the // following: // - // * For a list of service name strings, see AWS Service Namespaces + // * For a list of service name strings, see AWS Service Namespaces // (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces). // - // - // * For resource type strings, see Example ARNs + // * + // For resource type strings, see Example ARNs // (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arns-syntax). // - // - // * For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // * + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS // Service Namespaces // (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). // diff --git a/service/resourcegroupstaggingapi/api_op_GetResources.go b/service/resourcegroupstaggingapi/api_op_GetResources.go index 70298fa4561..e34982e7959 100644 --- a/service/resourcegroupstaggingapi/api_op_GetResources.go +++ b/service/resourcegroupstaggingapi/api_op_GetResources.go @@ -15,13 +15,13 @@ import ( // specified Region for the AWS account. Depending on what information you want // returned, you can also specify the following: // -// * Filters that specify what -// tags and resource types you want returned. The response includes all tags that -// are associated with the requested resources. +// * Filters that specify what tags +// and resource types you want returned. 
The response includes all tags that are +// associated with the requested resources. // -// * Information about compliance -// with the account's effective tag policy. For more information on tag policies, -// see Tag Policies +// * Information about compliance with +// the account's effective tag policy. For more information on tag policies, see +// Tag Policies // (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_tag-policies.html) // in the AWS Organizations User Guide. // @@ -70,15 +70,15 @@ type GetResourcesInput struct { // resource's Amazon Resource Name (ARN). Consult the AWS General Reference for the // following: // - // * For a list of service name strings, see AWS Service Namespaces + // * For a list of service name strings, see AWS Service Namespaces // (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces). // - // - // * For resource type strings, see Example ARNs + // * + // For resource type strings, see Example ARNs // (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arns-syntax). // - // - // * For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // * + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS // Service Namespaces // (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). // @@ -98,41 +98,40 @@ type GetResourcesInput struct { // can include up to 20 values. Note the following when deciding how to use // TagFilters: // - // * If you do specify a TagFilter, the response returns only - // those resources that are currently associated with the specified tag. + // * If you do specify a TagFilter, the response returns only those + // resources that are currently associated with the specified tag. // - // * If - // you don't specify a TagFilter, the response includes all resources that were - // ever associated with tags. Resources that currently don't have associated tags - // are shown with an empty tag set, like this: "Tags": []. + // * If you don't + // specify a TagFilter, the response includes all resources that were ever + // associated with tags. Resources that currently don't have associated tags are + // shown with an empty tag set, like this: "Tags": []. // - // * If you specify - // more than one filter in a single request, the response returns only those - // resources that satisfy all specified filters. + // * If you specify more than + // one filter in a single request, the response returns only those resources that + // satisfy all specified filters. // - // * If you specify a filter - // that contains more than one value for a key, the response returns resources that - // match any of the specified values for that key. + // * If you specify a filter that contains more + // than one value for a key, the response returns resources that match any of the + // specified values for that key. // - // * If you don't specify any - // values for a key, the response returns resources that are tagged with that key - // irrespective of the value. For example, for filters: filter1 = {key1, {value1}}, - // filter2 = {key2, {value2,value3,value4}} , filter3 = {key3}: + // * If you don't specify any values for a key, the + // response returns resources that are tagged with that key irrespective of the + // value. 
For example, for filters: filter1 = {key1, {value1}}, filter2 = {key2, + // {value2,value3,value4}} , filter3 = {key3}: // - // * - // GetResources( {filter1} ) returns resources tagged with key1=value1 + // * GetResources( {filter1} ) returns + // resources tagged with key1=value1 // - // * - // GetResources( {filter2} ) returns resources tagged with key2=value2 or - // key2=value3 or key2=value4 + // * GetResources( {filter2} ) returns resources + // tagged with key2=value2 or key2=value3 or key2=value4 // - // * GetResources( {filter3} ) returns - // resources tagged with any tag containing key3 as its tag key, irrespective of - // its value + // * GetResources( {filter3} + // ) returns resources tagged with any tag containing key3 as its tag key, + // irrespective of its value // - // * GetResources( {filter1,filter2,filter3} ) returns resources - // tagged with ( key1=value1) and ( key2=value2 or key2=value3 or key2=value4) and - // (key3, irrespective of the value) + // * GetResources( {filter1,filter2,filter3} ) returns + // resources tagged with ( key1=value1) and ( key2=value2 or key2=value3 or + // key2=value4) and (key3, irrespective of the value) TagFilters []*types.TagFilter // AWS recommends using ResourcesPerPage instead of this parameter. A limit that diff --git a/service/resourcegroupstaggingapi/api_op_TagResources.go b/service/resourcegroupstaggingapi/api_op_TagResources.go index 14f2d91161c..a300da98cd0 100644 --- a/service/resourcegroupstaggingapi/api_op_TagResources.go +++ b/service/resourcegroupstaggingapi/api_op_TagResources.go @@ -13,23 +13,23 @@ import ( // Applies one or more tags to the specified resources. Note the following: // -// * -// Not all resources can have tags. For a list of services that support tagging, -// see this list +// * Not +// all resources can have tags. For a list of services that support tagging, see +// this list // (http://docs.aws.amazon.com/resourcegroupstagging/latest/APIReference/Welcome.html). // -// -// * Each resource can have up to 50 tags. For other limits, see Tag Naming and -// Usage Conventions +// * +// Each resource can have up to 50 tags. For other limits, see Tag Naming and Usage +// Conventions // (http://docs.aws.amazon.com/general/latest/gr/aws_tagging.html#tag-conventions) // in the AWS General Reference. // -// * You can only tag resources that are located -// in the specified Region for the AWS account. +// * You can only tag resources that are located in +// the specified Region for the AWS account. // -// * To add tags to a resource, -// you need the necessary permissions for the service that the resource belongs to -// as well as permissions for adding tags. For more information, see this list +// * To add tags to a resource, you need +// the necessary permissions for the service that the resource belongs to as well +// as permissions for adding tags. For more information, see this list // (http://docs.aws.amazon.com/resourcegroupstagging/latest/APIReference/Welcome.html). // // Do diff --git a/service/resourcegroupstaggingapi/api_op_UntagResources.go b/service/resourcegroupstaggingapi/api_op_UntagResources.go index d0fc90929e9..88752bee77c 100644 --- a/service/resourcegroupstaggingapi/api_op_UntagResources.go +++ b/service/resourcegroupstaggingapi/api_op_UntagResources.go @@ -16,14 +16,14 @@ import ( // succeeds even if you attempt to remove tags from a resource that were already // removed. 
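To make the GetResources filter semantics above concrete, here is a hypothetical sketch mirroring the filter1/filter2 case: it returns only resources tagged with key1=value1 and with key2 equal to any of value2, value3 or value4. The TagFilters field name and its []*types.TagFilter type come from the hunk above; the TagFilter Key and Values field names are assumptions.

package examples

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi"
	"github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi/types"
)

// findTagged applies two tag filters: every filter must match, and within a
// filter any one of its values is enough.
func findTagged(ctx context.Context, client *resourcegroupstaggingapi.Client) error {
	out, err := client.GetResources(ctx, &resourcegroupstaggingapi.GetResourcesInput{
		TagFilters: []*types.TagFilter{
			{Key: aws.String("key1"), Values: []*string{aws.String("value1")}},
			{Key: aws.String("key2"), Values: []*string{
				aws.String("value2"), aws.String("value3"), aws.String("value4"),
			}},
		},
	})
	if err != nil {
		return err
	}
	fmt.Printf("%+v\n", out)
	return nil
}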
Note the following: // -// * To remove tags from a resource, you need the +// * To remove tags from a resource, you need the // necessary permissions for the service that the resource belongs to as well as // permissions for removing tags. For more information, see this list // (http://docs.aws.amazon.com/resourcegroupstagging/latest/APIReference/Welcome.html). // -// -// * You can only tag resources that are located in the specified Region for the -// AWS account. +// * +// You can only tag resources that are located in the specified Region for the AWS +// account. func (c *Client) UntagResources(ctx context.Context, params *UntagResourcesInput, optFns ...func(*Options)) (*UntagResourcesOutput, error) { if params == nil { params = &UntagResourcesInput{} diff --git a/service/resourcegroupstaggingapi/doc.go b/service/resourcegroupstaggingapi/doc.go index bfae4256231..ae0ac177f90 100644 --- a/service/resourcegroupstaggingapi/doc.go +++ b/service/resourcegroupstaggingapi/doc.go @@ -15,413 +15,412 @@ // simplify resource management, access management and cost allocation. You can use // the resource groups tagging API operations to complete the following tasks: // -// -// * Tag and untag supported resources located in the specified Region for the AWS +// * +// Tag and untag supported resources located in the specified Region for the AWS // account. // -// * Use tag-based filters to search for resources located in the +// * Use tag-based filters to search for resources located in the // specified Region for the AWS account. // -// * List all existing tag keys in the +// * List all existing tag keys in the // specified Region for the AWS account. // -// * List all existing values for the +// * List all existing values for the // specified key in the specified Region for the AWS account. // // To use resource // groups tagging API operations, you must add the following permissions to your // IAM policy: // -// * tag:GetResources +// * tag:GetResources // -// * tag:TagResources +// * tag:TagResources // -// * -// tag:UntagResources +// * tag:UntagResources // -// * tag:GetTagKeys +// * +// tag:GetTagKeys // -// * tag:GetTagValues +// * tag:GetTagValues // -// You'll also -// need permissions to access the resources of individual services so that you can -// tag and untag those resources. For more information on IAM policies, see -// Managing IAM Policies +// You'll also need permissions to access the +// resources of individual services so that you can tag and untag those resources. +// For more information on IAM policies, see Managing IAM Policies // (http://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_manage.html) in // the IAM User Guide. Services that support the Resource Groups Tagging API You // can use the Resource Groups Tagging API to tag resources for the following AWS // services. 
// -// * Alexa for Business (a4b) (https://docs.aws.amazon.com/a4b) -// +// * Alexa for Business (a4b) (https://docs.aws.amazon.com/a4b) // -// * API Gateway (https://docs.aws.amazon.com/apigateway) +// * API +// Gateway (https://docs.aws.amazon.com/apigateway) // -// * Amazon AppStream +// * Amazon AppStream // (https://docs.aws.amazon.com/appstream2) // -// * AWS AppSync +// * AWS AppSync // (https://docs.aws.amazon.com/appsync) // -// * AWS App Mesh +// * AWS App Mesh // (https://docs.aws.amazon.com/app-mesh) // -// * Amazon Athena +// * Amazon Athena // (https://docs.aws.amazon.com/athena) // -// * Amazon Aurora +// * Amazon Aurora // (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide) // -// * AWS Backup +// * AWS Backup // (https://docs.aws.amazon.com/aws-backup) // -// * AWS Certificate Manager +// * AWS Certificate Manager // (https://docs.aws.amazon.com/acm) // -// * AWS Certificate Manager Private CA +// * AWS Certificate Manager Private CA // (https://docs.aws.amazon.com/acm) // -// * Amazon Cloud Directory +// * Amazon Cloud Directory // (https://docs.aws.amazon.com/clouddirectory) // -// * AWS Cloud Map +// * AWS Cloud Map // (https://docs.aws.amazon.com/cloud-map) // -// * AWS CloudFormation +// * AWS CloudFormation // (https://docs.aws.amazon.com/cloudformation) // -// * Amazon CloudFront +// * Amazon CloudFront // (https://docs.aws.amazon.com/cloudfront) // -// * AWS CloudHSM +// * AWS CloudHSM // (https://docs.aws.amazon.com/cloudhsm) // -// * AWS CloudTrail +// * AWS CloudTrail // (https://docs.aws.amazon.com/cloudtrail) // -// * Amazon CloudWatch (alarms only) +// * Amazon CloudWatch (alarms only) // (https://docs.aws.amazon.com/cloudwatch) // -// * Amazon CloudWatch Events +// * Amazon CloudWatch Events // (https://docs.aws.amazon.com/cloudwatch/?id=docs_gateway#amazon-cloudwatch-events) // -// -// * Amazon CloudWatch Logs +// * +// Amazon CloudWatch Logs // (https://docs.aws.amazon.com/cloudwatch/?id=docs_gateway#amazon-cloudwatch-logs) // +// * +// Amazon Cloudwatch Synthetics (https://docs.aws.amazon.com/cloudwatch) // -// * Amazon Cloudwatch Synthetics (https://docs.aws.amazon.com/cloudwatch) +// * AWS +// CodeBuild (https://docs.aws.amazon.com/codebuild) // -// * -// AWS CodeBuild (https://docs.aws.amazon.com/codebuild) -// -// * AWS CodeCommit +// * AWS CodeCommit // (https://docs.aws.amazon.com/codecommit) // -// * AWS CodeGuru Profiler +// * AWS CodeGuru Profiler // (https://docs.aws.amazon.com/codeguru/latest/profiler-ug/) // -// * AWS -// CodePipeline (https://docs.aws.amazon.com/codepipeline) +// * AWS CodePipeline +// (https://docs.aws.amazon.com/codepipeline) // -// * AWS CodeStar +// * AWS CodeStar // (https://docs.aws.amazon.com/codestar) // -// * AWS CodeStar Connections +// * AWS CodeStar Connections // (https://docs.aws.amazon.com/codestar-connections/latest/APIReference/) // -// * +// * // Amazon Cognito Identity (https://docs.aws.amazon.com/cognito) // -// * Amazon -// Cognito User Pools (https://docs.aws.amazon.com/cognito) +// * Amazon Cognito +// User Pools (https://docs.aws.amazon.com/cognito) // -// * Amazon -// Comprehend (https://docs.aws.amazon.com/comprehend) +// * Amazon Comprehend +// (https://docs.aws.amazon.com/comprehend) // -// * AWS Config +// * AWS Config // (https://docs.aws.amazon.com/config) // -// * Amazon Connect +// * Amazon Connect // (http://aws.amazon.com/connect/resources/?whats-new-cards#Documentation) // -// * -// AWS Data Exchange (https://docs.aws.amazon.com/data-exchange) +// * AWS +// Data Exchange 
(https://docs.aws.amazon.com/data-exchange) // -// * AWS Data -// Pipeline (https://docs.aws.amazon.com/data-pipeline) +// * AWS Data Pipeline +// (https://docs.aws.amazon.com/data-pipeline) // -// * AWS Database -// Migration Service (https://docs.aws.amazon.com/dms) +// * AWS Database Migration Service +// (https://docs.aws.amazon.com/dms) // -// * AWS DataSync +// * AWS DataSync // (https://docs.aws.amazon.com/datasync) // -// * AWS Device Farm +// * AWS Device Farm // (https://docs.aws.amazon.com/devicefarm) // -// * AWS Direct Connect +// * AWS Direct Connect // (https://docs.aws.amazon.com/directconnect) // -// * AWS Directory Service +// * AWS Directory Service // (https://docs.aws.amazon.com/directory-service) // -// * Amazon DynamoDB +// * Amazon DynamoDB // (https://docs.aws.amazon.com/dynamodb) // -// * Amazon EBS +// * Amazon EBS // (https://docs.aws.amazon.com/ebs) // -// * Amazon EC2 +// * Amazon EC2 // (https://docs.aws.amazon.com/ec2) // -// * EC2 Image Builder +// * EC2 Image Builder // (https://docs.aws.amazon.com/imagebuilder) // -// * Amazon ECR +// * Amazon ECR // (https://docs.aws.amazon.com/ecr) // -// * Amazon ECS +// * Amazon ECS // (https://docs.aws.amazon.com/ecs) // -// * Amazon EKS +// * Amazon EKS // (https://docs.aws.amazon.com/eks) // -// * AWS Elastic Beanstalk +// * AWS Elastic Beanstalk // (https://docs.aws.amazon.com/elastic-beanstalk) // -// * Amazon Elastic File -// System (https://docs.aws.amazon.com/efs) +// * Amazon Elastic File System +// (https://docs.aws.amazon.com/efs) // -// * Elastic Load Balancing +// * Elastic Load Balancing // (https://docs.aws.amazon.com/elasticloadbalancing) // -// * Amazon Elastic -// Inference (https://docs.aws.amazon.com/elastic-inference) +// * Amazon Elastic Inference +// (https://docs.aws.amazon.com/elastic-inference) // -// * Amazon -// ElastiCache (https://docs.aws.amazon.com/elasticache) +// * Amazon ElastiCache +// (https://docs.aws.amazon.com/elasticache) // -// * Amazon -// Elasticsearch Service (https://docs.aws.amazon.com/elasticsearch-service) +// * Amazon Elasticsearch Service +// (https://docs.aws.amazon.com/elasticsearch-service) // -// * -// AWS Elemental MediaLive (https://docs.aws.amazon.com/medialive) +// * AWS Elemental MediaLive +// (https://docs.aws.amazon.com/medialive) // -// * AWS -// Elemental MediaPackage (https://docs.aws.amazon.com/mediapackage) +// * AWS Elemental MediaPackage +// (https://docs.aws.amazon.com/mediapackage) // -// * AWS -// Elemental MediaPackage VoD (https://docs.aws.amazon.com/mediapackage) +// * AWS Elemental MediaPackage VoD +// (https://docs.aws.amazon.com/mediapackage) // -// * AWS -// Elemental MediaTailor (https://docs.aws.amazon.com/mediatailor) +// * AWS Elemental MediaTailor +// (https://docs.aws.amazon.com/mediatailor) // -// * Amazon -// EMR (https://docs.aws.amazon.com/emr) +// * Amazon EMR +// (https://docs.aws.amazon.com/emr) // -// * Amazon EventBridge Schema +// * Amazon EventBridge Schema // (https://docs.aws.amazon.com/eventbridge) // -// * AWS Firewall Manager +// * AWS Firewall Manager // (https://docs.aws.amazon.com/firewall-manager) // -// * Amazon Forecast +// * Amazon Forecast // (https://docs.aws.amazon.com/forecast) // -// * Amazon Fraud Detector +// * Amazon Fraud Detector // (https://docs.aws.amazon.com/frauddetector) // -// * Amazon FSx +// * Amazon FSx // (https://docs.aws.amazon.com/fsx) // -// * Amazon S3 Glacier +// * Amazon S3 Glacier // (https://docs.aws.amazon.com/s3/?id=docs_gateway#amazon-s3-glacier) // -// * AWS +// * AWS // Global 
Accelerator (https://docs.aws.amazon.com/global-accelerator) // -// * AWS +// * AWS // Ground Station (https://docs.aws.amazon.com/ground-station) // -// * AWS Glue +// * AWS Glue // (https://docs.aws.amazon.com/glue) // -// * Amazon GuardDuty +// * Amazon GuardDuty // (https://docs.aws.amazon.com/guardduty) // -// * Amazon Inspector +// * Amazon Inspector // (https://docs.aws.amazon.com/inspector) // -// * Amazon Interactive Video Service +// * Amazon Interactive Video Service // (https://docs.aws.amazon.com/ivs) // -// * AWS IoT Analytics +// * AWS IoT Analytics // (https://docs.aws.amazon.com/iotanalytics) // -// * AWS IoT Core +// * AWS IoT Core // (https://docs.aws.amazon.com/iot) // -// * AWS IoT Device Defender +// * AWS IoT Device Defender // (https://docs.aws.amazon.com/iot-device-defender) // -// * AWS IoT Device -// Management (https://docs.aws.amazon.com/iot-device-management) +// * AWS IoT Device Management +// (https://docs.aws.amazon.com/iot-device-management) // -// * AWS IoT -// Events (https://docs.aws.amazon.com/iotevents) +// * AWS IoT Events +// (https://docs.aws.amazon.com/iotevents) // -// * AWS IoT Greengrass +// * AWS IoT Greengrass // (https://docs.aws.amazon.com/greengrass) // -// * AWS IoT 1-Click +// * AWS IoT 1-Click // (https://docs.aws.amazon.com/iot-1-click) // -// * AWS IoT Sitewise +// * AWS IoT Sitewise // (https://docs.aws.amazon.com/iot-sitewise) // -// * AWS IoT Things Graph +// * AWS IoT Things Graph // (https://docs.aws.amazon.com/thingsgraph) // -// * Amazon Kendra +// * Amazon Kendra // (https://docs.aws.amazon.com/kendra) // -// * AWS Key Management Service +// * AWS Key Management Service // (https://docs.aws.amazon.com/kms) // -// * Amazon Kinesis +// * Amazon Kinesis // (https://docs.aws.amazon.com/kinesis) // -// * Amazon Kinesis Data Analytics +// * Amazon Kinesis Data Analytics // (https://docs.aws.amazon.com/kinesis/?id=docs_gateway#amazon-kinesis-data-analytics) // -// -// * Amazon Kinesis Data Firehose +// * +// Amazon Kinesis Data Firehose // (https://docs.aws.amazon.com/kinesis/?id=docs_gateway#amazon-kinesis-data-firehose) // +// * +// AWS Lambda (https://docs.aws.amazon.com/lambda) // -// * AWS Lambda (https://docs.aws.amazon.com/lambda) -// -// * Amazon Lex +// * Amazon Lex // (https://docs.aws.amazon.com/lex) // -// * AWS License Manager +// * AWS License Manager // (https://docs.aws.amazon.com/license-manager) // -// * Amazon Lightsail +// * Amazon Lightsail // (https://docs.aws.amazon.com/lightsail) // -// * Amazon Macie +// * Amazon Macie // (https://docs.aws.amazon.com/macie) // -// * Amazon Machine Learning +// * Amazon Machine Learning // (https://docs.aws.amazon.com/machine-learning) // -// * Amazon MQ +// * Amazon MQ // (https://docs.aws.amazon.com/amazon-mq) // -// * Amazon MSK +// * Amazon MSK // (https://docs.aws.amazon.com/msk) // -// * Amazon MSK +// * Amazon MSK // (https://docs.aws.amazon.com/msk) // -// * Amazon Neptune +// * Amazon Neptune // (https://docs.aws.amazon.com/neptune) // -// * AWS Network Manager +// * AWS Network Manager // (https://docs.aws.amazon.com/vpc/latest/tgw/what-is-network-manager.html) // -// * -// AWS OpsWorks (https://docs.aws.amazon.com/opsworks) +// * AWS +// OpsWorks (https://docs.aws.amazon.com/opsworks) // -// * AWS OpsWorks CM +// * AWS OpsWorks CM // (https://docs.aws.amazon.com/opsworks) // -// * AWS Organizations +// * AWS Organizations // (https://docs.aws.amazon.com/organizations) // -// * Amazon Pinpoint +// * Amazon Pinpoint // (https://docs.aws.amazon.com/pinpoint) // -// * 
Amazon Quantum Ledger Database -// (QLDB) (https://docs.aws.amazon.com/qldb) +// * Amazon Quantum Ledger Database (QLDB) +// (https://docs.aws.amazon.com/qldb) // -// * Amazon RDS +// * Amazon RDS // (https://docs.aws.amazon.com/rds) // -// * Amazon Redshift +// * Amazon Redshift // (https://docs.aws.amazon.com/redshift) // -// * AWS Resource Access Manager +// * AWS Resource Access Manager // (https://docs.aws.amazon.com/ram) // -// * AWS Resource Groups +// * AWS Resource Groups // (https://docs.aws.amazon.com/ARG) // -// * AWS RoboMaker +// * AWS RoboMaker // (https://docs.aws.amazon.com/robomaker) // -// * Amazon Route 53 +// * Amazon Route 53 // (https://docs.aws.amazon.com/route53) // -// * Amazon Route 53 Resolver +// * Amazon Route 53 Resolver // (https://docs.aws.amazon.com/route53) // -// * Amazon S3 (buckets only) +// * Amazon S3 (buckets only) // (https://docs.aws.amazon.com/s3) // -// * Amazon SageMaker +// * Amazon SageMaker // (https://docs.aws.amazon.com/sagemaker) // -// * Savings Plans +// * Savings Plans // (https://docs.aws.amazon.com/savingsplans) // -// * AWS Secrets Manager +// * AWS Secrets Manager // (https://docs.aws.amazon.com/secretsmanager) // -// * AWS Security Hub +// * AWS Security Hub // (https://docs.aws.amazon.com/securityhub) // -// * AWS Service Catalog +// * AWS Service Catalog // (https://docs.aws.amazon.com/servicecatalog) // -// * Amazon Simple Email Service +// * Amazon Simple Email Service // (SES) (https://docs.aws.amazon.com/ses) // -// * Amazon Simple Notification -// Service (SNS) (https://docs.aws.amazon.com/sns) +// * Amazon Simple Notification Service +// (SNS) (https://docs.aws.amazon.com/sns) // -// * Amazon Simple Queue -// Service (SQS) (https://docs.aws.amazon.com/sqs) +// * Amazon Simple Queue Service (SQS) +// (https://docs.aws.amazon.com/sqs) // -// * Amazon Simple Workflow -// Service (https://docs.aws.amazon.com/swf) +// * Amazon Simple Workflow Service +// (https://docs.aws.amazon.com/swf) // -// * AWS Step Functions +// * AWS Step Functions // (https://docs.aws.amazon.com/step-functions) // -// * AWS Storage Gateway +// * AWS Storage Gateway // (https://docs.aws.amazon.com/storagegateway) // -// * AWS Systems Manager +// * AWS Systems Manager // (https://docs.aws.amazon.com/systems-manager) // -// * AWS Transfer for SFTP +// * AWS Transfer for SFTP // (https://docs.aws.amazon.com/transfer) // -// * Amazon VPC +// * Amazon VPC // (https://docs.aws.amazon.com/vpc) // -// * AWS WAF +// * AWS WAF // (https://docs.aws.amazon.com/waf) // -// * AWS WAF Regional +// * AWS WAF Regional // (https://docs.aws.amazon.com/waf) // -// * Amazon WorkLink +// * Amazon WorkLink // (https://docs.aws.amazon.com/worklink) // -// * Amazon WorkSpaces +// * Amazon WorkSpaces // (https://docs.aws.amazon.com/workspaces) package resourcegroupstaggingapi diff --git a/service/resourcegroupstaggingapi/types/enums.go b/service/resourcegroupstaggingapi/types/enums.go index e6b38d3bf7f..19fd7a5cf61 100644 --- a/service/resourcegroupstaggingapi/types/enums.go +++ b/service/resourcegroupstaggingapi/types/enums.go @@ -6,8 +6,8 @@ type ErrorCode string // Enum values for ErrorCode const ( - ErrorCodeInternal_service_exception ErrorCode = "InternalServiceException" - ErrorCodeInvalid_parameter_exception ErrorCode = "InvalidParameterException" + ErrorCodeInternalServiceException ErrorCode = "InternalServiceException" + ErrorCodeInvalidParameterException ErrorCode = "InvalidParameterException" ) // Values returns all known values for ErrorCode. 
Note that this can be expanded in @@ -24,9 +24,9 @@ type GroupByAttribute string // Enum values for GroupByAttribute const ( - GroupByAttributeTarget_id GroupByAttribute = "TARGET_ID" - GroupByAttributeRegion GroupByAttribute = "REGION" - GroupByAttributeResource_type GroupByAttribute = "RESOURCE_TYPE" + GroupByAttributeTargetId GroupByAttribute = "TARGET_ID" + GroupByAttributeRegion GroupByAttribute = "REGION" + GroupByAttributeResourceType GroupByAttribute = "RESOURCE_TYPE" ) // Values returns all known values for GroupByAttribute. Note that this can be diff --git a/service/resourcegroupstaggingapi/types/errors.go b/service/resourcegroupstaggingapi/types/errors.go index d71d6e7779f..9f37bbd7f93 100644 --- a/service/resourcegroupstaggingapi/types/errors.go +++ b/service/resourcegroupstaggingapi/types/errors.go @@ -31,18 +31,18 @@ func (e *ConcurrentModificationException) ErrorFault() smithy.ErrorFault { retur // Some of the reasons in the following list might not apply to this specific // operation. // -// * You must meet the prerequisites for using tag policies. For +// * You must meet the prerequisites for using tag policies. For // information, see Prerequisites and Permissions for Using Tag Policies // (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_tag-policies-prereqs.html) // in the AWS Organizations User Guide. // -// * You must enable the tag policies -// service principal (tagpolicies.tag.amazonaws.com) to integrate with AWS -// Organizations For information, see EnableAWSServiceAccess +// * You must enable the tag policies service +// principal (tagpolicies.tag.amazonaws.com) to integrate with AWS Organizations +// For information, see EnableAWSServiceAccess // (http://docs.aws.amazon.com/organizations/latest/APIReference/API_EnableAWSServiceAccess.html). // -// -// * You must have a tag policy attached to the organization root, an OU, or an +// * +// You must have a tag policy attached to the organization root, an OU, or an // account. type ConstraintViolationException struct { Message *string @@ -80,19 +80,19 @@ func (e *InternalServiceException) ErrorFault() smithy.ErrorFault { return smith // This error indicates one of the following: // -// * A parameter is missing. +// * A parameter is missing. // -// * -// A malformed string was supplied for the request parameter. +// * A +// malformed string was supplied for the request parameter. // -// * An -// out-of-range value was supplied for the request parameter. +// * An out-of-range +// value was supplied for the request parameter. // -// * The target ID -// is invalid, unsupported, or doesn't exist. +// * The target ID is invalid, +// unsupported, or doesn't exist. // -// * You can't access the Amazon S3 -// bucket for report storage. For more information, see Additional Requirements for +// * You can't access the Amazon S3 bucket for +// report storage. For more information, see Additional Requirements for // Organization-wide Tag Compliance Reports // (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_tag-policies-prereqs.html#bucket-policies-org-report) // in the AWS Organizations User Guide. diff --git a/service/resourcegroupstaggingapi/types/types.go b/service/resourcegroupstaggingapi/types/types.go index c95ccddecc2..139a8606545 100644 --- a/service/resourcegroupstaggingapi/types/types.go +++ b/service/resourcegroupstaggingapi/types/types.go @@ -23,7 +23,7 @@ type ComplianceDetails struct { // that hosts the resource that the ARN key represents. 
The following are common // error codes that you might receive from other AWS services: // -// * +// * // InternalServiceException – This can mean that the Resource Groups Tagging API // didn't receive a response from another AWS service. It can also mean the the // resource type in the request is not supported by the Resource Groups Tagging @@ -31,9 +31,9 @@ type ComplianceDetails struct { // (http://docs.aws.amazon.com/resourcegroupstagging/latest/APIReference/API_GetResources.html) // to verify the changes. // -// * AccessDeniedException – This can mean that you -// need permission to calling tagging operations in the AWS service that contains -// the resource. For example, to use the Resource Groups Tagging API to tag a +// * AccessDeniedException – This can mean that you need +// permission to calling tagging operations in the AWS service that contains the +// resource. For example, to use the Resource Groups Tagging API to tag a // CloudWatch alarm resource, you need permission to call TagResources // (http://docs.aws.amazon.com/resourcegroupstagging/latest/APIReference/API_TagResources.html) // and TagResource diff --git a/service/robomaker/types/enums.go b/service/robomaker/types/enums.go index 178361d8f21..fb547629365 100644 --- a/service/robomaker/types/enums.go +++ b/service/robomaker/types/enums.go @@ -6,9 +6,9 @@ type Architecture string // Enum values for Architecture const ( - ArchitectureX86_64 Architecture = "X86_64" - ArchitectureArm64 Architecture = "ARM64" - ArchitectureArmhf Architecture = "ARMHF" + ArchitectureX8664 Architecture = "X86_64" + ArchitectureArm64 Architecture = "ARM64" + ArchitectureArmhf Architecture = "ARMHF" ) // Values returns all known values for Architecture. Note that this can be expanded diff --git a/service/route53/api_op_ChangeResourceRecordSets.go b/service/route53/api_op_ChangeResourceRecordSets.go index ffafcccb07f..b839707f5c5 100644 --- a/service/route53/api_op_ChangeResourceRecordSets.go +++ b/service/route53/api_op_ChangeResourceRecordSets.go @@ -44,27 +44,27 @@ import ( // the Amazon Route 53 Developer Guide. Create, Delete, and Upsert Use // ChangeResourceRecordsSetsRequest to perform the following actions: // -// * -// CREATE: Creates a resource record set that has the specified values. +// * CREATE: +// Creates a resource record set that has the specified values. // -// * -// DELETE: Deletes an existing resource record set that has the specified values. +// * DELETE: Deletes +// an existing resource record set that has the specified values. // +// * UPSERT: If a +// resource record set does not already exist, AWS creates it. If a resource set +// does exist, Route 53 updates it with the values in the request. // -// * UPSERT: If a resource record set does not already exist, AWS creates it. If a -// resource set does exist, Route 53 updates it with the values in the -// request. -// -// Syntaxes for Creating, Updating, and Deleting Resource Record Sets The -// syntax for a request depends on the type of resource record set that you want to -// create, delete, or update, such as weighted, alias, or failover. The XML -// elements in your request must appear in the order listed in the syntax. For an -// example for each type of resource record set, see "Examples." Don't refer to the -// syntax in the "Parameter Syntax" section, which includes all of the elements for -// every kind of resource record set that you can create, delete, or update by -// using ChangeResourceRecordSets. 
Change Propagation to Route 53 DNS Servers When -// you submit a ChangeResourceRecordSets request, Route 53 propagates your changes -// to all of the Route 53 authoritative DNS servers. While your changes are +// Syntaxes for +// Creating, Updating, and Deleting Resource Record Sets The syntax for a request +// depends on the type of resource record set that you want to create, delete, or +// update, such as weighted, alias, or failover. The XML elements in your request +// must appear in the order listed in the syntax. For an example for each type of +// resource record set, see "Examples." Don't refer to the syntax in the "Parameter +// Syntax" section, which includes all of the elements for every kind of resource +// record set that you can create, delete, or update by using +// ChangeResourceRecordSets. Change Propagation to Route 53 DNS Servers When you +// submit a ChangeResourceRecordSets request, Route 53 propagates your changes to +// all of the Route 53 authoritative DNS servers. While your changes are // propagating, GetChange returns a status of PENDING. When propagation is // complete, GetChange returns a status of INSYNC. Changes generally propagate to // all Route 53 name servers within 60 seconds. For more information, see GetChange diff --git a/service/route53/api_op_ChangeTagsForResource.go b/service/route53/api_op_ChangeTagsForResource.go index 81d313904cb..d4e467c451e 100644 --- a/service/route53/api_op_ChangeTagsForResource.go +++ b/service/route53/api_op_ChangeTagsForResource.go @@ -41,10 +41,10 @@ type ChangeTagsForResourceInput struct { // The type of the resource. // - // * The resource type for health checks is + // * The resource type for health checks is // healthcheck. // - // * The resource type for hosted zones is hostedzone. + // * The resource type for hosted zones is hostedzone. // // This member is required. ResourceType types.TagResourceType diff --git a/service/route53/api_op_CreateHealthCheck.go b/service/route53/api_op_CreateHealthCheck.go index 15b7ac58902..b2dffbd48c6 100644 --- a/service/route53/api_op_CreateHealthCheck.go +++ b/service/route53/api_op_CreateHealthCheck.go @@ -24,21 +24,21 @@ import ( // with failover resource record sets in a private hosted zone. Note the // following: // -// * Route 53 health checkers are outside the VPC. To check the -// health of an endpoint within a VPC by IP address, you must assign a public IP -// address to the instance in the VPC. +// * Route 53 health checkers are outside the VPC. To check the health +// of an endpoint within a VPC by IP address, you must assign a public IP address +// to the instance in the VPC. // -// * You can configure a health checker to -// check the health of an external resource that the instance relies on, such as a -// database server. +// * You can configure a health checker to check the +// health of an external resource that the instance relies on, such as a database +// server. // -// * You can create a CloudWatch metric, associate an alarm -// with the metric, and then create a health check that is based on the state of -// the alarm. For example, you might create a CloudWatch metric that checks the -// status of the Amazon EC2 StatusCheckFailed metric, add an alarm to the metric, -// and then create a health check that is based on the state of the alarm. 
For -// information about creating CloudWatch metrics and alarms by using the CloudWatch -// console, see the Amazon CloudWatch User Guide +// * You can create a CloudWatch metric, associate an alarm with the +// metric, and then create a health check that is based on the state of the alarm. +// For example, you might create a CloudWatch metric that checks the status of the +// Amazon EC2 StatusCheckFailed metric, add an alarm to the metric, and then create +// a health check that is based on the state of the alarm. For information about +// creating CloudWatch metrics and alarms by using the CloudWatch console, see the +// Amazon CloudWatch User Guide // (https://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/WhatIsCloudWatch.html). func (c *Client) CreateHealthCheck(ctx context.Context, params *CreateHealthCheckInput, optFns ...func(*Options)) (*CreateHealthCheckOutput, error) { if params == nil { @@ -62,22 +62,22 @@ type CreateHealthCheckInput struct { // failed CreateHealthCheck request without the risk of creating two identical // health checks: // - // * If you send a CreateHealthCheck request with the same + // * If you send a CreateHealthCheck request with the same // CallerReference and settings as a previous request, and if the health check // doesn't exist, Amazon Route 53 creates the health check. If the health check // does exist, Route 53 returns the settings for the existing health check. // - // * - // If you send a CreateHealthCheck request with the same CallerReference as a - // deleted health check, regardless of the settings, Route 53 returns a + // * If + // you send a CreateHealthCheck request with the same CallerReference as a deleted + // health check, regardless of the settings, Route 53 returns a // HealthCheckAlreadyExists error. // - // * If you send a CreateHealthCheck request - // with the same CallerReference as an existing health check but with different + // * If you send a CreateHealthCheck request with + // the same CallerReference as an existing health check but with different // settings, Route 53 returns a HealthCheckAlreadyExists error. // - // * If you send - // a CreateHealthCheck request with a unique CallerReference but settings identical + // * If you send a + // CreateHealthCheck request with a unique CallerReference but settings identical // to an existing health check, Route 53 creates the health check. // // This member is required. diff --git a/service/route53/api_op_CreateHostedZone.go b/service/route53/api_op_CreateHostedZone.go index 64eb67a55a2..129311ccffb 100644 --- a/service/route53/api_op_CreateHostedZone.go +++ b/service/route53/api_op_CreateHostedZone.go @@ -22,21 +22,21 @@ import ( // about charges for hosted zones, see Amazon Route 53 Pricing // (http://aws.amazon.com/route53/pricing/). Note the following: // -// * You can't +// * You can't // create a hosted zone for a top-level domain (TLD) such as .com. // -// * For -// public hosted zones, Route 53 automatically creates a default SOA record and -// four NS records for the zone. For more information about SOA and NS records, see -// NS and SOA Records that Route 53 Creates for a Hosted Zone +// * For public +// hosted zones, Route 53 automatically creates a default SOA record and four NS +// records for the zone. For more information about SOA and NS records, see NS and +// SOA Records that Route 53 Creates for a Hosted Zone // (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/SOA-NSrecords.html) // in the Amazon Route 53 Developer Guide. 
If you want to use the same name servers // for multiple public hosted zones, you can optionally associate a reusable // delegation set with the hosted zone. See the DelegationSetId element. // -// * If -// your domain is registered with a registrar other than Route 53, you must update -// the name servers with your registrar to make Route 53 the DNS service for the +// * If your +// domain is registered with a registrar other than Route 53, you must update the +// name servers with your registrar to make Route 53 the DNS service for the // domain. For more information, see Migrating DNS Service for an Existing Domain // to Amazon Route 53 // (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/MigratingDNS.html) in @@ -96,14 +96,14 @@ type CreateHostedZoneInput struct { // (Optional) A complex type that contains the following optional values: // - // * - // For public and private hosted zones, an optional comment + // * For + // public and private hosted zones, an optional comment // - // * For private - // hosted zones, an optional PrivateZone element + // * For private hosted + // zones, an optional PrivateZone element // - // If you don't specify a comment or - // the PrivateZone element, omit HostedZoneConfig and the other elements. + // If you don't specify a comment or the + // PrivateZone element, omit HostedZoneConfig and the other elements. HostedZoneConfig *types.HostedZoneConfig // (Private hosted zones only) A complex type that contains information about the diff --git a/service/route53/api_op_CreateQueryLoggingConfig.go b/service/route53/api_op_CreateQueryLoggingConfig.go index 6bbc44668a7..e1c77f4ee3f 100644 --- a/service/route53/api_op_CreateQueryLoggingConfig.go +++ b/service/route53/api_op_CreateQueryLoggingConfig.go @@ -17,47 +17,46 @@ import ( // that Route 53 receives for a specified public hosted zone, such as the // following: // -// * Route 53 edge location that responded to the DNS query +// * Route 53 edge location that responded to the DNS query // -// * -// Domain or subdomain that was requested +// * Domain +// or subdomain that was requested // -// * DNS record type, such as A or -// AAAA +// * DNS record type, such as A or AAAA // -// * DNS response code, such as NoError or ServFail +// * DNS +// response code, such as NoError or ServFail // -// Log Group and -// Resource Policy Before you create a query logging configuration, perform the -// following operations. If you create a query logging configuration using the -// Route 53 console, Route 53 performs these operations automatically. +// Log Group and Resource Policy Before +// you create a query logging configuration, perform the following operations. If +// you create a query logging configuration using the Route 53 console, Route 53 +// performs these operations automatically. // -// * -// Create a CloudWatch Logs log group, and make note of the ARN, which you specify -// when you create a query logging configuration. Note the following: +// * Create a CloudWatch Logs log group, +// and make note of the ARN, which you specify when you create a query logging +// configuration. Note the following: // -// * -// You must create the log group in the us-east-1 region. +// * You must create the log group in the +// us-east-1 region. // -// * You must use -// the same AWS account to create the log group and the hosted zone that you want -// to configure query logging for. +// * You must use the same AWS account to create the log group +// and the hosted zone that you want to configure query logging for. 
// -// * When you create log groups for query -// logging, we recommend that you use a consistent prefix, for example: -// /aws/route53/hosted zone name In the next step, you'll create a resource -// policy, which controls access to one or more log groups and the associated AWS -// resources, such as Route 53 hosted zones. There's a limit on the number of -// resource policies that you can create, so we recommend that you use a consistent -// prefix so you can use the same resource policy for all the log groups that you -// create for query logging. +// * When you +// create log groups for query logging, we recommend that you use a consistent +// prefix, for example: /aws/route53/hosted zone name In the next step, you'll +// create a resource policy, which controls access to one or more log groups and +// the associated AWS resources, such as Route 53 hosted zones. There's a limit on +// the number of resource policies that you can create, so we recommend that you +// use a consistent prefix so you can use the same resource policy for all the log +// groups that you create for query logging. // -// * Create a CloudWatch Logs resource policy, and -// give it the permissions that Route 53 needs to create log streams and to send -// query logs to log streams. For the value of Resource, specify the ARN for the -// log group that you created in the previous step. To use the same resource policy -// for all the CloudWatch Logs log groups that you created for query logging -// configurations, replace the hosted zone name with , for example: +// * Create a CloudWatch Logs resource +// policy, and give it the permissions that Route 53 needs to create log streams +// and to send query logs to log streams. For the value of Resource, specify the +// ARN for the log group that you created in the previous step. To use the same +// resource policy for all the CloudWatch Logs log groups that you created for +// query logging configurations, replace the hosted zone name with , for example: // arn:aws:logs:us-east-1:123412341234:log-group:/aws/route53/ You can't use the // CloudWatch console to create or edit a resource policy. You must use the // CloudWatch API, one of the AWS SDKs, or the AWS CLI. @@ -66,16 +65,16 @@ import ( // Locations When Route 53 finishes creating the configuration for DNS query // logging, it does the following: // -// * Creates a log stream for an edge location -// the first time that the edge location responds to DNS queries for the specified +// * Creates a log stream for an edge location the +// first time that the edge location responds to DNS queries for the specified // hosted zone. That log stream is used to log all queries that Route 53 responds // to for that edge location. // -// * Begins to send query logs to the applicable -// log stream. +// * Begins to send query logs to the applicable log +// stream. // -// The name of each log stream is in the following format: hosted -// zone ID/edge location code The edge location code is a three-letter code and an +// The name of each log stream is in the following format: hosted zone +// ID/edge location code The edge location code is a three-letter code and an // arbitrarily assigned number, for example, DFW3. The three-letter code typically // corresponds with the International Air Transport Association airport code for an // airport near the edge location. 
(These abbreviations might change in the diff --git a/service/route53/api_op_CreateReusableDelegationSet.go b/service/route53/api_op_CreateReusableDelegationSet.go index 1f77d115702..ecf7d6b5c8f 100644 --- a/service/route53/api_op_CreateReusableDelegationSet.go +++ b/service/route53/api_op_CreateReusableDelegationSet.go @@ -24,41 +24,41 @@ import ( // is comparable to the process for configuring white label name servers. You need // to perform the following steps: // -// * Create a reusable delegation set. +// * Create a reusable delegation set. // -// * -// Recreate hosted zones, and reduce the TTL to 60 seconds or less. +// * Recreate +// hosted zones, and reduce the TTL to 60 seconds or less. // -// * Recreate -// resource record sets in the new hosted zones. +// * Recreate resource +// record sets in the new hosted zones. // -// * Change the registrar's name -// servers to use the name servers for the new hosted zones. +// * Change the registrar's name servers to +// use the name servers for the new hosted zones. // -// * Monitor traffic -// for the website or application. +// * Monitor traffic for the +// website or application. // -// * Change TTLs back to their original -// values. +// * Change TTLs back to their original values. // -// If you want to migrate existing hosted zones to use a reusable -// delegation set, the existing hosted zones can't use any of the name servers that -// are assigned to the reusable delegation set. If one or more hosted zones do use -// one or more name servers that are assigned to the reusable delegation set, you -// can do one of the following: +// If you +// want to migrate existing hosted zones to use a reusable delegation set, the +// existing hosted zones can't use any of the name servers that are assigned to the +// reusable delegation set. If one or more hosted zones do use one or more name +// servers that are assigned to the reusable delegation set, you can do one of the +// following: // -// * For small numbers of hosted zones—up to a -// few hundred—it's relatively easy to create reusable delegation sets until you -// get one that has four name servers that don't overlap with any of the name -// servers in your hosted zones. +// * For small numbers of hosted zones—up to a few hundred—it's +// relatively easy to create reusable delegation sets until you get one that has +// four name servers that don't overlap with any of the name servers in your hosted +// zones. // -// * For larger numbers of hosted zones, the -// easiest solution is to use more than one reusable delegation set. +// * For larger numbers of hosted zones, the easiest solution is to use +// more than one reusable delegation set. // -// * For -// larger numbers of hosted zones, you can also migrate hosted zones that have -// overlapping name servers to hosted zones that don't have overlapping name -// servers, then migrate the hosted zones again to use the reusable delegation set. +// * For larger numbers of hosted zones, +// you can also migrate hosted zones that have overlapping name servers to hosted +// zones that don't have overlapping name servers, then migrate the hosted zones +// again to use the reusable delegation set. 
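For orientation only, here is a minimal sketch of calling the operation documented in the reflowed comment above with the v2 SDK. The CallerReference value is a placeholder and the config-loading style is an assumption, not something taken from this patch:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/route53"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := route53.NewFromConfig(cfg)

	// CallerReference must be unique for each request; this value is illustrative.
	out, err := client.CreateReusableDelegationSet(context.TODO(), &route53.CreateReusableDelegationSetInput{
		CallerReference: aws.String("example-delegation-set-2020-10-29"),
	})
	if err != nil {
		log.Fatal(err)
	}
	// The response carries the four name servers to reuse when recreating hosted zones.
	for _, ns := range out.DelegationSet.NameServers {
		fmt.Println(ns)
	}
}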
func (c *Client) CreateReusableDelegationSet(ctx context.Context, params *CreateReusableDelegationSetInput, optFns ...func(*Options)) (*CreateReusableDelegationSetOutput, error) { if params == nil { params = &CreateReusableDelegationSetInput{} diff --git a/service/route53/api_op_DeleteHostedZone.go b/service/route53/api_op_DeleteHostedZone.go index f689b3b90d8..b3bfaa059b2 100644 --- a/service/route53/api_op_DeleteHostedZone.go +++ b/service/route53/api_op_DeleteHostedZone.go @@ -45,12 +45,12 @@ import ( // (https://docs.aws.amazon.com/Route53/latest/APIReference/API_ChangeResourceRecordSets.html). // To verify that the hosted zone has been deleted, do one of the following: // -// * -// Use the GetHostedZone action to request information about the hosted zone. +// * Use +// the GetHostedZone action to request information about the hosted zone. // -// -// * Use the ListHostedZones action to get a list of the hosted zones associated -// with the current AWS account. +// * Use +// the ListHostedZones action to get a list of the hosted zones associated with the +// current AWS account. func (c *Client) DeleteHostedZone(ctx context.Context, params *DeleteHostedZoneInput, optFns ...func(*Options)) (*DeleteHostedZoneOutput, error) { if params == nil { params = &DeleteHostedZoneInput{} diff --git a/service/route53/api_op_DeleteTrafficPolicy.go b/service/route53/api_op_DeleteTrafficPolicy.go index b0cff7b2023..cbd91374ad7 100644 --- a/service/route53/api_op_DeleteTrafficPolicy.go +++ b/service/route53/api_op_DeleteTrafficPolicy.go @@ -14,16 +14,16 @@ import ( // on the policy to indicate that it has been deleted. However, Route 53 never // fully deletes the traffic policy. Note the following: // -// * Deleted traffic +// * Deleted traffic // policies aren't listed if you run ListTrafficPolicies // (https://docs.aws.amazon.com/Route53/latest/APIReference/API_ListTrafficPolicies.html). // +// * +// There's no way to get a list of deleted policies. // -// * There's no way to get a list of deleted policies. -// -// * If you retain the ID -// of the policy, you can get information about the policy, including the traffic -// policy document, by running GetTrafficPolicy +// * If you retain the ID of the +// policy, you can get information about the policy, including the traffic policy +// document, by running GetTrafficPolicy // (https://docs.aws.amazon.com/Route53/latest/APIReference/API_GetTrafficPolicy.html). func (c *Client) DeleteTrafficPolicy(ctx context.Context, params *DeleteTrafficPolicyInput, optFns ...func(*Options)) (*DeleteTrafficPolicyOutput, error) { if params == nil { diff --git a/service/route53/api_op_DisassociateVPCFromHostedZone.go b/service/route53/api_op_DisassociateVPCFromHostedZone.go index 866395642b5..afc0aa893d3 100644 --- a/service/route53/api_op_DisassociateVPCFromHostedZone.go +++ b/service/route53/api_op_DisassociateVPCFromHostedZone.go @@ -14,18 +14,18 @@ import ( // Disassociates an Amazon Virtual Private Cloud (Amazon VPC) from an Amazon Route // 53 private hosted zone. Note the following: // -// * You can't disassociate the -// last Amazon VPC from a private hosted zone. +// * You can't disassociate the last +// Amazon VPC from a private hosted zone. // -// * You can't convert a private -// hosted zone into a public hosted zone. +// * You can't convert a private hosted +// zone into a public hosted zone. 
// -// * You can submit a +// * You can submit a // DisassociateVPCFromHostedZone request using either the account that created the // hosted zone or the account that created the Amazon VPC. // -// * Some services, -// such as AWS Cloud Map and Amazon Elastic File System (Amazon EFS) automatically +// * Some services, such +// as AWS Cloud Map and Amazon Elastic File System (Amazon EFS) automatically // create hosted zones and associate VPCs with the hosted zones. A service can // create a hosted zone using your account or using its own account. You can // disassociate a VPC from a hosted zone only if the service created the hosted diff --git a/service/route53/api_op_GetAccountLimit.go b/service/route53/api_op_GetAccountLimit.go index 5289b7e6d2d..fa1141354e1 100644 --- a/service/route53/api_op_GetAccountLimit.go +++ b/service/route53/api_op_GetAccountLimit.go @@ -43,26 +43,25 @@ type GetAccountLimitInput struct { // The limit that you want to get. Valid values include the following: // - // * + // * // MAX_HEALTH_CHECKS_BY_OWNER: The maximum number of health checks that you can // create using the current account. // - // * MAX_HOSTED_ZONES_BY_OWNER: The maximum + // * MAX_HOSTED_ZONES_BY_OWNER: The maximum // number of hosted zones that you can create using the current account. // - // * + // * // MAX_REUSABLE_DELEGATION_SETS_BY_OWNER: The maximum number of reusable delegation // sets that you can create using the current account. // - // * + // * // MAX_TRAFFIC_POLICIES_BY_OWNER: The maximum number of traffic policies that you // can create using the current account. // - // * - // MAX_TRAFFIC_POLICY_INSTANCES_BY_OWNER: The maximum number of traffic policy - // instances that you can create using the current account. (Traffic policy - // instances are referred to as traffic flow policy records in the Amazon Route 53 - // console.) + // * MAX_TRAFFIC_POLICY_INSTANCES_BY_OWNER: + // The maximum number of traffic policy instances that you can create using the + // current account. (Traffic policy instances are referred to as traffic flow + // policy records in the Amazon Route 53 console.) // // This member is required. Type types.AccountLimitType diff --git a/service/route53/api_op_GetChange.go b/service/route53/api_op_GetChange.go index e6a932b6723..aa83028d58a 100644 --- a/service/route53/api_op_GetChange.go +++ b/service/route53/api_op_GetChange.go @@ -14,12 +14,12 @@ import ( // Returns the current status of a change batch request. The status is one of the // following values: // -// * PENDING indicates that the changes in this request have -// not propagated to all Amazon Route 53 DNS servers. This is the initial status of -// all change batch requests. +// * PENDING indicates that the changes in this request have not +// propagated to all Amazon Route 53 DNS servers. This is the initial status of all +// change batch requests. // -// * INSYNC indicates that the changes have -// propagated to all Route 53 DNS servers. +// * INSYNC indicates that the changes have propagated to +// all Route 53 DNS servers. 
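To make the PENDING/INSYNC lifecycle described above concrete, a hedged sketch of polling GetChange until a change batch propagates. The helper name, change ID handling, and sleep interval are illustrative, and the ChangeStatusInsync constant assumes the camel-case enum naming introduced by this patch:

package example

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/route53"
	"github.com/aws/aws-sdk-go-v2/service/route53/types"
)

// waitForInsync polls GetChange until the change batch reports INSYNC.
func waitForInsync(ctx context.Context, client *route53.Client, changeID string) error {
	for {
		out, err := client.GetChange(ctx, &route53.GetChangeInput{Id: aws.String(changeID)})
		if err != nil {
			return err
		}
		if out.ChangeInfo.Status == types.ChangeStatusInsync {
			return nil // changes have propagated to all Route 53 DNS servers
		}
		// Changes generally propagate within 60 seconds; back off briefly between polls.
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(10 * time.Second):
		}
	}
}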
func (c *Client) GetChange(ctx context.Context, params *GetChangeInput, optFns ...func(*Options)) (*GetChangeOutput, error) { if params == nil { params = &GetChangeInput{} diff --git a/service/route53/api_op_GetGeoLocation.go b/service/route53/api_op_GetGeoLocation.go index 070790511ba..f9a61654066 100644 --- a/service/route53/api_op_GetGeoLocation.go +++ b/service/route53/api_op_GetGeoLocation.go @@ -42,21 +42,21 @@ type GetGeoLocationInput struct { // For geolocation resource record sets, a two-letter abbreviation that identifies // a continent. Amazon Route 53 supports the following continent codes: // - // * AF: + // * AF: // Africa // - // * AN: Antarctica + // * AN: Antarctica // - // * AS: Asia + // * AS: Asia // - // * EU: Europe + // * EU: Europe // - // * OC: - // Oceania + // * OC: Oceania // - // * NA: North America + // * NA: North + // America // - // * SA: South America + // * SA: South America ContinentCode *string // Amazon Route 53 uses the two-letter country codes that are specified in ISO diff --git a/service/route53/api_op_GetHostedZoneLimit.go b/service/route53/api_op_GetHostedZoneLimit.go index d853c1b3f33..1125a21f5b4 100644 --- a/service/route53/api_op_GetHostedZoneLimit.go +++ b/service/route53/api_op_GetHostedZoneLimit.go @@ -43,11 +43,11 @@ type GetHostedZoneLimitInput struct { // The limit that you want to get. Valid values include the following: // - // * + // * // MAX_RRSETS_BY_ZONE: The maximum number of records that you can create in the // specified hosted zone. // - // * MAX_VPCS_ASSOCIATED_BY_ZONE: The maximum number of + // * MAX_VPCS_ASSOCIATED_BY_ZONE: The maximum number of // Amazon VPCs that you can associate with the specified private hosted zone. // // This member is required. diff --git a/service/route53/api_op_ListHostedZonesByName.go b/service/route53/api_op_ListHostedZonesByName.go index 38e897596ed..5cb94efba26 100644 --- a/service/route53/api_op_ListHostedZonesByName.go +++ b/service/route53/api_op_ListHostedZonesByName.go @@ -29,27 +29,27 @@ import ( // them in groups of up to 100. The response includes values that help navigate // from one group of MaxItems hosted zones to the next: // -// * The DNSName and +// * The DNSName and // HostedZoneId elements in the response contain the values, if any, specified for // the dnsname and hostedzoneid parameters in the request that produced the current // response. // -// * The MaxItems element in the response contains the value, if -// any, that you specified for the maxitems parameter in the request that produced -// the current response. +// * The MaxItems element in the response contains the value, if any, +// that you specified for the maxitems parameter in the request that produced the +// current response. // -// * If the value of IsTruncated in the response is -// true, there are more hosted zones associated with the current AWS account. If -// IsTruncated is false, this response includes the last hosted zone that is -// associated with the current account. The NextDNSName element and -// NextHostedZoneId elements are omitted from the response. +// * If the value of IsTruncated in the response is true, there +// are more hosted zones associated with the current AWS account. If IsTruncated is +// false, this response includes the last hosted zone that is associated with the +// current account. The NextDNSName element and NextHostedZoneId elements are +// omitted from the response. 
// -// * The NextDNSName -// and NextHostedZoneId elements in the response contain the domain name and the -// hosted zone ID of the next hosted zone that is associated with the current AWS -// account. If you want to list more hosted zones, make another call to -// ListHostedZonesByName, and specify the value of NextDNSName and NextHostedZoneId -// in the dnsname and hostedzoneid parameters, respectively. +// * The NextDNSName and NextHostedZoneId elements in +// the response contain the domain name and the hosted zone ID of the next hosted +// zone that is associated with the current AWS account. If you want to list more +// hosted zones, make another call to ListHostedZonesByName, and specify the value +// of NextDNSName and NextHostedZoneId in the dnsname and hostedzoneid parameters, +// respectively. func (c *Client) ListHostedZonesByName(ctx context.Context, params *ListHostedZonesByNameInput, optFns ...func(*Options)) (*ListHostedZonesByNameOutput, error) { if params == nil { params = &ListHostedZonesByNameInput{} diff --git a/service/route53/api_op_ListHostedZonesByVPC.go b/service/route53/api_op_ListHostedZonesByVPC.go index d0359f67969..ef7e4372e9e 100644 --- a/service/route53/api_op_ListHostedZonesByVPC.go +++ b/service/route53/api_op_ListHostedZonesByVPC.go @@ -16,14 +16,14 @@ import ( // HostedZoneOwner structure in the response contains one of the following // values: // -// * An OwningAccount element, which contains the account number of -// either the current AWS account or another AWS account. Some services, such as -// AWS Cloud Map, create hosted zones using the current account. +// * An OwningAccount element, which contains the account number of either +// the current AWS account or another AWS account. Some services, such as AWS Cloud +// Map, create hosted zones using the current account. // -// * An -// OwningService element, which identifies the AWS service that created and owns -// the hosted zone. For example, if a hosted zone was created by Amazon Elastic -// File System (Amazon EFS), the value of Owner is efs.amazonaws.com. +// * An OwningService element, +// which identifies the AWS service that created and owns the hosted zone. For +// example, if a hosted zone was created by Amazon Elastic File System (Amazon +// EFS), the value of Owner is efs.amazonaws.com. 
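As a rough usage sketch for the OwningAccount/OwningService distinction described above; the function name and the hard-coded VPC region are assumptions for illustration, and the enum constant follows the camel-case naming shown elsewhere in this patch:

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/route53"
	"github.com/aws/aws-sdk-go-v2/service/route53/types"
)

// printHostedZoneOwners lists the hosted zones associated with a VPC and reports
// whether each one is owned by an AWS account or by an AWS service.
func printHostedZoneOwners(ctx context.Context, client *route53.Client, vpcID string) error {
	out, err := client.ListHostedZonesByVPC(ctx, &route53.ListHostedZonesByVPCInput{
		VPCId:     aws.String(vpcID),
		VPCRegion: types.VPCRegionUsEast1, // example region; use the region the VPC lives in
	})
	if err != nil {
		return err
	}
	for _, summary := range out.HostedZoneSummaries {
		switch {
		case summary.Owner.OwningAccount != nil:
			fmt.Printf("%s owned by account %s\n", aws.ToString(summary.Name), aws.ToString(summary.Owner.OwningAccount))
		case summary.Owner.OwningService != nil:
			fmt.Printf("%s owned by service %s\n", aws.ToString(summary.Name), aws.ToString(summary.Owner.OwningService))
		}
	}
	return nil
}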
func (c *Client) ListHostedZonesByVPC(ctx context.Context, params *ListHostedZonesByVPCInput, optFns ...func(*Options)) (*ListHostedZonesByVPCOutput, error) { if params == nil { params = &ListHostedZonesByVPCInput{} diff --git a/service/route53/api_op_ListResourceRecordSets.go b/service/route53/api_op_ListResourceRecordSets.go index 4988bae88f0..3c899c503be 100644 --- a/service/route53/api_op_ListResourceRecordSets.go +++ b/service/route53/api_op_ListResourceRecordSets.go @@ -92,28 +92,27 @@ type ListResourceRecordSetsInput struct { // resource record sets: A | AAAA | CAA | CNAME | MX | NAPTR | PTR | SPF | SRV | // TXT Values for alias resource record sets: // - // * API Gateway custom regional - // API or edge-optimized API: A + // * API Gateway custom regional API or + // edge-optimized API: A // - // * CloudFront distribution: A or AAAA + // * CloudFront distribution: A or AAAA // - // * - // Elastic Beanstalk environment that has a regionalized subdomain: A + // * Elastic Beanstalk + // environment that has a regionalized subdomain: A // - // * - // Elastic Load Balancing load balancer: A | AAAA + // * Elastic Load Balancing load + // balancer: A | AAAA // - // * S3 bucket: A + // * S3 bucket: A // - // * VPC - // interface VPC endpoint: A + // * VPC interface VPC endpoint: A // - // * Another resource record set in this hosted - // zone: The type of the resource record set that the alias - // references. + // * Another + // resource record set in this hosted zone: The type of the resource record set + // that the alias references. // - // Constraint: Specifying type without specifying name returns an - // InvalidInput error. + // Constraint: Specifying type without specifying name + // returns an InvalidInput error. StartRecordType types.RRType } diff --git a/service/route53/api_op_ListTagsForResource.go b/service/route53/api_op_ListTagsForResource.go index bc8cd26ee85..ed9a65866a6 100644 --- a/service/route53/api_op_ListTagsForResource.go +++ b/service/route53/api_op_ListTagsForResource.go @@ -41,10 +41,10 @@ type ListTagsForResourceInput struct { // The type of the resource. // - // * The resource type for health checks is + // * The resource type for health checks is // healthcheck. // - // * The resource type for hosted zones is hostedzone. + // * The resource type for hosted zones is hostedzone. // // This member is required. ResourceType types.TagResourceType diff --git a/service/route53/api_op_ListTagsForResources.go b/service/route53/api_op_ListTagsForResources.go index 6c9cebaa513..b6a362e91d1 100644 --- a/service/route53/api_op_ListTagsForResources.go +++ b/service/route53/api_op_ListTagsForResources.go @@ -42,10 +42,10 @@ type ListTagsForResourcesInput struct { // The type of the resources. // - // * The resource type for health checks is + // * The resource type for health checks is // healthcheck. // - // * The resource type for hosted zones is hostedzone. + // * The resource type for hosted zones is hostedzone. // // This member is required. ResourceType types.TagResourceType diff --git a/service/route53/api_op_TestDNSAnswer.go b/service/route53/api_op_TestDNSAnswer.go index 5f6cb9b840a..dfffe110111 100644 --- a/service/route53/api_op_TestDNSAnswer.go +++ b/service/route53/api_op_TestDNSAnswer.go @@ -63,10 +63,10 @@ type TestDNSAnswerInput struct { // IPv6 addresses. 
The range of valid values depends on whether edns0clientsubnetip // is an IPv4 or an IPv6 address: // - // * IPv4: Specify a value between 0 and 32 + // * IPv4: Specify a value between 0 and 32 // - // - // * IPv6: Specify a value between 0 and 128 + // * + // IPv6: Specify a value between 0 and 128 EDNS0ClientSubnetMask *string // If you want to simulate a request from a specific DNS resolver, specify the IP diff --git a/service/route53/api_op_UpdateHealthCheck.go b/service/route53/api_op_UpdateHealthCheck.go index f9273ad284f..6aa49ee635b 100644 --- a/service/route53/api_op_UpdateHealthCheck.go +++ b/service/route53/api_op_UpdateHealthCheck.go @@ -54,21 +54,21 @@ type UpdateHealthCheckInput struct { // Stops Route 53 from performing health checks. When you disable a health check, // here's what happens: // - // * Health checks that check the health of endpoints: - // Route 53 stops submitting requests to your application, server, or other - // resource. + // * Health checks that check the health of endpoints: Route + // 53 stops submitting requests to your application, server, or other resource. // - // * Calculated health checks: Route 53 stops aggregating the status - // of the referenced health checks. + // * + // Calculated health checks: Route 53 stops aggregating the status of the + // referenced health checks. // - // * Health checks that monitor CloudWatch - // alarms: Route 53 stops monitoring the corresponding CloudWatch metrics. + // * Health checks that monitor CloudWatch alarms: Route + // 53 stops monitoring the corresponding CloudWatch metrics. // - // After - // you disable a health check, Route 53 considers the status of the health check to - // always be healthy. If you configured DNS failover, Route 53 continues to route - // traffic to the corresponding resources. If you want to stop routing traffic to a - // resource, change the value of Inverted + // After you disable a + // health check, Route 53 considers the status of the health check to always be + // healthy. If you configured DNS failover, Route 53 continues to route traffic to + // the corresponding resources. If you want to stop routing traffic to a resource, + // change the value of Inverted // (https://docs.aws.amazon.com/Route53/latest/APIReference/API_UpdateHealthCheck.html#Route53-UpdateHealthCheck-request-Inverted). // Charges for a health check still apply when the health check is disabled. For // more information, see Amazon Route 53 Pricing @@ -113,18 +113,18 @@ type UpdateHealthCheckInput struct { // which you want Route 53 to perform health checks. When Route 53 checks the // health of an endpoint, here is how it constructs the Host header: // - // * If you + // * If you // specify a value of 80 for Port and HTTP or HTTP_STR_MATCH for Type, Route 53 // passes the value of FullyQualifiedDomainName to the endpoint in the Host // header. // - // * If you specify a value of 443 for Port and HTTPS or - // HTTPS_STR_MATCH for Type, Route 53 passes the value of FullyQualifiedDomainName - // to the endpoint in the Host header. + // * If you specify a value of 443 for Port and HTTPS or HTTPS_STR_MATCH + // for Type, Route 53 passes the value of FullyQualifiedDomainName to the endpoint + // in the Host header. // - // * If you specify another value for Port - // and any value except TCP for Type, Route 53 passes FullyQualifiedDomainName:Port - // to the endpoint in the Host header. 
+ // * If you specify another value for Port and any value + // except TCP for Type, Route 53 passes FullyQualifiedDomainName:Port to the + // endpoint in the Host header. // // If you don't specify a value for // FullyQualifiedDomainName, Route 53 substitutes the value of IPAddress in the @@ -159,12 +159,12 @@ type UpdateHealthCheckInput struct { // that you include that value in your UpdateHealthCheck request. This prevents // Route 53 from overwriting an intervening update: // - // * If the value in the + // * If the value in the // UpdateHealthCheck request matches the value of HealthCheckVersion in the health // check, Route 53 updates the health check with the new settings. // - // * If the - // value of HealthCheckVersion in the health check is greater, the health check was + // * If the value + // of HealthCheckVersion in the health check is greater, the health check was // changed after you got the version number. Route 53 does not update the health // check, and it returns a HealthCheckVersionMismatch error. HealthCheckVersion *int64 @@ -175,11 +175,11 @@ type UpdateHealthCheckInput struct { // associate with a CALCULATED health check, use the ChildHealthChecks and // ChildHealthCheck elements. Note the following: // - // * If you specify a number + // * If you specify a number // greater than the number of child health checks, Route 53 always considers this // health check to be unhealthy. // - // * If you specify 0, Route 53 always considers + // * If you specify 0, Route 53 always considers // this health check to be healthy. HealthThreshold *int32 @@ -190,27 +190,26 @@ type UpdateHealthCheckInput struct { // Using an IP address that is returned by DNS, Route 53 then checks the health of // the endpoint. Use one of the following formats for the value of IPAddress: // - // - // * IPv4 address: four values between 0 and 255, separated by periods (.), for + // * + // IPv4 address: four values between 0 and 255, separated by periods (.), for // example, 192.0.2.44. // - // * IPv6 address: eight groups of four hexadecimal - // values, separated by colons (:), for example, - // 2001:0db8:85a3:0000:0000:abcd:0001:2345. You can also shorten IPv6 addresses as - // described in RFC 5952, for example, 2001:db8:85a3::abcd:1:2345. + // * IPv6 address: eight groups of four hexadecimal values, + // separated by colons (:), for example, 2001:0db8:85a3:0000:0000:abcd:0001:2345. + // You can also shorten IPv6 addresses as described in RFC 5952, for example, + // 2001:db8:85a3::abcd:1:2345. // - // If the endpoint - // is an EC2 instance, we recommend that you create an Elastic IP address, - // associate it with your EC2 instance, and specify the Elastic IP address for - // IPAddress. This ensures that the IP address of your instance never changes. For - // more information, see the applicable documentation: + // If the endpoint is an EC2 instance, we recommend + // that you create an Elastic IP address, associate it with your EC2 instance, and + // specify the Elastic IP address for IPAddress. This ensures that the IP address + // of your instance never changes. 
For more information, see the applicable + // documentation: // - // * Linux: Elastic IP - // Addresses (EIP) + // * Linux: Elastic IP Addresses (EIP) // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html) // in the Amazon EC2 User Guide for Linux Instances // - // * Windows: Elastic IP + // * Windows: Elastic IP // Addresses (EIP) // (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/elastic-ip-addresses-eip.html) // in the Amazon EC2 User Guide for Windows Instances @@ -225,29 +224,30 @@ type UpdateHealthCheckInput struct { // information about IP addresses for which you can't create health checks, see the // following documents: // - // * RFC 5735, Special Use IPv4 Addresses + // * RFC 5735, Special Use IPv4 Addresses // (https://tools.ietf.org/html/rfc5735) // - // * RFC 6598, IANA-Reserved IPv4 Prefix - // for Shared Address Space (https://tools.ietf.org/html/rfc6598) + // * RFC 6598, IANA-Reserved IPv4 Prefix for + // Shared Address Space (https://tools.ietf.org/html/rfc6598) // - // * RFC 5156, + // * RFC 5156, // Special-Use IPv6 Addresses (https://tools.ietf.org/html/rfc5156) IPAddress *string // When CloudWatch has insufficient data about the metric to determine the alarm - // state, the status that you want Amazon Route 53 to assign to the health check: - // + // state, the status that you want Amazon Route 53 to assign to the health + // check: // // * Healthy: Route 53 considers the health check to be healthy. // - // * Unhealthy: - // Route 53 considers the health check to be unhealthy. + // * + // Unhealthy: Route 53 considers the health check to be unhealthy. // - // * LastKnownStatus: - // Route 53 uses the status of the health check from the last time CloudWatch had - // sufficient data to determine the alarm state. For new health checks that have no - // last known status, the default status for the health check is healthy. + // * + // LastKnownStatus: Route 53 uses the status of the health check from the last time + // CloudWatch had sufficient data to determine the alarm state. For new health + // checks that have no last known status, the default status for the health check + // is healthy. InsufficientDataHealthStatus types.InsufficientDataHealthStatus // Specify whether you want Amazon Route 53 to invert the status of a health check, @@ -268,21 +268,20 @@ type UpdateHealthCheckInput struct { // that you want to reset to the default value. Valid values for // ResettableElementName include the following: // - // * ChildHealthChecks: Amazon - // Route 53 resets ChildHealthChecks + // * ChildHealthChecks: Amazon Route + // 53 resets ChildHealthChecks // (https://docs.aws.amazon.com/Route53/latest/APIReference/API_HealthCheckConfig.html#Route53-Type-HealthCheckConfig-ChildHealthChecks) // to null. // - // * FullyQualifiedDomainName: Route 53 resets - // FullyQualifiedDomainName + // * FullyQualifiedDomainName: Route 53 resets FullyQualifiedDomainName // (https://docs.aws.amazon.com/Route53/latest/APIReference/API_UpdateHealthCheck.html#Route53-UpdateHealthCheck-request-FullyQualifiedDomainName). // to null. // - // * Regions: Route 53 resets the Regions + // * Regions: Route 53 resets the Regions // (https://docs.aws.amazon.com/Route53/latest/APIReference/API_HealthCheckConfig.html#Route53-Type-HealthCheckConfig-Regions) // list to the default set of regions. 
// - // * ResourcePath: Route 53 resets + // * ResourcePath: Route 53 resets // ResourcePath // (https://docs.aws.amazon.com/Route53/latest/APIReference/API_HealthCheckConfig.html#Route53-Type-HealthCheckConfig-ResourcePath) // to null. diff --git a/service/route53/api_op_UpdateTrafficPolicyInstance.go b/service/route53/api_op_UpdateTrafficPolicyInstance.go index d5b55ebc4b1..41ae8236e12 100644 --- a/service/route53/api_op_UpdateTrafficPolicyInstance.go +++ b/service/route53/api_op_UpdateTrafficPolicyInstance.go @@ -18,18 +18,18 @@ import ( // group of resource record sets with another. Route 53 performs the following // operations: // -// * Route 53 creates a new group of resource record sets based on -// the specified traffic policy. This is true regardless of how significant the +// * Route 53 creates a new group of resource record sets based on the +// specified traffic policy. This is true regardless of how significant the // differences are between the existing resource record sets and the new resource // record sets. // -// * When all of the new resource record sets have been created, +// * When all of the new resource record sets have been created, // Route 53 starts to respond to DNS queries for the root resource record set name // (such as example.com) by using the new resource record sets. // -// * Route 53 -// deletes the old group of resource record sets that are associated with the root -// resource record set name. +// * Route 53 deletes +// the old group of resource record sets that are associated with the root resource +// record set name. func (c *Client) UpdateTrafficPolicyInstance(ctx context.Context, params *UpdateTrafficPolicyInstanceInput, optFns ...func(*Options)) (*UpdateTrafficPolicyInstanceOutput, error) { if params == nil { params = &UpdateTrafficPolicyInstanceInput{} diff --git a/service/route53/types/enums.go b/service/route53/types/enums.go index 2121d0403af..94d6aaa2649 100644 --- a/service/route53/types/enums.go +++ b/service/route53/types/enums.go @@ -6,11 +6,11 @@ type AccountLimitType string // Enum values for AccountLimitType const ( - AccountLimitTypeMax_health_checks_by_owner AccountLimitType = "MAX_HEALTH_CHECKS_BY_OWNER" - AccountLimitTypeMax_hosted_zones_by_owner AccountLimitType = "MAX_HOSTED_ZONES_BY_OWNER" - AccountLimitTypeMax_traffic_policy_instances_by_owner AccountLimitType = "MAX_TRAFFIC_POLICY_INSTANCES_BY_OWNER" - AccountLimitTypeMax_reusable_delegation_sets_by_owner AccountLimitType = "MAX_REUSABLE_DELEGATION_SETS_BY_OWNER" - AccountLimitTypeMax_traffic_policies_by_owner AccountLimitType = "MAX_TRAFFIC_POLICIES_BY_OWNER" + AccountLimitTypeMaxHealthChecksByOwner AccountLimitType = "MAX_HEALTH_CHECKS_BY_OWNER" + AccountLimitTypeMaxHostedZonesByOwner AccountLimitType = "MAX_HOSTED_ZONES_BY_OWNER" + AccountLimitTypeMaxTrafficPolicyInstancesByOwner AccountLimitType = "MAX_TRAFFIC_POLICY_INSTANCES_BY_OWNER" + AccountLimitTypeMaxReusableDelegationSetsByOwner AccountLimitType = "MAX_REUSABLE_DELEGATION_SETS_BY_OWNER" + AccountLimitTypeMaxTrafficPoliciesByOwner AccountLimitType = "MAX_TRAFFIC_POLICIES_BY_OWNER" ) // Values returns all known values for AccountLimitType. 
Note that this can be @@ -188,13 +188,13 @@ type HealthCheckType string // Enum values for HealthCheckType const ( - HealthCheckTypeHttp HealthCheckType = "HTTP" - HealthCheckTypeHttps HealthCheckType = "HTTPS" - HealthCheckTypeHttp_str_match HealthCheckType = "HTTP_STR_MATCH" - HealthCheckTypeHttps_str_match HealthCheckType = "HTTPS_STR_MATCH" - HealthCheckTypeTcp HealthCheckType = "TCP" - HealthCheckTypeCalculated HealthCheckType = "CALCULATED" - HealthCheckTypeCloudwatch_metric HealthCheckType = "CLOUDWATCH_METRIC" + HealthCheckTypeHttp HealthCheckType = "HTTP" + HealthCheckTypeHttps HealthCheckType = "HTTPS" + HealthCheckTypeHttpStrMatch HealthCheckType = "HTTP_STR_MATCH" + HealthCheckTypeHttpsStrMatch HealthCheckType = "HTTPS_STR_MATCH" + HealthCheckTypeTcp HealthCheckType = "TCP" + HealthCheckTypeCalculated HealthCheckType = "CALCULATED" + HealthCheckTypeCloudwatchMetric HealthCheckType = "CLOUDWATCH_METRIC" ) // Values returns all known values for HealthCheckType. Note that this can be @@ -216,8 +216,8 @@ type HostedZoneLimitType string // Enum values for HostedZoneLimitType const ( - HostedZoneLimitTypeMax_rrsets_by_zone HostedZoneLimitType = "MAX_RRSETS_BY_ZONE" - HostedZoneLimitTypeMax_vpcs_associated_by_zone HostedZoneLimitType = "MAX_VPCS_ASSOCIATED_BY_ZONE" + HostedZoneLimitTypeMaxRrsetsByZone HostedZoneLimitType = "MAX_RRSETS_BY_ZONE" + HostedZoneLimitTypeMaxVpcsAssociatedByZone HostedZoneLimitType = "MAX_VPCS_ASSOCIATED_BY_ZONE" ) // Values returns all known values for HostedZoneLimitType. Note that this can be @@ -354,7 +354,7 @@ type ReusableDelegationSetLimitType string // Enum values for ReusableDelegationSetLimitType const ( - ReusableDelegationSetLimitTypeMax_zones_by_reusable_delegation_set ReusableDelegationSetLimitType = "MAX_ZONES_BY_REUSABLE_DELEGATION_SET" + ReusableDelegationSetLimitTypeMaxZonesByReusableDelegationSet ReusableDelegationSetLimitType = "MAX_ZONES_BY_REUSABLE_DELEGATION_SET" ) // Values returns all known values for ReusableDelegationSetLimitType. Note that diff --git a/service/route53/types/errors.go b/service/route53/types/errors.go index 7af01cf06f8..c7db8976bf4 100644 --- a/service/route53/types/errors.go +++ b/service/route53/types/errors.go @@ -27,7 +27,7 @@ func (e *ConcurrentModification) ErrorFault() smithy.ErrorFault { return smithy. // The cause of this error depends on the operation that you're performing: // -// * +// * // Create a public hosted zone: Two hosted zones that have the same name or that // have a parent/child relationship (example.com and test.example.com) can't have // any common name servers. You tried to create a hosted zone that has the same @@ -37,11 +37,11 @@ func (e *ConcurrentModification) ErrorFault() smithy.ErrorFault { return smithy. // CreateReusableDelegationSet // (https://docs.aws.amazon.com/Route53/latest/APIReference/API_CreateReusableDelegationSet.html). // -// -// * Create a private hosted zone: A hosted zone with the specified name already +// * +// Create a private hosted zone: A hosted zone with the specified name already // exists and is already associated with the Amazon VPC that you specified. // -// * +// * // Associate VPCs with a private hosted zone: The VPC that you specified is already // associated with another hosted zone that has the same name. type ConflictingDomainExists struct { @@ -174,12 +174,12 @@ func (e *DelegationSetNotReusable) ErrorFault() smithy.ErrorFault { return smith // The health check you're attempting to create already exists. 
Amazon Route 53 // returns this error when you submit a request that has the following values: // -// -// * The same value for CallerReference as an existing health check, and one or -// more values that differ from the existing health check that has the same caller +// * +// The same value for CallerReference as an existing health check, and one or more +// values that differ from the existing health check that has the same caller // reference. // -// * The same value for CallerReference as a health check that you +// * The same value for CallerReference as a health check that you // created and later deleted, regardless of the other settings in the request. type HealthCheckAlreadyExists struct { Message *string @@ -323,15 +323,15 @@ func (e *IncompatibleVersion) ErrorFault() smithy.ErrorFault { return smithy.Fau // Amazon Route 53 doesn't have the permissions required to create log streams and // send query logs to log streams. Possible causes include the following: // -// * -// There is no resource policy that specifies the log group ARN in the value for +// * There +// is no resource policy that specifies the log group ARN in the value for // Resource. // -// * The resource policy that includes the log group ARN in the -// value for Resource doesn't have the necessary permissions. +// * The resource policy that includes the log group ARN in the value +// for Resource doesn't have the necessary permissions. // -// * The resource -// policy hasn't finished propagating yet. +// * The resource policy +// hasn't finished propagating yet. type InsufficientCloudWatchLogsResourcePolicy struct { Message *string } diff --git a/service/route53/types/types.go b/service/route53/types/types.go index 8d21dbeefe2..e961ab33286 100644 --- a/service/route53/types/types.go +++ b/service/route53/types/types.go @@ -12,26 +12,25 @@ type AccountLimit struct { // The limit that you requested. Valid values include the following: // - // * + // * // MAX_HEALTH_CHECKS_BY_OWNER: The maximum number of health checks that you can // create using the current account. // - // * MAX_HOSTED_ZONES_BY_OWNER: The maximum + // * MAX_HOSTED_ZONES_BY_OWNER: The maximum // number of hosted zones that you can create using the current account. // - // * + // * // MAX_REUSABLE_DELEGATION_SETS_BY_OWNER: The maximum number of reusable delegation // sets that you can create using the current account. // - // * + // * // MAX_TRAFFIC_POLICIES_BY_OWNER: The maximum number of traffic policies that you // can create using the current account. // - // * - // MAX_TRAFFIC_POLICY_INSTANCES_BY_OWNER: The maximum number of traffic policy - // instances that you can create using the current account. (Traffic policy - // instances are referred to as traffic flow policy records in the Amazon Route 53 - // console.) + // * MAX_TRAFFIC_POLICY_INSTANCES_BY_OWNER: + // The maximum number of traffic policy instances that you can create using the + // current account. (Traffic policy instances are referred to as traffic flow + // policy records in the Amazon Route 53 console.) // // This member is required. Type AccountLimitType @@ -52,14 +51,14 @@ type AlarmIdentifier struct { // to use to determine whether this health check is healthy. Route 53 supports // CloudWatch alarms with the following features: // - // * Standard-resolution - // metrics. High-resolution metrics aren't supported. For more information, see + // * Standard-resolution metrics. + // High-resolution metrics aren't supported. 
For more information, see // High-Resolution Metrics // (https://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/publishingMetrics.html#high-resolution-metrics) // in the Amazon CloudWatch User Guide. // - // * Statistics: Average, Minimum, - // Maximum, Sum, and SampleCount. Extended statistics aren't supported. + // * Statistics: Average, Minimum, Maximum, + // Sum, and SampleCount. Extended statistics aren't supported. // // This member is required. Name *string @@ -79,12 +78,12 @@ type AlarmIdentifier struct { // to. When creating resource record sets for a private hosted zone, note the // following: // -// * Creating geolocation alias resource record sets or latency -// alias resource record sets in a private hosted zone is unsupported. +// * Creating geolocation alias resource record sets or latency alias +// resource record sets in a private hosted zone is unsupported. // -// * For -// information about creating failover resource record sets in a private hosted -// zone, see Configuring Failover in a Private Hosted Zone +// * For information +// about creating failover resource record sets in a private hosted zone, see +// Configuring Failover in a Private Hosted Zone // (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-private-hosted-zones.html). type AliasTarget struct { @@ -94,10 +93,10 @@ type AliasTarget struct { // the applicable value using the AWS CLI command get-domain-names // (https://docs.aws.amazon.com/cli/latest/reference/apigateway/get-domain-names.html): // + // * + // For regional APIs, specify the value of regionalDomainName. // - // * For regional APIs, specify the value of regionalDomainName. - // - // * For + // * For // edge-optimized APIs, specify the value of distributionDomainName. This is the // name of the associated CloudFront distribution, such as // da1b2c3d4e5.cloudfront.net. @@ -140,19 +139,19 @@ type AliasTarget struct { // attribute for the environment. You can use the following methods to get the // value of the CNAME attribute: // - // * AWS Management Console: For information - // about how to get the value by using the console, see Using Custom Domains with - // AWS Elastic Beanstalk + // * AWS Management Console: For information about + // how to get the value by using the console, see Using Custom Domains with AWS + // Elastic Beanstalk // (https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/customdomains.html) in // the AWS Elastic Beanstalk Developer Guide. // - // * Elastic Beanstalk API: Use the + // * Elastic Beanstalk API: Use the // DescribeEnvironments action to get the value of the CNAME attribute. For more // information, see DescribeEnvironments // (https://docs.aws.amazon.com/elasticbeanstalk/latest/api/API_DescribeEnvironments.html) // in the AWS Elastic Beanstalk API Reference. // - // * AWS CLI: Use the + // * AWS CLI: Use the // describe-environments command to get the value of the CNAME attribute. For more // information, see describe-environments // (https://docs.aws.amazon.com/cli/latest/reference/elasticbeanstalk/describe-environments.html) @@ -162,46 +161,46 @@ type AliasTarget struct { // is associated with the load balancer. Get the DNS name by using the AWS // Management Console, the ELB API, or the AWS CLI. // - // * AWS Management Console: - // Go to the EC2 page, choose Load Balancers in the navigation pane, choose the - // load balancer, choose the Description tab, and get the value of the DNS name - // field. 
If you're routing traffic to a Classic Load Balancer, get the value that - // begins with dualstack. If you're routing traffic to another type of load - // balancer, get the value that applies to the record type, A or AAAA. + // * AWS Management Console: Go + // to the EC2 page, choose Load Balancers in the navigation pane, choose the load + // balancer, choose the Description tab, and get the value of the DNS name field. + // If you're routing traffic to a Classic Load Balancer, get the value that begins + // with dualstack. If you're routing traffic to another type of load balancer, get + // the value that applies to the record type, A or AAAA. // - // * - // Elastic Load Balancing API: Use DescribeLoadBalancers to get the value of - // DNSName. For more information, see the applicable guide: + // * Elastic Load Balancing + // API: Use DescribeLoadBalancers to get the value of DNSName. For more + // information, see the applicable guide: // - // * Classic Load - // Balancers: DescribeLoadBalancers + // * Classic Load Balancers: + // DescribeLoadBalancers // (https://docs.aws.amazon.com/elasticloadbalancing/2012-06-01/APIReference/API_DescribeLoadBalancers.html) // - // - // * Application and Network Load Balancers: DescribeLoadBalancers + // * + // Application and Network Load Balancers: DescribeLoadBalancers // (https://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/API_DescribeLoadBalancers.html) // - // - // * AWS CLI: Use describe-load-balancers to get the value of DNSName. For more + // * + // AWS CLI: Use describe-load-balancers to get the value of DNSName. For more // information, see the applicable guide: // - // * Classic Load Balancers: + // * Classic Load Balancers: // describe-load-balancers // (http://docs.aws.amazon.com/cli/latest/reference/elb/describe-load-balancers.html) // - // - // * Application and Network Load Balancers: describe-load-balancers + // * + // Application and Network Load Balancers: describe-load-balancers // (http://docs.aws.amazon.com/cli/latest/reference/elbv2/describe-load-balancers.html) // // AWS // Global Accelerator accelerator Specify the DNS name for your accelerator: // - // * + // * // Global Accelerator API: To get the DNS name, use DescribeAccelerator // (https://docs.aws.amazon.com/global-accelerator/latest/api/API_DescribeAccelerator.html). // - // - // * AWS CLI: To get the DNS name, use describe-accelerator + // * + // AWS CLI: To get the DNS name, use describe-accelerator // (https://docs.aws.amazon.com/cli/latest/reference/globalaccelerator/describe-accelerator.html). // // Amazon @@ -243,25 +242,25 @@ type AliasTarget struct { // requirements. ELB load balancers Health checking behavior depends on the type of // load balancer: // - // * Classic Load Balancers: If you specify an ELB Classic Load + // * Classic Load Balancers: If you specify an ELB Classic Load // Balancer in DNSName, Elastic Load Balancing routes queries only to the healthy // Amazon EC2 instances that are registered with the load balancer. If you set // EvaluateTargetHealth to true and either no EC2 instances are healthy or the load // balancer itself is unhealthy, Route 53 routes queries to other resources. 
// - // * + // * // Application and Network Load Balancers: If you specify an ELB Application or // Network Load Balancer and you set EvaluateTargetHealth to true, Route 53 routes // queries to the load balancer based on the health of the target groups that are // associated with the load balancer: // - // * For an Application or Network Load + // * For an Application or Network Load // Balancer to be considered healthy, every target group that contains targets must // contain at least one healthy target. If any target group contains only unhealthy // targets, the load balancer is considered unhealthy, and Route 53 routes queries // to other resources. // - // * A target group that has no registered targets is + // * A target group that has no registered targets is // considered unhealthy. // // When you create a load balancer, you configure settings @@ -289,10 +288,10 @@ type AliasTarget struct { // the AWS CLI command get-domain-names // (https://docs.aws.amazon.com/cli/latest/reference/apigateway/get-domain-names.html): // + // * + // For regional APIs, specify the value of regionalHostedZoneId. // - // * For regional APIs, specify the value of regionalHostedZoneId. - // - // * For + // * For // edge-optimized APIs, specify the value of distributionHostedZoneId. // // Amazon @@ -310,42 +309,41 @@ type AliasTarget struct { // Reference. ELB load balancer Specify the value of the hosted zone ID for the // load balancer. Use the following methods to get the hosted zone ID: // - // * - // Service Endpoints (https://docs.aws.amazon.com/general/latest/gr/elb.html) table - // in the "Elastic Load Balancing Endpoints and Quotas" topic in the Amazon Web - // Services General Reference: Use the value that corresponds with the region that - // you created your load balancer in. Note that there are separate columns for + // * Service + // Endpoints (https://docs.aws.amazon.com/general/latest/gr/elb.html) table in the + // "Elastic Load Balancing Endpoints and Quotas" topic in the Amazon Web Services + // General Reference: Use the value that corresponds with the region that you + // created your load balancer in. Note that there are separate columns for // Application and Classic Load Balancers and for Network Load Balancers. // - // * - // AWS Management Console: Go to the Amazon EC2 page, choose Load Balancers in the + // * AWS + // Management Console: Go to the Amazon EC2 page, choose Load Balancers in the // navigation pane, select the load balancer, and get the value of the Hosted zone // field on the Description tab. // - // * Elastic Load Balancing API: Use + // * Elastic Load Balancing API: Use // DescribeLoadBalancers to get the applicable value. For more information, see the // applicable guide: // - // * Classic Load Balancers: Use DescribeLoadBalancers + // * Classic Load Balancers: Use DescribeLoadBalancers // (https://docs.aws.amazon.com/elasticloadbalancing/2012-06-01/APIReference/API_DescribeLoadBalancers.html) // to get the value of CanonicalHostedZoneNameId. // - // * Application and - // Network Load Balancers: Use DescribeLoadBalancers + // * Application and Network Load + // Balancers: Use DescribeLoadBalancers // (https://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/API_DescribeLoadBalancers.html) // to get the value of CanonicalHostedZoneId. // - // * AWS CLI: Use + // * AWS CLI: Use // describe-load-balancers to get the applicable value. 
For more information, see // the applicable guide: // - // * Classic Load Balancers: Use - // describe-load-balancers + // * Classic Load Balancers: Use describe-load-balancers // (http://docs.aws.amazon.com/cli/latest/reference/elb/describe-load-balancers.html) // to get the value of CanonicalHostedZoneNameId. // - // * Application and - // Network Load Balancers: Use describe-load-balancers + // * Application and Network Load + // Balancers: Use describe-load-balancers // (http://docs.aws.amazon.com/cli/latest/reference/elbv2/describe-load-balancers.html) // to get the value of CanonicalHostedZoneId. // @@ -368,19 +366,19 @@ type Change struct { // The action to perform: // - // * CREATE: Creates a resource record set that has the + // * CREATE: Creates a resource record set that has the // specified values. // - // * DELETE: Deletes a existing resource record set. To - // delete the resource record set that is associated with a traffic policy - // instance, use DeleteTrafficPolicyInstance + // * DELETE: Deletes a existing resource record set. To delete + // the resource record set that is associated with a traffic policy instance, use + // DeleteTrafficPolicyInstance // (https://docs.aws.amazon.com/Route53/latest/APIReference/API_DeleteTrafficPolicyInstance.html). // Amazon Route 53 will delete the resource record set automatically. If you delete // the resource record set by using ChangeResourceRecordSets, Route 53 doesn't // automatically delete the traffic policy instance, and you'll continue to be // charged for it even though it's no longer in use. // - // * UPSERT: If a resource + // * UPSERT: If a resource // record set doesn't already exist, Route 53 creates it. If a resource record set // does exist, Route 53 updates it with the values in the request. // @@ -532,24 +530,24 @@ type GeoLocation struct { // The two-letter code for the continent. Amazon Route 53 supports the following // continent codes: // - // * AF: Africa + // * AF: Africa // - // * AN: Antarctica + // * AN: Antarctica // - // * AS: Asia + // * AS: Asia // - // * - // EU: Europe + // * EU: Europe // - // * OC: Oceania + // * + // OC: Oceania // - // * NA: North America + // * NA: North America // - // * SA: South - // America + // * SA: South America // - // Constraint: Specifying ContinentCode with either CountryCode or - // SubdivisionCode returns an InvalidInput error. + // Constraint: Specifying + // ContinentCode with either CountryCode or SubdivisionCode returns an InvalidInput + // error. ContinentCode *string // For geolocation resource record sets, the two-letter code for a country. Amazon @@ -636,36 +634,36 @@ type HealthCheckConfig struct { // of Type after you create a health check. You can create the following types of // health checks: // - // * HTTP: Route 53 tries to establish a TCP connection. If + // * HTTP: Route 53 tries to establish a TCP connection. If // successful, Route 53 submits an HTTP request and waits for an HTTP status code // of 200 or greater and less than 400. // - // * HTTPS: Route 53 tries to establish a - // TCP connection. If successful, Route 53 submits an HTTPS request and waits for - // an HTTP status code of 200 or greater and less than 400. If you specify HTTPS - // for the value of Type, the endpoint must support TLS v1.0 or later. + // * HTTPS: Route 53 tries to establish a TCP + // connection. If successful, Route 53 submits an HTTPS request and waits for an + // HTTP status code of 200 or greater and less than 400. 
If you specify HTTPS for + // the value of Type, the endpoint must support TLS v1.0 or later. // - // * + // * // HTTP_STR_MATCH: Route 53 tries to establish a TCP connection. If successful, // Route 53 submits an HTTP request and searches the first 5,120 bytes of the // response body for the string that you specify in SearchString. // - // * + // * // HTTPS_STR_MATCH: Route 53 tries to establish a TCP connection. If successful, // Route 53 submits an HTTPS request and searches the first 5,120 bytes of the // response body for the string that you specify in SearchString. // - // * TCP: Route - // 53 tries to establish a TCP connection. + // * TCP: Route 53 + // tries to establish a TCP connection. // - // * CLOUDWATCH_METRIC: The health - // check is associated with a CloudWatch alarm. If the state of the alarm is OK, - // the health check is considered healthy. If the state is ALARM, the health check - // is considered unhealthy. If CloudWatch doesn't have sufficient data to determine + // * CLOUDWATCH_METRIC: The health check is + // associated with a CloudWatch alarm. If the state of the alarm is OK, the health + // check is considered healthy. If the state is ALARM, the health check is + // considered unhealthy. If CloudWatch doesn't have sufficient data to determine // whether the state is OK or ALARM, the health check status depends on the setting // for InsufficientDataHealthStatus: Healthy, Unhealthy, or LastKnownStatus. // - // * + // * // CALCULATED: For health checks that monitor the status of other health checks, // Route 53 adds up the number of health checks that Route 53 health checkers // consider to be healthy and compares that number with the value of @@ -692,21 +690,21 @@ type HealthCheckConfig struct { // Stops Route 53 from performing health checks. When you disable a health check, // here's what happens: // - // * Health checks that check the health of endpoints: - // Route 53 stops submitting requests to your application, server, or other - // resource. + // * Health checks that check the health of endpoints: Route + // 53 stops submitting requests to your application, server, or other resource. // - // * Calculated health checks: Route 53 stops aggregating the status - // of the referenced health checks. + // * + // Calculated health checks: Route 53 stops aggregating the status of the + // referenced health checks. // - // * Health checks that monitor CloudWatch - // alarms: Route 53 stops monitoring the corresponding CloudWatch metrics. + // * Health checks that monitor CloudWatch alarms: Route + // 53 stops monitoring the corresponding CloudWatch metrics. // - // After - // you disable a health check, Route 53 considers the status of the health check to - // always be healthy. If you configured DNS failover, Route 53 continues to route - // traffic to the corresponding resources. If you want to stop routing traffic to a - // resource, change the value of Inverted + // After you disable a + // health check, Route 53 considers the status of the health check to always be + // healthy. If you configured DNS failover, Route 53 continues to route traffic to + // the corresponding resources. If you want to stop routing traffic to a resource, + // change the value of Inverted // (https://docs.aws.amazon.com/Route53/latest/APIReference/API_UpdateHealthCheck.html#Route53-UpdateHealthCheck-request-Inverted). // Charges for a health check still apply when the health check is disabled. 
For // more information, see Amazon Route 53 Pricing @@ -749,18 +747,18 @@ type HealthCheckConfig struct { // which you want Route 53 to perform health checks. When Route 53 checks the // health of an endpoint, here is how it constructs the Host header: // - // * If you + // * If you // specify a value of 80 for Port and HTTP or HTTP_STR_MATCH for Type, Route 53 // passes the value of FullyQualifiedDomainName to the endpoint in the Host // header. // - // * If you specify a value of 443 for Port and HTTPS or - // HTTPS_STR_MATCH for Type, Route 53 passes the value of FullyQualifiedDomainName - // to the endpoint in the Host header. + // * If you specify a value of 443 for Port and HTTPS or HTTPS_STR_MATCH + // for Type, Route 53 passes the value of FullyQualifiedDomainName to the endpoint + // in the Host header. // - // * If you specify another value for Port - // and any value except TCP for Type, Route 53 passes FullyQualifiedDomainName:Port - // to the endpoint in the Host header. + // * If you specify another value for Port and any value + // except TCP for Type, Route 53 passes FullyQualifiedDomainName:Port to the + // endpoint in the Host header. // // If you don't specify a value for // FullyQualifiedDomainName, Route 53 substitutes the value of IPAddress in the @@ -795,12 +793,12 @@ type HealthCheckConfig struct { // (https://docs.aws.amazon.com/Route53/latest/APIReference/API_UpdateHealthCheck.html#Route53-UpdateHealthCheck-request-ChildHealthChecks) // element. Note the following: // - // * If you specify a number greater than the - // number of child health checks, Route 53 always considers this health check to be + // * If you specify a number greater than the number + // of child health checks, Route 53 always considers this health check to be // unhealthy. // - // * If you specify 0, Route 53 always considers this health check - // to be healthy. + // * If you specify 0, Route 53 always considers this health check to + // be healthy. HealthThreshold *int32 // The IPv4 or IPv6 IP address of the endpoint that you want Amazon Route 53 to @@ -810,33 +808,33 @@ type HealthCheckConfig struct { // Using an IP address returned by DNS, Route 53 then checks the health of the // endpoint. Use one of the following formats for the value of IPAddress: // - // * - // IPv4 address: four values between 0 and 255, separated by periods (.), for - // example, 192.0.2.44. + // * IPv4 + // address: four values between 0 and 255, separated by periods (.), for example, + // 192.0.2.44. // - // * IPv6 address: eight groups of four hexadecimal - // values, separated by colons (:), for example, - // 2001:0db8:85a3:0000:0000:abcd:0001:2345. You can also shorten IPv6 addresses as - // described in RFC 5952, for example, 2001:db8:85a3::abcd:1:2345. + // * IPv6 address: eight groups of four hexadecimal values, separated + // by colons (:), for example, 2001:0db8:85a3:0000:0000:abcd:0001:2345. You can + // also shorten IPv6 addresses as described in RFC 5952, for example, + // 2001:db8:85a3::abcd:1:2345. // - // If the endpoint - // is an EC2 instance, we recommend that you create an Elastic IP address, - // associate it with your EC2 instance, and specify the Elastic IP address for - // IPAddress. This ensures that the IP address of your instance will never change. 
- // For more information, see FullyQualifiedDomainName + // If the endpoint is an EC2 instance, we recommend + // that you create an Elastic IP address, associate it with your EC2 instance, and + // specify the Elastic IP address for IPAddress. This ensures that the IP address + // of your instance will never change. For more information, see + // FullyQualifiedDomainName // (https://docs.aws.amazon.com/Route53/latest/APIReference/API_UpdateHealthCheck.html#Route53-UpdateHealthCheck-request-FullyQualifiedDomainName). // Constraints: Route 53 can't check the health of endpoints for which the IP // address is in local, private, non-routable, or multicast ranges. For more // information about IP addresses for which you can't create health checks, see the // following documents: // - // * RFC 5735, Special Use IPv4 Addresses + // * RFC 5735, Special Use IPv4 Addresses // (https://tools.ietf.org/html/rfc5735) // - // * RFC 6598, IANA-Reserved IPv4 Prefix - // for Shared Address Space (https://tools.ietf.org/html/rfc6598) + // * RFC 6598, IANA-Reserved IPv4 Prefix for + // Shared Address Space (https://tools.ietf.org/html/rfc6598) // - // * RFC 5156, + // * RFC 5156, // Special-Use IPv6 Addresses (https://tools.ietf.org/html/rfc5156) // // When the value @@ -844,18 +842,19 @@ type HealthCheckConfig struct { IPAddress *string // When CloudWatch has insufficient data about the metric to determine the alarm - // state, the status that you want Amazon Route 53 to assign to the health check: - // + // state, the status that you want Amazon Route 53 to assign to the health + // check: // // * Healthy: Route 53 considers the health check to be healthy. // - // * Unhealthy: - // Route 53 considers the health check to be unhealthy. + // * + // Unhealthy: Route 53 considers the health check to be unhealthy. // - // * LastKnownStatus: - // Route 53 uses the status of the health check from the last time that CloudWatch - // had sufficient data to determine the alarm state. For new health checks that - // have no last known status, the default status for the health check is healthy. + // * + // LastKnownStatus: Route 53 uses the status of the health check from the last time + // that CloudWatch had sufficient data to determine the alarm state. For new health + // checks that have no last known status, the default status for the health check + // is healthy. InsufficientDataHealthStatus InsufficientDataHealthStatus // Specify whether you want Amazon Route 53 to invert the status of a health check, @@ -978,11 +977,11 @@ type HostedZoneLimit struct { // The limit that you requested. Valid values include the following: // - // * + // * // MAX_RRSETS_BY_ZONE: The maximum number of records that you can create in the // specified hosted zone. // - // * MAX_VPCS_ASSOCIATED_BY_ZONE: The maximum number of + // * MAX_VPCS_ASSOCIATED_BY_ZONE: The maximum number of // Amazon VPCs that you can associate with the specified private hosted zone. // // This member is required. @@ -1111,15 +1110,15 @@ type ResourceRecordSet struct { // replace the leftmost label in a domain name, for example, *.example.com. Note the // following: // - // * The * must replace the entire label. For example, you can't + // * The * must replace the entire label. For example, you can't // specify *prod.example.com or prod*.example.com. // - // * The * can't replace any of - // the middle labels, for example, marketing.*.example.com. + // * The * can't replace any of the + // middle labels, for example, marketing.*.example.com.
// - // * If you include * - // in any position other than the leftmost label in a domain name, DNS treats it as - // an * character (ASCII 42), not as a wildcard. You can't use the * wildcard for + // * If you include * in any + // position other than the leftmost label in a domain name, DNS treats it as an * + // character (ASCII 42), not as a wildcard. You can't use the * wildcard for // resource records sets that have a type of NS. // // You can use the * wildcard as the @@ -1151,32 +1150,32 @@ type ResourceRecordSet struct { // The SPF DNS Record Type (http://tools.ietf.org/html/rfc7208#section-14.1). // Values for alias resource record sets: // - // * Amazon API Gateway custom regional + // * Amazon API Gateway custom regional // APIs and edge-optimized APIs: A // - // * CloudFront distributions: A If IPv6 is + // * CloudFront distributions: A If IPv6 is // enabled for the distribution, create two resource record sets to route traffic // to your distribution, one with a value of A and one with a value of AAAA. // - // * + // * // Amazon API Gateway environment that has a regionalized subdomain: A // - // * ELB - // load balancers: A | AAAA + // * ELB load + // balancers: A | AAAA // - // * Amazon S3 buckets: A + // * Amazon S3 buckets: A // - // * Amazon Virtual - // Private Cloud interface VPC endpoints A + // * Amazon Virtual Private Cloud + // interface VPC endpoints A // - // * Another resource record set in - // this hosted zone: Specify the type of the resource record set that you're - // creating the alias for. All values are supported except NS and SOA. If you're - // creating an alias record that has the same name as the hosted zone (known as the - // zone apex), you can't route traffic to a record for which the value of Type is - // CNAME. This is because the alias record must have the same type as the record - // you're routing traffic to, and creating a CNAME record for the zone apex isn't - // supported even for an alias record. + // * Another resource record set in this hosted zone: + // Specify the type of the resource record set that you're creating the alias for. + // All values are supported except NS and SOA. If you're creating an alias record + // that has the same name as the hosted zone (known as the zone apex), you can't + // route traffic to a record for which the value of Type is CNAME. This is because + // the alias record must have the same type as the record you're routing traffic + // to, and creating a CNAME record for the zone apex isn't supported even for an + // alias record. // // This member is required. Type RRType @@ -1186,16 +1185,16 @@ type ResourceRecordSet struct { // to. If you're creating resource records sets for a private hosted zone, note the // following: // - // * You can't create an alias resource record set in a private - // hosted zone to route traffic to a CloudFront distribution. + // * You can't create an alias resource record set in a private hosted + // zone to route traffic to a CloudFront distribution. // - // * Creating - // geolocation alias resource record sets or latency alias resource record sets in - // a private hosted zone is unsupported. + // * Creating geolocation + // alias resource record sets or latency alias resource record sets in a private + // hosted zone is unsupported. 
// - // * For information about creating - // failover resource record sets in a private hosted zone, see Configuring Failover - // in a Private Hosted Zone + // * For information about creating failover resource + // record sets in a private hosted zone, see Configuring Failover in a Private + // Hosted Zone // (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-private-hosted-zones.html) // in the Amazon Route 53 Developer Guide. AliasTarget *AliasTarget @@ -1206,27 +1205,27 @@ type ResourceRecordSet struct { // specify SECONDARY. In addition, you include the HealthCheckId element and // specify the health check that you want Amazon Route 53 to perform for each // resource record set. Except where noted, the following failover behaviors assume - // that you have included the HealthCheckId element in both resource record sets: - // + // that you have included the HealthCheckId element in both resource record + // sets: // - // * When the primary resource record set is healthy, Route 53 responds to DNS - // queries with the applicable value from the primary resource record set + // * When the primary resource record set is healthy, Route 53 responds to + // DNS queries with the applicable value from the primary resource record set // regardless of the health of the secondary resource record set. // - // * When the + // * When the // primary resource record set is unhealthy and the secondary resource record set // is healthy, Route 53 responds to DNS queries with the applicable value from the // secondary resource record set. // - // * When the secondary resource record set is + // * When the secondary resource record set is // unhealthy, Route 53 responds to DNS queries with the applicable value from the // primary resource record set regardless of the health of the primary resource // record set. // - // * If you omit the HealthCheckId element for the secondary - // resource record set, and if the primary resource record set is unhealthy, Route - // 53 always responds to DNS queries with the applicable value from the secondary - // resource record set. This is true regardless of the health of the associated + // * If you omit the HealthCheckId element for the secondary resource + // record set, and if the primary resource record set is unhealthy, Route 53 always + // responds to DNS queries with the applicable value from the secondary resource + // record set. This is true regardless of the health of the associated // endpoint. 
// // You can't create non-failover resource record sets that have the same @@ -1236,11 +1235,11 @@ type ResourceRecordSet struct { // about configuring failover for Route 53, see the following topics in the Amazon // Route 53 Developer Guide: // - // * Route 53 Health Checks and DNS Failover + // * Route 53 Health Checks and DNS Failover // (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover.html) // - // - // * Configuring Failover in a Private Hosted Zone + // * + // Configuring Failover in a Private Hosted Zone // (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-private-hosted-zones.html) Failover ResourceRecordSetFailover @@ -1278,32 +1277,32 @@ type ResourceRecordSet struct { // 53 determines whether a resource record set is healthy based on one of the // following: // - // * By periodically sending a request to the endpoint that is + // * By periodically sending a request to the endpoint that is // specified in the health check // - // * By aggregating the status of a specified - // group of health checks (calculated health checks) + // * By aggregating the status of a specified group + // of health checks (calculated health checks) // - // * By determining the - // current state of a CloudWatch alarm (CloudWatch metric health checks) + // * By determining the current state + // of a CloudWatch alarm (CloudWatch metric health checks) // - // Route 53 - // doesn't check the health of the endpoint that is specified in the resource - // record set, for example, the endpoint specified by the IP address in the Value - // element. When you add a HealthCheckId element to a resource record set, Route 53 - // checks the health of the endpoint that you specified in the health check. For - // more information, see the following topics in the Amazon Route 53 Developer - // Guide: + // Route 53 doesn't check + // the health of the endpoint that is specified in the resource record set, for + // example, the endpoint specified by the IP address in the Value element. When you + // add a HealthCheckId element to a resource record set, Route 53 checks the health + // of the endpoint that you specified in the health check. For more information, + // see the following topics in the Amazon Route 53 Developer Guide: // - // * How Amazon Route 53 Determines Whether an Endpoint Is Healthy + // * How Amazon + // Route 53 Determines Whether an Endpoint Is Healthy // (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-determining-health-of-endpoints.html) // - // - // * Route 53 Health Checks and DNS Failover + // * + // Route 53 Health Checks and DNS Failover // (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover.html) // - // - // * Configuring Failover in a Private Hosted Zone + // * + // Configuring Failover in a Private Hosted Zone // (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-private-hosted-zones.html) // // When @@ -1313,73 +1312,73 @@ type ResourceRecordSet struct { // health check. Configuring health checks makes sense only in the following // configurations: // - // * Non-alias resource record sets: You're checking the - // health of a group of non-alias resource record sets that have the same routing - // policy, name, and type (such as multiple weighted records named www.example.com - // with a type of A) and you specify health check IDs for all the resource record - // sets. 
If the health check status for a resource record set is healthy, Route 53 - // includes the record among the records that it responds to DNS queries with. If - // the health check status for a resource record set is unhealthy, Route 53 stops - // responding to DNS queries using the value for that resource record set. If the - // health check status for all resource record sets in the group is unhealthy, - // Route 53 considers all resource record sets in the group healthy and responds to - // DNS queries accordingly. - // - // * Alias resource record sets: You specify the - // following settings: - // - // * You set EvaluateTargetHealth to true for an alias - // resource record set in a group of resource record sets that have the same - // routing policy, name, and type (such as multiple weighted records named - // www.example.com with a type of A). - // - // * You configure the alias resource - // record set to route traffic to a non-alias resource record set in the same - // hosted zone. - // - // * You specify a health check ID for the non-alias resource - // record set. + // * Non-alias resource record sets: You're checking the health of + // a group of non-alias resource record sets that have the same routing policy, + // name, and type (such as multiple weighted records named www.example.com with a + // type of A) and you specify health check IDs for all the resource record sets. If + // the health check status for a resource record set is healthy, Route 53 includes + // the record among the records that it responds to DNS queries with. If the health + // check status for a resource record set is unhealthy, Route 53 stops responding + // to DNS queries using the value for that resource record set. If the health check + // status for all resource record sets in the group is unhealthy, Route 53 + // considers all resource record sets in the group healthy and responds to DNS + // queries accordingly. + // + // * Alias resource record sets: You specify the following + // settings: + // + // * You set EvaluateTargetHealth to true for an alias resource record + // set in a group of resource record sets that have the same routing policy, name, + // and type (such as multiple weighted records named www.example.com with a type of + // A). + // + // * You configure the alias resource record set to route traffic to a + // non-alias resource record set in the same hosted zone. + // + // * You specify a health + // check ID for the non-alias resource record set. + // + // If the health check status is + // healthy, Route 53 considers the alias resource record set to be healthy and + // includes the alias record among the records that it responds to DNS queries + // with. If the health check status is unhealthy, Route 53 stops responding to DNS + // queries using the alias resource record set. The alias resource record set can + // also route traffic to a group of non-alias resource record sets that have the + // same routing policy, name, and type. In that configuration, associate health + // checks with all of the resource record sets in the group of non-alias resource + // record sets. + // + // Geolocation Routing For geolocation resource record sets, if an + // endpoint is unhealthy, Route 53 looks for a resource record set for the larger, + // associated geographic region. 
For example, suppose you have resource record sets + // for a state in the United States, for the entire United States, for North + // America, and a resource record set that has * for CountryCode is *, which applies + // to all locations. If the endpoint for the state resource record set is + // unhealthy, Route 53 checks for healthy resource record sets in the following + // order until it finds a resource record set for which the endpoint is healthy: + // + // * + // The United States + // + // * North America + // + // * The default resource record + // set + // + // Specifying the Health Check Endpoint by Domain Name If your health checks + // specify the endpoint only by domain name, we recommend that you create a + // separate health check for each endpoint. For example, create a health check for + // each HTTP server that is serving content for www.example.com. For the value of + // FullyQualifiedDomainName, specify the domain name of the server (such as + // us-east-2-www.example.com), not the name of the resource record sets + // (www.example.com). Health check results will be unpredictable if you do the + // following: // - // * Create a health check that - // has the same value for FullyQualifiedDomainName as the name of a resource record - // set. - // - // * Associate that health check with the resource record set. + // * Create a health check that has the same value for + // FullyQualifiedDomainName as the name of a resource record set. + // + // * Associate that + // health check with the resource record set.
HealthCheckId *string // Multivalue answer resource record sets only: To route traffic approximately @@ -1387,31 +1386,31 @@ type ResourceRecordSet struct { // answer record for each resource and specify true for MultiValueAnswer. Note the // following: // - // * If you associate a health check with a multivalue answer - // resource record set, Amazon Route 53 responds to DNS queries with the - // corresponding IP address only when the health check is healthy. + // * If you associate a health check with a multivalue answer resource + // record set, Amazon Route 53 responds to DNS queries with the corresponding IP + // address only when the health check is healthy. // - // * If you - // don't associate a health check with a multivalue answer record, Route 53 always - // considers the record to be healthy. + // * If you don't associate a + // health check with a multivalue answer record, Route 53 always considers the + // record to be healthy. // - // * Route 53 responds to DNS queries with - // up to eight healthy records; if you have eight or fewer healthy records, Route - // 53 responds to all DNS queries with all the healthy records. + // * Route 53 responds to DNS queries with up to eight + // healthy records; if you have eight or fewer healthy records, Route 53 responds + // to all DNS queries with all the healthy records. // - // * If you have - // more than eight healthy records, Route 53 responds to different DNS resolvers - // with different combinations of healthy records. + // * If you have more than eight + // healthy records, Route 53 responds to different DNS resolvers with different + // combinations of healthy records. // - // * When all records are - // unhealthy, Route 53 responds to DNS queries with up to eight unhealthy - // records. + // * When all records are unhealthy, Route 53 + // responds to DNS queries with up to eight unhealthy records. // - // * If a resource becomes unavailable after a resolver caches a - // response, client software typically tries another of the IP addresses in the - // response. + // * If a resource + // becomes unavailable after a resolver caches a response, client software + // typically tries another of the IP addresses in the response. // - // You can't create multivalue answer alias records. + // You can't create + // multivalue answer alias records. MultiValueAnswer *bool // Latency-based resource record sets only: The Amazon EC2 Region where you created @@ -1426,20 +1425,19 @@ type ResourceRecordSet struct { // 53 then returns the value that is associated with the selected resource record // set. Note the following: // - // * You can only specify one ResourceRecord per - // latency resource record set. + // * You can only specify one ResourceRecord per latency + // resource record set. // - // * You can only create one latency resource - // record set for each Amazon EC2 Region. + // * You can only create one latency resource record set for + // each Amazon EC2 Region. // - // * You aren't required to create - // latency resource record sets for all Amazon EC2 Regions. Route 53 will choose - // the region with the best latency from among the regions that you create latency - // resource record sets for. + // * You aren't required to create latency resource record + // sets for all Amazon EC2 Regions. Route 53 will choose the region with the best + // latency from among the regions that you create latency resource record sets + // for. 
// - // * You can't create non-latency resource record - // sets that have the same values for the Name and Type elements as latency - // resource record sets. + // * You can't create non-latency resource record sets that have the same + // values for the Name and Type elements as latency resource record sets. Region ResourceRecordSetRegion // Information about the resource records to act upon. If you're creating an alias @@ -1459,25 +1457,25 @@ type ResourceRecordSet struct { // The resource record cache time to live (TTL), in seconds. Note the following: // - // - // * If you're creating or updating an alias resource record set, omit TTL. Amazon + // * + // If you're creating or updating an alias resource record set, omit TTL. Amazon // Route 53 uses the value of TTL for the alias target. // - // * If you're - // associating this resource record set with a health check (if you're adding a - // HealthCheckId element), we recommend that you specify a TTL of 60 seconds or - // less so clients respond quickly to changes in health status. - // - // * All of the - // resource record sets in a group of weighted resource record sets must have the - // same value for TTL. - // - // * If a group of weighted resource record sets includes - // one or more weighted alias resource record sets for which the alias target is an - // ELB load balancer, we recommend that you specify a TTL of 60 seconds for all of - // the non-alias weighted resource record sets that have the same name and type. - // Values other than 60 seconds (the TTL for load balancers) will change the effect - // of the values that you specify for Weight. + // * If you're associating + // this resource record set with a health check (if you're adding a HealthCheckId + // element), we recommend that you specify a TTL of 60 seconds or less so clients + // respond quickly to changes in health status. + // + // * All of the resource record sets + // in a group of weighted resource record sets must have the same value for TTL. + // + // * + // If a group of weighted resource record sets includes one or more weighted alias + // resource record sets for which the alias target is an ELB load balancer, we + // recommend that you specify a TTL of 60 seconds for all of the non-alias weighted + // resource record sets that have the same name and type. Values other than 60 + // seconds (the TTL for load balancers) will change the effect of the values that + // you specify for Weight. TTL *int64 // When you create a traffic policy instance, Amazon Route 53 automatically creates @@ -1499,28 +1497,28 @@ type ResourceRecordSet struct { // queries based on the ratio of a resource's weight to the total. Note the // following: // - // * You must specify a value for the Weight element for every - // weighted resource record set. + // * You must specify a value for the Weight element for every weighted + // resource record set. // - // * You can only specify one ResourceRecord per - // weighted resource record set. + // * You can only specify one ResourceRecord per weighted + // resource record set. // - // * You can't create latency, failover, or - // geolocation resource record sets that have the same values for the Name and Type - // elements as weighted resource record sets. + // * You can't create latency, failover, or geolocation + // resource record sets that have the same values for the Name and Type elements as + // weighted resource record sets. 
// - // * You can create a maximum of - // 100 weighted resource record sets that have the same values for the Name and - // Type elements. + // * You can create a maximum of 100 weighted + // resource record sets that have the same values for the Name and Type + // elements. // - // * For weighted (but not weighted alias) resource record - // sets, if you set Weight to 0 for a resource record set, Route 53 never responds - // to queries with the applicable value for that resource record set. However, if - // you set Weight to 0 for all resource record sets that have the same combination - // of DNS name and type, traffic is routed to all resources with equal probability. - // The effect of setting Weight to 0 is different when you associate health checks - // with weighted resource record sets. For more information, see Options for - // Configuring Route 53 Active-Active and Active-Passive Failover + // * For weighted (but not weighted alias) resource record sets, if you + // set Weight to 0 for a resource record set, Route 53 never responds to queries + // with the applicable value for that resource record set. However, if you set + // Weight to 0 for all resource record sets that have the same combination of DNS + // name and type, traffic is routed to all resources with equal probability. The + // effect of setting Weight to 0 is different when you associate health checks with + // weighted resource record sets. For more information, see Options for Configuring + // Route 53 Active-Active and Active-Passive Failover // (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-configuring-options.html) // in the Amazon Route 53 Developer Guide. Weight *int64 @@ -1534,10 +1532,10 @@ type ResourceTagSet struct { // The type of the resource. // - // * The resource type for health checks is + // * The resource type for health checks is // healthcheck. // - // * The resource type for hosted zones is hostedzone. + // * The resource type for hosted zones is hostedzone. ResourceType TagResourceType // The tags associated with the specified resource. @@ -1582,29 +1580,29 @@ type Tag struct { // The value of Key depends on the operation that you want to perform: // - // * Add a - // tag to a health check or hosted zone: Key is the name that you want to give the - // new tag. + // * Add a tag + // to a health check or hosted zone: Key is the name that you want to give the new + // tag. // - // * Edit a tag: Key is the name of the tag that you want to change - // the Value for. + // * Edit a tag: Key is the name of the tag that you want to change the Value + // for. // - // * Delete a key: Key is the name of the tag you want to - // remove. + // * Delete a key: Key is the name of the tag you want to remove. // - // * Give a name to a health check: Edit the default Name tag. In the - // Amazon Route 53 console, the list of your health checks includes a Name column - // that lets you see the name that you've given to each health check. + // * Give a + // name to a health check: Edit the default Name tag. In the Amazon Route 53 + // console, the list of your health checks includes a Name column that lets you see + // the name that you've given to each health check. Key *string // The value of Value depends on the operation that you want to perform: // - // * Add - // a tag to a health check or hosted zone: Value is the value that you want to give + // * Add a + // tag to a health check or hosted zone: Value is the value that you want to give // the new tag. 
// - // * Edit a tag: Value is the new value that you want to assign - // the tag. + // * Edit a tag: Value is the new value that you want to assign the + // tag. Value *string } diff --git a/service/route53domains/api_op_CheckDomainAvailability.go b/service/route53domains/api_op_CheckDomainAvailability.go index 3327701bbfb..358f9953aaf 100644 --- a/service/route53domains/api_op_CheckDomainAvailability.go +++ b/service/route53domains/api_op_CheckDomainAvailability.go @@ -39,21 +39,21 @@ type CheckDomainAvailabilityInput struct { // in the Amazon Route 53 Developer Guide. The domain name can contain only the // following characters: // - // * Letters a through z. Domain names are not case + // * Letters a through z. Domain names are not case // sensitive. // - // * Numbers 0 through 9. + // * Numbers 0 through 9. // - // * Hyphen (-). You can't specify a - // hyphen at the beginning or end of a label. + // * Hyphen (-). You can't specify a hyphen at + // the beginning or end of a label. // - // * Period (.) to separate the - // labels in the name, such as the . in example.com. + // * Period (.) to separate the labels in the + // name, such as the . in example.com. // - // Internationalized domain - // names are not supported for some top-level domains. To determine whether the TLD - // that you want to use supports internationalized domain names, see Domains that - // You Can Register with Amazon Route 53 + // Internationalized domain names are not + // supported for some top-level domains. To determine whether the TLD that you want + // to use supports internationalized domain names, see Domains that You Can + // Register with Amazon Route 53 // (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/registrar-tld-list.html). // For more information, see Formatting Internationalized Domain Names // (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DomainNameFormat.html#domain-name-format-idns). diff --git a/service/route53domains/api_op_CheckDomainTransferability.go b/service/route53domains/api_op_CheckDomainTransferability.go index 8123bf7e62d..54d83d841d9 100644 --- a/service/route53domains/api_op_CheckDomainTransferability.go +++ b/service/route53domains/api_op_CheckDomainTransferability.go @@ -37,16 +37,16 @@ type CheckDomainTransferabilityInput struct { // in the Amazon Route 53 Developer Guide. The domain name can contain only the // following characters: // - // * Letters a through z. Domain names are not case + // * Letters a through z. Domain names are not case // sensitive. // - // * Numbers 0 through 9. + // * Numbers 0 through 9. // - // * Hyphen (-). You can't specify a - // hyphen at the beginning or end of a label. + // * Hyphen (-). You can't specify a hyphen at + // the beginning or end of a label. // - // * Period (.) to separate the - // labels in the name, such as the . in example.com. + // * Period (.) to separate the labels in the + // name, such as the . in example.com. // // This member is required. DomainName *string diff --git a/service/route53domains/api_op_GetDomainSuggestions.go b/service/route53domains/api_op_GetDomainSuggestions.go index 5b01e5c8f87..5ecfc4418af 100644 --- a/service/route53domains/api_op_GetDomainSuggestions.go +++ b/service/route53domains/api_op_GetDomainSuggestions.go @@ -37,21 +37,21 @@ type GetDomainSuggestionsInput struct { // in the Amazon Route 53 Developer Guide. The domain name can contain only the // following characters: // - // * Letters a through z. Domain names are not case + // * Letters a through z. 
Domain names are not case // sensitive. // - // * Numbers 0 through 9. + // * Numbers 0 through 9. // - // * Hyphen (-). You can't specify a - // hyphen at the beginning or end of a label. + // * Hyphen (-). You can't specify a hyphen at + // the beginning or end of a label. // - // * Period (.) to separate the - // labels in the name, such as the . in example.com. + // * Period (.) to separate the labels in the + // name, such as the . in example.com. // - // Internationalized domain - // names are not supported for some top-level domains. To determine whether the TLD - // that you want to use supports internationalized domain names, see Domains that - // You Can Register with Amazon Route 53 + // Internationalized domain names are not + // supported for some top-level domains. To determine whether the TLD that you want + // to use supports internationalized domain names, see Domains that You Can + // Register with Amazon Route 53 // (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/registrar-tld-list.html). // // This member is required. diff --git a/service/route53domains/api_op_RegisterDomain.go b/service/route53domains/api_op_RegisterDomain.go index f037c04b468..d52e4025b8d 100644 --- a/service/route53domains/api_op_RegisterDomain.go +++ b/service/route53domains/api_op_RegisterDomain.go @@ -17,29 +17,29 @@ import ( // requires extra parameters. When you register a domain, Amazon Route 53 does the // following: // -// * Creates a Route 53 hosted zone that has the same name as the +// * Creates a Route 53 hosted zone that has the same name as the // domain. Route 53 assigns four name servers to your hosted zone and automatically // updates your domain registration with the names of these name servers. // -// * +// * // Enables autorenew, so your domain registration will renew automatically each // year. We'll notify you in advance of the renewal date so you can choose whether // to renew the registration. // -// * Optionally enables privacy protection, so -// WHOIS queries return contact information either for Amazon Registrar (for .com, -// .net, and .org domains) or for our registrar associate, Gandi (for all other -// TLDs). If you don't enable privacy protection, WHOIS queries return the -// information that you entered for the registrant, admin, and tech contacts. +// * Optionally enables privacy protection, so WHOIS +// queries return contact information either for Amazon Registrar (for .com, .net, +// and .org domains) or for our registrar associate, Gandi (for all other TLDs). If +// you don't enable privacy protection, WHOIS queries return the information that +// you entered for the registrant, admin, and tech contacts. // +// * If registration is +// successful, returns an operation ID that you can use to track the progress and +// completion of the action. If the request is not completed successfully, the +// domain registrant is notified by email. // -// * If registration is successful, returns an operation ID that you can use to -// track the progress and completion of the action. If the request is not completed -// successfully, the domain registrant is notified by email. -// -// * Charges your -// AWS account an amount based on the top-level domain. For more information, see -// Amazon Route 53 Pricing (http://aws.amazon.com/route53/pricing/). +// * Charges your AWS account an amount +// based on the top-level domain. For more information, see Amazon Route 53 Pricing +// (http://aws.amazon.com/route53/pricing/). 
func (c *Client) RegisterDomain(ctx context.Context, params *RegisterDomainInput, optFns ...func(*Options)) (*RegisterDomainOutput, error) { if params == nil { params = &RegisterDomainInput{} @@ -72,21 +72,21 @@ type RegisterDomainInput struct { // in the Amazon Route 53 Developer Guide. The domain name can contain only the // following characters: // - // * Letters a through z. Domain names are not case + // * Letters a through z. Domain names are not case // sensitive. // - // * Numbers 0 through 9. + // * Numbers 0 through 9. // - // * Hyphen (-). You can't specify a - // hyphen at the beginning or end of a label. + // * Hyphen (-). You can't specify a hyphen at + // the beginning or end of a label. // - // * Period (.) to separate the - // labels in the name, such as the . in example.com. + // * Period (.) to separate the labels in the + // name, such as the . in example.com. // - // Internationalized domain - // names are not supported for some top-level domains. To determine whether the TLD - // that you want to use supports internationalized domain names, see Domains that - // You Can Register with Amazon Route 53 + // Internationalized domain names are not + // supported for some top-level domains. To determine whether the TLD that you want + // to use supports internationalized domain names, see Domains that You Can + // Register with Amazon Route 53 // (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/registrar-tld-list.html). // For more information, see Formatting Internationalized Domain Names // (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DomainNameFormat.html#domain-name-format-idns). diff --git a/service/route53domains/api_op_TransferDomain.go b/service/route53domains/api_op_TransferDomain.go index 5468eddeabb..eb63eb153df 100644 --- a/service/route53domains/api_op_TransferDomain.go +++ b/service/route53domains/api_op_TransferDomain.go @@ -17,20 +17,19 @@ import ( // TLDs). For more information about transferring domains, see the following // topics: // -// * For transfer requirements, a detailed procedure, and information +// * For transfer requirements, a detailed procedure, and information // about viewing the status of a domain that you're transferring to Route 53, see // Transferring Registration for a Domain to Amazon Route 53 // (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/domain-transfer-to-route-53.html) // in the Amazon Route 53 Developer Guide. // -// * For information about how to -// transfer a domain from one AWS account to another, see -// TransferDomainToAnotherAwsAccount +// * For information about how to transfer +// a domain from one AWS account to another, see TransferDomainToAnotherAwsAccount // (https://docs.aws.amazon.com/Route53/latest/APIReference/API_domains_TransferDomainToAnotherAwsAccount.html). // -// -// * For information about how to transfer a domain to another domain registrar, -// see Transferring a Domain from Amazon Route 53 to Another Registrar +// * +// For information about how to transfer a domain to another domain registrar, see +// Transferring a Domain from Amazon Route 53 to Another Registrar // (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/domain-transfer-from-route-53.html) // in the Amazon Route 53 Developer Guide. // @@ -77,16 +76,16 @@ type TransferDomainInput struct { // in the Amazon Route 53 Developer Guide. The domain name can contain only the // following characters: // - // * Letters a through z. Domain names are not case + // * Letters a through z. 
Domain names are not case // sensitive. // - // * Numbers 0 through 9. + // * Numbers 0 through 9. // - // * Hyphen (-). You can't specify a - // hyphen at the beginning or end of a label. + // * Hyphen (-). You can't specify a hyphen at + // the beginning or end of a label. // - // * Period (.) to separate the - // labels in the name, such as the . in example.com. + // * Period (.) to separate the labels in the + // name, such as the . in example.com. // // This member is required. DomainName *string diff --git a/service/route53domains/api_op_TransferDomainToAnotherAwsAccount.go b/service/route53domains/api_op_TransferDomainToAnotherAwsAccount.go index 067cd707cf7..8f18e8ae099 100644 --- a/service/route53domains/api_op_TransferDomainToAnotherAwsAccount.go +++ b/service/route53domains/api_op_TransferDomainToAnotherAwsAccount.go @@ -13,18 +13,18 @@ import ( // Transfers a domain from the current AWS account to another AWS account. Note the // following: // -// * The AWS account that you're transferring the domain to must -// accept the transfer. If the other account doesn't accept the transfer within 3 -// days, we cancel the transfer. See AcceptDomainTransferFromAnotherAwsAccount +// * The AWS account that you're transferring the domain to must accept +// the transfer. If the other account doesn't accept the transfer within 3 days, we +// cancel the transfer. See AcceptDomainTransferFromAnotherAwsAccount // (https://docs.aws.amazon.com/Route53/latest/APIReference/API_domains_AcceptDomainTransferFromAnotherAwsAccount.html). // -// -// * You can cancel the transfer before the other account accepts it. See +// * +// You can cancel the transfer before the other account accepts it. See // CancelDomainTransferToAnotherAwsAccount // (https://docs.aws.amazon.com/Route53/latest/APIReference/API_domains_CancelDomainTransferToAnotherAwsAccount.html). // -// -// * The other account can reject the transfer. See +// * +// The other account can reject the transfer. See // RejectDomainTransferFromAnotherAwsAccount // (https://docs.aws.amazon.com/Route53/latest/APIReference/API_domains_RejectDomainTransferFromAnotherAwsAccount.html). 
// diff --git a/service/route53domains/types/enums.go b/service/route53domains/types/enums.go index f5caba204cb..9c8742645de 100644 --- a/service/route53domains/types/enums.go +++ b/service/route53domains/types/enums.go @@ -9,7 +9,7 @@ const ( ContactTypePerson ContactType = "PERSON" ContactTypeCompany ContactType = "COMPANY" ContactTypeAssociation ContactType = "ASSOCIATION" - ContactTypePublic_body ContactType = "PUBLIC_BODY" + ContactTypePublicBody ContactType = "PUBLIC_BODY" ContactTypeReseller ContactType = "RESELLER" ) @@ -502,14 +502,14 @@ type DomainAvailability string // Enum values for DomainAvailability const ( - DomainAvailabilityAvailable DomainAvailability = "AVAILABLE" - DomainAvailabilityAvailable_reserved DomainAvailability = "AVAILABLE_RESERVED" - DomainAvailabilityAvailable_preorder DomainAvailability = "AVAILABLE_PREORDER" - DomainAvailabilityUnavailable DomainAvailability = "UNAVAILABLE" - DomainAvailabilityUnavailable_premium DomainAvailability = "UNAVAILABLE_PREMIUM" - DomainAvailabilityUnavailable_restricted DomainAvailability = "UNAVAILABLE_RESTRICTED" - DomainAvailabilityReserved DomainAvailability = "RESERVED" - DomainAvailabilityDont_know DomainAvailability = "DONT_KNOW" + DomainAvailabilityAvailable DomainAvailability = "AVAILABLE" + DomainAvailabilityAvailableReserved DomainAvailability = "AVAILABLE_RESERVED" + DomainAvailabilityAvailablePreorder DomainAvailability = "AVAILABLE_PREORDER" + DomainAvailabilityUnavailable DomainAvailability = "UNAVAILABLE" + DomainAvailabilityUnavailablePremium DomainAvailability = "UNAVAILABLE_PREMIUM" + DomainAvailabilityUnavailableRestricted DomainAvailability = "UNAVAILABLE_RESTRICTED" + DomainAvailabilityReserved DomainAvailability = "RESERVED" + DomainAvailabilityDontKnow DomainAvailability = "DONT_KNOW" ) // Values returns all known values for DomainAvailability. 
Note that this can be @@ -532,35 +532,35 @@ type ExtraParamName string // Enum values for ExtraParamName const ( - ExtraParamNameDuns_number ExtraParamName = "DUNS_NUMBER" - ExtraParamNameBrand_number ExtraParamName = "BRAND_NUMBER" - ExtraParamNameBirth_department ExtraParamName = "BIRTH_DEPARTMENT" - ExtraParamNameBirth_date_in_yyyy_mm_dd ExtraParamName = "BIRTH_DATE_IN_YYYY_MM_DD" - ExtraParamNameBirth_country ExtraParamName = "BIRTH_COUNTRY" - ExtraParamNameBirth_city ExtraParamName = "BIRTH_CITY" - ExtraParamNameDocument_number ExtraParamName = "DOCUMENT_NUMBER" - ExtraParamNameAu_id_number ExtraParamName = "AU_ID_NUMBER" - ExtraParamNameAu_id_type ExtraParamName = "AU_ID_TYPE" - ExtraParamNameCa_legal_type ExtraParamName = "CA_LEGAL_TYPE" - ExtraParamNameCa_business_entity_type ExtraParamName = "CA_BUSINESS_ENTITY_TYPE" - ExtraParamNameCa_legal_representative ExtraParamName = "CA_LEGAL_REPRESENTATIVE" - ExtraParamNameCa_legal_representative_capacity ExtraParamName = "CA_LEGAL_REPRESENTATIVE_CAPACITY" - ExtraParamNameEs_identification ExtraParamName = "ES_IDENTIFICATION" - ExtraParamNameEs_identification_type ExtraParamName = "ES_IDENTIFICATION_TYPE" - ExtraParamNameEs_legal_form ExtraParamName = "ES_LEGAL_FORM" - ExtraParamNameFi_business_number ExtraParamName = "FI_BUSINESS_NUMBER" - ExtraParamNameOnwer_fi_id_number ExtraParamName = "FI_ID_NUMBER" - ExtraParamNameFi_nationality ExtraParamName = "FI_NATIONALITY" - ExtraParamNameFi_organization_type ExtraParamName = "FI_ORGANIZATION_TYPE" - ExtraParamNameIt_nationality ExtraParamName = "IT_NATIONALITY" - ExtraParamNameIt_pin ExtraParamName = "IT_PIN" - ExtraParamNameIt_registrant_entity_type ExtraParamName = "IT_REGISTRANT_ENTITY_TYPE" - ExtraParamNameRu_passport_data ExtraParamName = "RU_PASSPORT_DATA" - ExtraParamNameSe_id_number ExtraParamName = "SE_ID_NUMBER" - ExtraParamNameSg_id_number ExtraParamName = "SG_ID_NUMBER" - ExtraParamNameVat_number ExtraParamName = "VAT_NUMBER" - ExtraParamNameUk_contact_type ExtraParamName = "UK_CONTACT_TYPE" - ExtraParamNameUk_company_number ExtraParamName = "UK_COMPANY_NUMBER" + ExtraParamNameDunsNumber ExtraParamName = "DUNS_NUMBER" + ExtraParamNameBrandNumber ExtraParamName = "BRAND_NUMBER" + ExtraParamNameBirthDepartment ExtraParamName = "BIRTH_DEPARTMENT" + ExtraParamNameBirthDateInYyyyMmDd ExtraParamName = "BIRTH_DATE_IN_YYYY_MM_DD" + ExtraParamNameBirthCountry ExtraParamName = "BIRTH_COUNTRY" + ExtraParamNameBirthCity ExtraParamName = "BIRTH_CITY" + ExtraParamNameDocumentNumber ExtraParamName = "DOCUMENT_NUMBER" + ExtraParamNameAuIdNumber ExtraParamName = "AU_ID_NUMBER" + ExtraParamNameAuIdType ExtraParamName = "AU_ID_TYPE" + ExtraParamNameCaLegalType ExtraParamName = "CA_LEGAL_TYPE" + ExtraParamNameCaBusinessEntityType ExtraParamName = "CA_BUSINESS_ENTITY_TYPE" + ExtraParamNameCaLegalRepresentative ExtraParamName = "CA_LEGAL_REPRESENTATIVE" + ExtraParamNameCaLegalRepresentativeCapacity ExtraParamName = "CA_LEGAL_REPRESENTATIVE_CAPACITY" + ExtraParamNameEsIdentification ExtraParamName = "ES_IDENTIFICATION" + ExtraParamNameEsIdentificationType ExtraParamName = "ES_IDENTIFICATION_TYPE" + ExtraParamNameEsLegalForm ExtraParamName = "ES_LEGAL_FORM" + ExtraParamNameFiBusinessNumber ExtraParamName = "FI_BUSINESS_NUMBER" + ExtraParamNameOnwerFiIdNumber ExtraParamName = "FI_ID_NUMBER" + ExtraParamNameFiNationality ExtraParamName = "FI_NATIONALITY" + ExtraParamNameFiOrganizationType ExtraParamName = "FI_ORGANIZATION_TYPE" + ExtraParamNameItNationality ExtraParamName = "IT_NATIONALITY" + 
ExtraParamNameItPin ExtraParamName = "IT_PIN" + ExtraParamNameItRegistrantEntityType ExtraParamName = "IT_REGISTRANT_ENTITY_TYPE" + ExtraParamNameRuPassportData ExtraParamName = "RU_PASSPORT_DATA" + ExtraParamNameSeIdNumber ExtraParamName = "SE_ID_NUMBER" + ExtraParamNameSgIdNumber ExtraParamName = "SG_ID_NUMBER" + ExtraParamNameVatNumber ExtraParamName = "VAT_NUMBER" + ExtraParamNameUkContactType ExtraParamName = "UK_CONTACT_TYPE" + ExtraParamNameUkCompanyNumber ExtraParamName = "UK_COMPANY_NUMBER" ) // Values returns all known values for ExtraParamName. Note that this can be @@ -604,11 +604,11 @@ type OperationStatus string // Enum values for OperationStatus const ( - OperationStatusSubmitted OperationStatus = "SUBMITTED" - OperationStatusIn_progress OperationStatus = "IN_PROGRESS" - OperationStatusError OperationStatus = "ERROR" - OperationStatusSuccessful OperationStatus = "SUCCESSFUL" - OperationStatusFailed OperationStatus = "FAILED" + OperationStatusSubmitted OperationStatus = "SUBMITTED" + OperationStatusInProgress OperationStatus = "IN_PROGRESS" + OperationStatusError OperationStatus = "ERROR" + OperationStatusSuccessful OperationStatus = "SUCCESSFUL" + OperationStatusFailed OperationStatus = "FAILED" ) // Values returns all known values for OperationStatus. Note that this can be @@ -628,24 +628,24 @@ type OperationType string // Enum values for OperationType const ( - OperationTypeRegister_domain OperationType = "REGISTER_DOMAIN" - OperationTypeDelete_domain OperationType = "DELETE_DOMAIN" - OperationTypeTransfer_in_domain OperationType = "TRANSFER_IN_DOMAIN" - OperationTypeUpdate_domain_contact OperationType = "UPDATE_DOMAIN_CONTACT" - OperationTypeUpdate_nameserver OperationType = "UPDATE_NAMESERVER" - OperationTypeChange_privacy_protection OperationType = "CHANGE_PRIVACY_PROTECTION" - OperationTypeDomain_lock OperationType = "DOMAIN_LOCK" - OperationTypeEnable_autorenew OperationType = "ENABLE_AUTORENEW" - OperationTypeDisable_autorenew OperationType = "DISABLE_AUTORENEW" - OperationTypeAdd_dnssec OperationType = "ADD_DNSSEC" - OperationTypeRemove_dnssec OperationType = "REMOVE_DNSSEC" - OperationTypeExpire_domain OperationType = "EXPIRE_DOMAIN" - OperationTypeTransfer_out_domain OperationType = "TRANSFER_OUT_DOMAIN" - OperationTypeChange_domain_owner OperationType = "CHANGE_DOMAIN_OWNER" - OperationTypeRenew_domain OperationType = "RENEW_DOMAIN" - OperationTypePush_domain OperationType = "PUSH_DOMAIN" - OperationTypeInternal_transfer_out_domain OperationType = "INTERNAL_TRANSFER_OUT_DOMAIN" - OperationTypeInternal_transfer_in_domain OperationType = "INTERNAL_TRANSFER_IN_DOMAIN" + OperationTypeRegisterDomain OperationType = "REGISTER_DOMAIN" + OperationTypeDeleteDomain OperationType = "DELETE_DOMAIN" + OperationTypeTransferInDomain OperationType = "TRANSFER_IN_DOMAIN" + OperationTypeUpdateDomainContact OperationType = "UPDATE_DOMAIN_CONTACT" + OperationTypeUpdateNameserver OperationType = "UPDATE_NAMESERVER" + OperationTypeChangePrivacyProtection OperationType = "CHANGE_PRIVACY_PROTECTION" + OperationTypeDomainLock OperationType = "DOMAIN_LOCK" + OperationTypeEnableAutorenew OperationType = "ENABLE_AUTORENEW" + OperationTypeDisableAutorenew OperationType = "DISABLE_AUTORENEW" + OperationTypeAddDnssec OperationType = "ADD_DNSSEC" + OperationTypeRemoveDnssec OperationType = "REMOVE_DNSSEC" + OperationTypeExpireDomain OperationType = "EXPIRE_DOMAIN" + OperationTypeTransferOutDomain OperationType = "TRANSFER_OUT_DOMAIN" + OperationTypeChangeDomainOwner OperationType = 
"CHANGE_DOMAIN_OWNER" + OperationTypeRenewDomain OperationType = "RENEW_DOMAIN" + OperationTypePushDomain OperationType = "PUSH_DOMAIN" + OperationTypeInternalTransferOutDomain OperationType = "INTERNAL_TRANSFER_OUT_DOMAIN" + OperationTypeInternalTransferInDomain OperationType = "INTERNAL_TRANSFER_IN_DOMAIN" ) // Values returns all known values for OperationType. Note that this can be @@ -700,7 +700,7 @@ type Transferable string const ( TransferableTransferable Transferable = "TRANSFERABLE" TransferableUntransferable Transferable = "UNTRANSFERABLE" - TransferableDont_know Transferable = "DONT_KNOW" + TransferableDontKnow Transferable = "DONT_KNOW" ) // Values returns all known values for Transferable. Note that this can be expanded diff --git a/service/route53domains/types/types.go b/service/route53domains/types/types.go index 44d5411c64b..a1ca7cf5a13 100644 --- a/service/route53domains/types/types.go +++ b/service/route53domains/types/types.go @@ -46,17 +46,17 @@ type ContactDetail struct { // Indicates whether the contact is a person, company, association, or public // organization. Note the following: // - // * If you specify a value other than - // PERSON, you must also specify a value for OrganizationName. + // * If you specify a value other than PERSON, + // you must also specify a value for OrganizationName. // - // * For some - // TLDs, the privacy protection available depends on the value that you specify for - // Contact Type. For the privacy protection settings for your TLD, see Domains that - // You Can Register with Amazon Route 53 + // * For some TLDs, the + // privacy protection available depends on the value that you specify for Contact + // Type. For the privacy protection settings for your TLD, see Domains that You Can + // Register with Amazon Route 53 // (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/registrar-tld-list.html) // in the Amazon Route 53 Developer Guide // - // * For .es domains, if you specify + // * For .es domains, if you specify // PERSON, you must specify INDIVIDUAL for the value of ES_LEGAL_FORM. 
ContactType ContactType @@ -155,400 +155,387 @@ type ExtraParam struct { // are the top-level domains that require additional parameters and the names of // the parameters that they require: .com.au and .net.au // - // * AU_ID_NUMBER + // * AU_ID_NUMBER // - // * + // * // AU_ID_TYPE Valid values include the following: // - // * ABN (Australian - // business number) + // * ABN (Australian business + // number) // - // * ACN (Australian company number) + // * ACN (Australian company number) // - // * TM - // (Trademark number) + // * TM (Trademark number) // // .ca // - // * BRAND_NUMBER + // * + // BRAND_NUMBER // - // * CA_BUSINESS_ENTITY_TYPE Valid - // values include the following: + // * CA_BUSINESS_ENTITY_TYPE Valid values include the following: // - // * BANK (Bank) + // * + // BANK (Bank) // - // * - // COMMERCIAL_COMPANY (Commercial company) + // * COMMERCIAL_COMPANY (Commercial company) // - // * COMPANY (Company) + // * COMPANY (Company) // - // * + // * // COOPERATION (Cooperation) // - // * COOPERATIVE (Cooperative) + // * COOPERATIVE (Cooperative) // - // * - // COOPRIX (Cooprix) + // * COOPRIX (Cooprix) // - // * CORP (Corporation) + // * + // CORP (Corporation) // - // * CREDIT_UNION (Credit - // union) + // * CREDIT_UNION (Credit union) // - // * FOMIA (Federation of mutual insurance associations) + // * FOMIA (Federation of mutual + // insurance associations) // - // * - // INC (Incorporated) + // * INC (Incorporated) // - // * LTD (Limited) + // * LTD (Limited) // - // * LTEE (Limitée) + // * LTEE + // (Limitée) // - // * - // LLC (Limited liability corporation) + // * LLC (Limited liability corporation) // - // * LLP (Limited liability + // * LLP (Limited liability // partnership) // - // * LTE (Lte.) - // - // * MBA (Mutual benefit association) + // * LTE (Lte.) // + // * MBA (Mutual benefit association) // - // * MIC (Mutual insurance company) - // - // * NFP (Not-for-profit corporation) + // * MIC (Mutual + // insurance company) // + // * NFP (Not-for-profit corporation) // // * SA (S.A.) 
// - // * SAVINGS_COMPANY (Savings company) + // * + // SAVINGS_COMPANY (Savings company) // - // * - // SAVINGS_UNION (Savings union) + // * SAVINGS_UNION (Savings union) // - // * SARL (Société à responsabilité - // limitée) + // * SARL + // (Société à responsabilité limitée) // - // * TRUST (Trust) + // * TRUST (Trust) // - // * ULC (Unlimited liability + // * ULC (Unlimited liability // corporation) // - // * CA_LEGAL_TYPE When ContactType is PERSON, valid values - // include the following: + // * CA_LEGAL_TYPE When ContactType is PERSON, valid values include + // the following: // - // * ABO (Aboriginal Peoples indigenous to - // Canada) - // - // * CCT (Canadian citizen) + // * ABO (Aboriginal Peoples indigenous to Canada) // - // * LGR (Legal Representative - // of a Canadian Citizen or Permanent Resident) + // * CCT (Canadian + // citizen) // - // * RES (Permanent resident - // of Canada) + // * LGR (Legal Representative of a Canadian Citizen or Permanent + // Resident) // - // When ContactType is a value other than PERSON, valid values - // include the following: + // * RES (Permanent resident of Canada) // - // * ASS (Canadian unincorporated association) + // When ContactType is a value + // other than PERSON, valid values include the following: // + // * ASS (Canadian + // unincorporated association) // // * CCO (Canadian corporation) // - // * EDU (Canadian educational institution) - // + // * EDU (Canadian + // educational institution) // // * GOV (Government or government entity in Canada) // - // * HOP (Canadian - // Hospital) - // - // * INB (Indian Band recognized by the Indian Act of Canada) + // * + // HOP (Canadian Hospital) // + // * INB (Indian Band recognized by the Indian Act of + // Canada) // // * LAM (Canadian Library, Archive, or Museum) // - // * MAJ (Her/His Majesty the - // Queen/King) - // - // * OMK (Official mark registered in Canada) + // * MAJ (Her/His Majesty + // the Queen/King) // - // * PLT - // (Canadian Political Party) + // * OMK (Official mark registered in Canada) // - // * PRT (Partnership Registered in Canada) + // * PLT (Canadian + // Political Party) // + // * PRT (Partnership Registered in Canada) // - // * TDM (Trademark registered in Canada) + // * TDM (Trademark + // registered in Canada) // - // * TRD (Canadian Trade Union) + // * TRD (Canadian Trade Union) // - // - // * TRS (Trust established in Canada) + // * TRS (Trust established in + // Canada) // // .es // - // * ES_IDENTIFICATION Specify the - // applicable value: - // - // * For contacts inside Spain: Enter your passport - // ID. + // * ES_IDENTIFICATION Specify the applicable value: // - // * For contacts outside of Spain: Enter the VAT identification - // number for the company. For .es domains, the value of ContactType must be - // PERSON. + // * For contacts + // inside Spain: Enter your passport ID. // - // * ES_IDENTIFICATION_TYPE Valid values include the following: + // * For contacts outside of Spain: Enter + // the VAT identification number for the company. For .es domains, the value of + // ContactType must be PERSON. 
// + // * ES_IDENTIFICATION_TYPE Valid values include the + // following: // // * DNI_AND_NIF (For Spanish contacts) // - // * NIE (For foreigners with legal - // residence) + // * NIE (For foreigners with + // legal residence) // - // * OTHER (For contacts outside of Spain) + // * OTHER (For contacts outside of Spain) // - // * ES_LEGAL_FORM - // Valid values include the following: - // - // * ASSOCIATION + // * ES_LEGAL_FORM Valid + // values include the following: // - // * - // CENTRAL_GOVERNMENT_BODY + // * ASSOCIATION // - // * CIVIL_SOCIETY + // * CENTRAL_GOVERNMENT_BODY // - // * - // COMMUNITY_OF_OWNERS + // * + // CIVIL_SOCIETY // - // * COMMUNITY_PROPERTY + // * COMMUNITY_OF_OWNERS // - // * CONSULATE + // * COMMUNITY_PROPERTY // + // * CONSULATE // - // * COOPERATIVE + // * + // COOPERATIVE // - // * DESIGNATION_OF_ORIGIN_SUPERVISORY_COUNCIL + // * DESIGNATION_OF_ORIGIN_SUPERVISORY_COUNCIL // - // * + // * // ECONOMIC_INTEREST_GROUP // - // * EMBASSY + // * EMBASSY // - // * - // ENTITY_MANAGING_NATURAL_AREAS + // * ENTITY_MANAGING_NATURAL_AREAS // - // * FARM_PARTNERSHIP + // * + // FARM_PARTNERSHIP // - // * - // FOUNDATION + // * FOUNDATION // - // * GENERAL_AND_LIMITED_PARTNERSHIP + // * GENERAL_AND_LIMITED_PARTNERSHIP // - // * + // * // GENERAL_PARTNERSHIP // - // * INDIVIDUAL + // * INDIVIDUAL // - // * LIMITED_COMPANY + // * LIMITED_COMPANY // - // * - // LOCAL_AUTHORITY + // * LOCAL_AUTHORITY // - // * LOCAL_PUBLIC_ENTITY + // * + // LOCAL_PUBLIC_ENTITY // - // * - // MUTUAL_INSURANCE_COMPANY + // * MUTUAL_INSURANCE_COMPANY // - // * NATIONAL_PUBLIC_ENTITY + // * NATIONAL_PUBLIC_ENTITY // - // * + // * // ORDER_OR_RELIGIOUS_INSTITUTION // - // * OTHERS (Only for contacts outside of - // Spain) + // * OTHERS (Only for contacts outside of Spain) // - // * POLITICAL_PARTY + // * + // POLITICAL_PARTY // - // * PROFESSIONAL_ASSOCIATION + // * PROFESSIONAL_ASSOCIATION // - // * - // PUBLIC_LAW_ASSOCIATION + // * PUBLIC_LAW_ASSOCIATION // - // * PUBLIC_LIMITED_COMPANY + // * + // PUBLIC_LIMITED_COMPANY // - // * - // REGIONAL_GOVERNMENT_BODY + // * REGIONAL_GOVERNMENT_BODY // - // * REGIONAL_PUBLIC_ENTITY + // * REGIONAL_PUBLIC_ENTITY // - // * + // * // SAVINGS_BANK // - // * SPANISH_OFFICE + // * SPANISH_OFFICE // - // * SPORTS_ASSOCIATION + // * SPORTS_ASSOCIATION // - // * - // SPORTS_FEDERATION + // * SPORTS_FEDERATION // - // * SPORTS_LIMITED_COMPANY + // * + // SPORTS_LIMITED_COMPANY // - // * - // TEMPORARY_ALLIANCE_OF_ENTERPRISES + // * TEMPORARY_ALLIANCE_OF_ENTERPRISES // - // * TRADE_UNION + // * TRADE_UNION // - // * + // * // WORKER_OWNED_COMPANY // - // * WORKER_OWNED_LIMITED_COMPANY + // * WORKER_OWNED_LIMITED_COMPANY // // .fi // - // * + // * // BIRTH_DATE_IN_YYYY_MM_DD // - // * FI_BUSINESS_NUMBER - // - // * FI_ID_NUMBER + // * FI_BUSINESS_NUMBER // - // * - // FI_NATIONALITY Valid values include the following: + // * FI_ID_NUMBER // - // * FINNISH + // * FI_NATIONALITY + // Valid values include the following: // - // * - // NOT_FINNISH + // * FINNISH // - // * FI_ORGANIZATION_TYPE Valid values include the following: + // * NOT_FINNISH // + // * + // FI_ORGANIZATION_TYPE Valid values include the following: // // * COMPANY // - // * CORPORATION - // - // * GOVERNMENT + // * + // CORPORATION // - // * INSTITUTION + // * GOVERNMENT // + // * INSTITUTION // // * POLITICAL_PARTY // - // * PUBLIC_COMMUNITY + // * + // PUBLIC_COMMUNITY // - // * TOWNSHIP + // * TOWNSHIP // // .fr // - // * - // BIRTH_CITY - // - // * BIRTH_COUNTRY + // * BIRTH_CITY // - // * 
BIRTH_DATE_IN_YYYY_MM_DD + // * BIRTH_COUNTRY // - // * - // BIRTH_DEPARTMENT: Specify the INSEE code that corresponds with the department - // where the contact was born. If the contact was born somewhere other than France - // or its overseas departments, specify 99. For more information, including a list - // of departments and the corresponding INSEE numbers, see the Wikipedia entry - // Departments of France (https://en.wikipedia.org/wiki/Departments_of_France). + // * + // BIRTH_DATE_IN_YYYY_MM_DD // + // * BIRTH_DEPARTMENT: Specify the INSEE code that + // corresponds with the department where the contact was born. If the contact was + // born somewhere other than France or its overseas departments, specify 99. For + // more information, including a list of departments and the corresponding INSEE + // numbers, see the Wikipedia entry Departments of France + // (https://en.wikipedia.org/wiki/Departments_of_France). // // * BRAND_NUMBER // // .it // - // * IT_NATIONALITY - // - // * IT_PIN + // * + // IT_NATIONALITY // - // * - // IT_REGISTRANT_ENTITY_TYPE Valid values include the following: + // * IT_PIN // - // * - // FOREIGNERS + // * IT_REGISTRANT_ENTITY_TYPE Valid values include the + // following: // - // * FREELANCE_WORKERS (Freelance workers and professionals) + // * FOREIGNERS // + // * FREELANCE_WORKERS (Freelance workers and + // professionals) // - // * ITALIAN_COMPANIES (Italian companies and one-person companies) + // * ITALIAN_COMPANIES (Italian companies and one-person + // companies) // - // * - // NON_PROFIT_ORGANIZATIONS + // * NON_PROFIT_ORGANIZATIONS // - // * OTHER_SUBJECTS + // * OTHER_SUBJECTS // - // * + // * // PUBLIC_ORGANIZATIONS // // .ru // - // * BIRTH_DATE_IN_YYYY_MM_DD + // * BIRTH_DATE_IN_YYYY_MM_DD // - // * + // * // RU_PASSPORT_DATA // // .se // - // * BIRTH_COUNTRY + // * BIRTH_COUNTRY // - // * SE_ID_NUMBER + // * SE_ID_NUMBER // // .sg // - // * + // * // SG_ID_NUMBER // // .co.uk, .me.uk, and .org.uk // - // * UK_CONTACT_TYPE Valid values + // * UK_CONTACT_TYPE Valid values // include the following: // - // * CRC (UK Corporation by Royal Charter) - // + // * CRC (UK Corporation by Royal Charter) // - // * FCORP (Non-UK Corporation) + // * FCORP (Non-UK + // Corporation) // - // * FIND (Non-UK Individual, representing - // self) + // * FIND (Non-UK Individual, representing self) // - // * FOTHER (Non-UK Entity that does not fit into any other - // category) + // * FOTHER (Non-UK + // Entity that does not fit into any other category) // - // * GOV (UK Government Body) + // * GOV (UK Government Body) // - // * IND (UK Individual - // (representing self)) + // * + // IND (UK Individual (representing self)) // - // * IP (UK Industrial/Provident Registered - // Company) - // - // * LLP (UK Limited Liability Partnership) + // * IP (UK Industrial/Provident + // Registered Company) // - // * LTD (UK - // Limited Company) + // * LLP (UK Limited Liability Partnership) // - // * OTHER (UK Entity that does not fit into any other - // category) + // * LTD (UK Limited + // Company) // - // * PLC (UK Public Limited Company) + // * OTHER (UK Entity that does not fit into any other category) // - // * PTNR (UK - // Partnership) + // * PLC + // (UK Public Limited Company) // - // * RCHAR (UK Registered Charity) + // * PTNR (UK Partnership) // - // * SCH (UK - // School) + // * RCHAR (UK Registered + // Charity) // - // * STAT (UK Statutory Body) + // * SCH (UK School) // - // * STRA (UK Sole Trader) + // * STAT (UK Statutory Body) // + // * STRA (UK Sole + // 
Trader) // // * UK_COMPANY_NUMBER // diff --git a/service/route53resolver/api_op_CreateResolverEndpoint.go b/service/route53resolver/api_op_CreateResolverEndpoint.go index 5d6c529bdc7..24c49cf0e22 100644 --- a/service/route53resolver/api_op_CreateResolverEndpoint.go +++ b/service/route53resolver/api_op_CreateResolverEndpoint.go @@ -14,11 +14,11 @@ import ( // Creates a Resolver endpoint. There are two types of Resolver endpoints, inbound // and outbound: // -// * An inbound Resolver endpoint forwards DNS queries to the -// DNS service for a VPC from your network. +// * An inbound Resolver endpoint forwards DNS queries to the DNS +// service for a VPC from your network. // -// * An outbound Resolver endpoint -// forwards DNS queries from the DNS service for a VPC to your network. +// * An outbound Resolver endpoint forwards +// DNS queries from the DNS service for a VPC to your network. func (c *Client) CreateResolverEndpoint(ctx context.Context, params *CreateResolverEndpointInput, optFns ...func(*Options)) (*CreateResolverEndpointOutput, error) { if params == nil { params = &CreateResolverEndpointInput{} @@ -45,11 +45,11 @@ type CreateResolverEndpointInput struct { // Specify the applicable value: // - // * INBOUND: Resolver forwards DNS queries to - // the DNS service for a VPC from your network + // * INBOUND: Resolver forwards DNS queries to the + // DNS service for a VPC from your network // - // * OUTBOUND: Resolver forwards - // DNS queries from the DNS service for a VPC to your network + // * OUTBOUND: Resolver forwards DNS + // queries from the DNS service for a VPC to your network // // This member is required. Direction types.ResolverEndpointDirection diff --git a/service/route53resolver/api_op_CreateResolverQueryLogConfig.go b/service/route53resolver/api_op_CreateResolverQueryLogConfig.go index f390c64d5b9..86ef850d3fe 100644 --- a/service/route53resolver/api_op_CreateResolverQueryLogConfig.go +++ b/service/route53resolver/api_op_CreateResolverQueryLogConfig.go @@ -52,16 +52,16 @@ type CreateResolverQueryLogConfigInput struct { // query logs to an S3 bucket, a CloudWatch Logs log group, or a Kinesis Data // Firehose delivery stream. Examples of valid values include the following: // - // * - // S3 bucket: arn:aws:s3:::examplebucket You can optionally append a file prefix to + // * S3 + // bucket: arn:aws:s3:::examplebucket You can optionally append a file prefix to // the end of the ARN. arn:aws:s3:::examplebucket/development/ // - // * CloudWatch - // Logs log group: + // * CloudWatch Logs + // log group: // arn:aws:logs:us-west-1:123456789012:log-group:/mystack-testgroup-12ABC1AB12A1:* // - // - // * Kinesis Data Firehose delivery stream: + // * + // Kinesis Data Firehose delivery stream: // arn:aws:kinesis:us-east-2:0123456789:stream/my_stream_name // // This member is required. diff --git a/service/route53resolver/api_op_DeleteResolverEndpoint.go b/service/route53resolver/api_op_DeleteResolverEndpoint.go index 7d98ff7b332..ddcfe6c7817 100644 --- a/service/route53resolver/api_op_DeleteResolverEndpoint.go +++ b/service/route53resolver/api_op_DeleteResolverEndpoint.go @@ -14,12 +14,12 @@ import ( // Deletes a Resolver endpoint. The effect of deleting a Resolver endpoint depends // on whether it's an inbound or an outbound Resolver endpoint: // -// * Inbound: DNS +// * Inbound: DNS // queries from your network are no longer routed to the DNS service for the // specified VPC. // -// * Outbound: DNS queries from a VPC are no longer routed to -// your network. 
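Before the diff moves on to Route 53 Resolver, one note on the route53domains types touched above: the ContactType rule (a .es PERSON contact must also set ES_LEGAL_FORM to INDIVIDUAL) combines with the renamed ExtraParamName constants. A hedged sketch of building such a contact; the ContactDetail and ExtraParam field names are assumed from this package's generated types, and the identification value is a placeholder.

package main

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/route53domains/types"
)

// esPersonContact builds a registrant contact for a .es domain. Per the
// ExtraParam documentation above, a PERSON contact for .es also needs
// ES_LEGAL_FORM=INDIVIDUAL plus an identification type and number.
func esPersonContact() types.ContactDetail {
	return types.ContactDetail{
		ContactType: types.ContactTypePerson,
		FirstName:   aws.String("Maria"),
		LastName:    aws.String("Garcia"),
		ExtraParams: []types.ExtraParam{
			{Name: types.ExtraParamNameEsLegalForm, Value: aws.String("INDIVIDUAL")},
			{Name: types.ExtraParamNameEsIdentificationType, Value: aws.String("DNI_AND_NIF")},
			{Name: types.ExtraParamNameEsIdentification, Value: aws.String("12345678Z")}, // placeholder ID
		},
	}
}

func main() { _ = esPersonContact() }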
+// * Outbound: DNS queries from a VPC are no longer routed to your +// network. func (c *Client) DeleteResolverEndpoint(ctx context.Context, params *DeleteResolverEndpointInput, optFns ...func(*Options)) (*DeleteResolverEndpointOutput, error) { if params == nil { params = &DeleteResolverEndpointInput{} diff --git a/service/route53resolver/api_op_DisassociateResolverQueryLogConfig.go b/service/route53resolver/api_op_DisassociateResolverQueryLogConfig.go index 5dc0d2e78fb..acba2c9d554 100644 --- a/service/route53resolver/api_op_DisassociateResolverQueryLogConfig.go +++ b/service/route53resolver/api_op_DisassociateResolverQueryLogConfig.go @@ -17,11 +17,11 @@ import ( // logging configuration with other accounts, VPCs can be disassociated from the // configuration in the following ways: // -// * The accounts that you shared the +// * The accounts that you shared the // configuration with can disassociate VPCs from the configuration. // -// * You can -// stop sharing the configuration. +// * You can stop +// sharing the configuration. func (c *Client) DisassociateResolverQueryLogConfig(ctx context.Context, params *DisassociateResolverQueryLogConfigInput, optFns ...func(*Options)) (*DisassociateResolverQueryLogConfigOutput, error) { if params == nil { params = &DisassociateResolverQueryLogConfigInput{} diff --git a/service/route53resolver/api_op_ListResolverQueryLogConfigAssociations.go b/service/route53resolver/api_op_ListResolverQueryLogConfigAssociations.go index 08551d9dac9..a1a111fb259 100644 --- a/service/route53resolver/api_op_ListResolverQueryLogConfigAssociations.go +++ b/service/route53resolver/api_op_ListResolverQueryLogConfigAssociations.go @@ -55,53 +55,52 @@ type ListResolverQueryLogConfigAssociationsInput struct { // specify the NextToken parameter, you must use the same value for SortBy, if any, // as in the previous request. Valid values include the following elements: // - // * + // * // CreationTime: The ID of the query logging association. // - // * Error: If the - // value of Status is FAILED, the value of Error indicates the cause: + // * Error: If the value of + // Status is FAILED, the value of Error indicates the cause: // - // * + // * // DESTINATION_NOT_FOUND: The specified destination (for example, an Amazon S3 // bucket) was deleted. // - // * ACCESS_DENIED: Permissions don't allow sending - // logs to the destination. + // * ACCESS_DENIED: Permissions don't allow sending logs to + // the destination. // - // If Status is a value other than FAILED, ERROR is - // null. + // If Status is a value other than FAILED, ERROR is null. // - // * Id: The ID of the query logging association + // * Id: + // The ID of the query logging association // - // * - // ResolverQueryLogConfigId: The ID of the query logging configuration + // * ResolverQueryLogConfigId: The ID of + // the query logging configuration // - // * - // ResourceId: The ID of the VPC that is associated with the query logging - // configuration + // * ResourceId: The ID of the VPC that is + // associated with the query logging configuration // - // * Status: The current status of the configuration. Valid - // values include the following: + // * Status: The current status of + // the configuration. Valid values include the following: // - // * CREATING: Resolver is creating an - // association between an Amazon VPC and a query logging configuration. + // * CREATING: Resolver is + // creating an association between an Amazon VPC and a query logging + // configuration. 
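The CreateResolverEndpoint and DeleteResolverEndpoint hunks above distinguish INBOUND endpoints (queries from your network into a VPC) from OUTBOUND endpoints (queries from a VPC out to your network). A hedged sketch of creating an inbound endpoint follows; the input field names, the IpAddressRequest shape, and the ResolverEndpointDirectionInbound constant are assumptions based on this package, and the subnet and security group IDs are placeholders.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/route53resolver"
	"github.com/aws/aws-sdk-go-v2/service/route53resolver/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := route53resolver.NewFromConfig(cfg)

	// INBOUND: Resolver forwards DNS queries from your network to the DNS
	// service for the VPC that the subnets below belong to.
	_, err = client.CreateResolverEndpoint(context.TODO(), &route53resolver.CreateResolverEndpointInput{
		CreatorRequestId: aws.String("example-inbound-2020-10-29"),
		Direction:        types.ResolverEndpointDirectionInbound,
		Name:             aws.String("inbound-from-datacenter"),
		SecurityGroupIds: []string{"sg-0123456789abcdef0"},
		IpAddresses: []types.IpAddressRequest{
			{SubnetId: aws.String("subnet-0123456789abcdef0")},
			{SubnetId: aws.String("subnet-0fedcba9876543210")},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}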
// - // * - // CREATED: The association between an Amazon VPC and a query logging configuration - // was successfully created. Resolver is logging queries that originate in the - // specified VPC. + // * CREATED: The association between an Amazon VPC and a query + // logging configuration was successfully created. Resolver is logging queries that + // originate in the specified VPC. // - // * DELETING: Resolver is deleting this query logging - // association. + // * DELETING: Resolver is deleting this query + // logging association. // - // * FAILED: Resolver either couldn't create or couldn't + // * FAILED: Resolver either couldn't create or couldn't // delete the query logging association. Here are two common causes: // - // * - // The specified destination (for example, an Amazon S3 bucket) was deleted. + // * The + // specified destination (for example, an Amazon S3 bucket) was deleted. // - // - // * Permissions don't allow sending logs to the destination. + // * + // Permissions don't allow sending logs to the destination. SortBy *string // If you specified a value for SortBy, the order that you want query logging diff --git a/service/route53resolver/api_op_ListResolverQueryLogConfigs.go b/service/route53resolver/api_op_ListResolverQueryLogConfigs.go index 438935d940a..1e0cbcc210f 100644 --- a/service/route53resolver/api_op_ListResolverQueryLogConfigs.go +++ b/service/route53resolver/api_op_ListResolverQueryLogConfigs.go @@ -54,57 +54,55 @@ type ListResolverQueryLogConfigsInput struct { // specify the NextToken parameter, you must use the same value for SortBy, if any, // as in the previous request. Valid values include the following elements: // - // * - // Arn: The ARN of the query logging configuration + // * Arn: + // The ARN of the query logging configuration // - // * AssociationCount: The - // number of VPCs that are associated with the specified configuration + // * AssociationCount: The number of + // VPCs that are associated with the specified configuration // - // * - // CreationTime: The date and time that Resolver returned when the configuration - // was created + // * CreationTime: The + // date and time that Resolver returned when the configuration was created // - // * CreatorRequestId: The value that was specified for - // CreatorRequestId when the configuration was created + // * + // CreatorRequestId: The value that was specified for CreatorRequestId when the + // configuration was created // - // * DestinationArn: The - // location that logs are sent to + // * DestinationArn: The location that logs are sent + // to // - // * Id: The ID of the configuration + // * Id: The ID of the configuration // - // * - // Name: The name of the configuration + // * Name: The name of the configuration // - // * OwnerId: The AWS account number of - // the account that created the configuration + // * + // OwnerId: The AWS account number of the account that created the configuration // - // * ShareStatus: Whether the - // configuration is shared with other AWS accounts or shared with the current - // account by another AWS account. Sharing is configured through AWS Resource - // Access Manager (AWS RAM). + // * + // ShareStatus: Whether the configuration is shared with other AWS accounts or + // shared with the current account by another AWS account. Sharing is configured + // through AWS Resource Access Manager (AWS RAM). // - // * Status: The current status of the - // configuration. Valid values include the following: + // * Status: The current status of + // the configuration. 
Valid values include the following: // - // * CREATING: Resolver - // is creating the query logging configuration. + // * CREATING: Resolver is + // creating the query logging configuration. // - // * CREATED: The query - // logging configuration was successfully created. Resolver is logging queries that + // * CREATED: The query logging + // configuration was successfully created. Resolver is logging queries that // originate in the specified VPC. // - // * DELETING: Resolver is deleting this - // query logging configuration. + // * DELETING: Resolver is deleting this query + // logging configuration. // - // * FAILED: Resolver either couldn't create - // or couldn't delete the query logging configuration. Here are two common - // causes: + // * FAILED: Resolver either couldn't create or couldn't + // delete the query logging configuration. Here are two common causes: // - // * The specified destination (for example, an Amazon S3 - // bucket) was deleted. + // * The + // specified destination (for example, an Amazon S3 bucket) was deleted. // - // * Permissions don't allow sending logs to the - // destination. + // * + // Permissions don't allow sending logs to the destination. SortBy *string // If you specified a value for SortBy, the order that you want query logging diff --git a/service/route53resolver/api_op_PutResolverQueryLogConfigPolicy.go b/service/route53resolver/api_op_PutResolverQueryLogConfigPolicy.go index b3fd15737ee..42dc6d53be3 100644 --- a/service/route53resolver/api_op_PutResolverQueryLogConfigPolicy.go +++ b/service/route53resolver/api_op_PutResolverQueryLogConfigPolicy.go @@ -40,16 +40,16 @@ type PutResolverQueryLogConfigPolicyInput struct { // operations that you want the account to be able to perform. You can specify the // following operations in the Actions section of the statement: // - // * + // * // route53resolver:AssociateResolverQueryLogConfig // - // * + // * // route53resolver:DisassociateResolverQueryLogConfig // - // * + // * // route53resolver:ListResolverQueryLogConfigAssociations // - // * + // * // route53resolver:ListResolverQueryLogConfigs // // In the Resource section of the diff --git a/service/route53resolver/api_op_PutResolverRulePolicy.go b/service/route53resolver/api_op_PutResolverRulePolicy.go index 46dc2fe0486..a2973e1ab63 100644 --- a/service/route53resolver/api_op_PutResolverRulePolicy.go +++ b/service/route53resolver/api_op_PutResolverRulePolicy.go @@ -40,18 +40,17 @@ type PutResolverRulePolicyInput struct { // account to be able to perform. You can specify the following operations in the // Actions section of the statement: // - // * route53resolver:GetResolverRule + // * route53resolver:GetResolverRule // - // * + // * // route53resolver:AssociateResolverRule // - // * + // * // route53resolver:DisassociateResolverRule // - // * - // route53resolver:ListResolverRules + // * route53resolver:ListResolverRules // - // * + // * // route53resolver:ListResolverRuleAssociations // // In the Resource section of the diff --git a/service/route53resolver/api_op_TagResource.go b/service/route53resolver/api_op_TagResource.go index 92df8554b7c..70bb65eb92b 100644 --- a/service/route53resolver/api_op_TagResource.go +++ b/service/route53resolver/api_op_TagResource.go @@ -32,28 +32,28 @@ type TagResourceInput struct { // The Amazon Resource Name (ARN) for the resource that you want to add tags to. 
To // get the ARN for a resource, use the applicable Get or List command: // - // * + // * // GetResolverEndpoint // (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_GetResolverEndpoint.html) // - // - // * GetResolverRule + // * + // GetResolverRule // (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_GetResolverRule.html) // - // - // * GetResolverRuleAssociation + // * + // GetResolverRuleAssociation // (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_GetResolverRuleAssociation.html) // - // - // * ListResolverEndpoints + // * + // ListResolverEndpoints // (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_ListResolverEndpoints.html) // - // - // * ListResolverRuleAssociations + // * + // ListResolverRuleAssociations // (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_ListResolverRuleAssociations.html) // - // - // * ListResolverRules + // * + // ListResolverRules // (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_ListResolverRules.html) // // This member is required. diff --git a/service/route53resolver/api_op_UntagResource.go b/service/route53resolver/api_op_UntagResource.go index 27a471ab8c4..f3b3ff7f244 100644 --- a/service/route53resolver/api_op_UntagResource.go +++ b/service/route53resolver/api_op_UntagResource.go @@ -31,28 +31,28 @@ type UntagResourceInput struct { // The Amazon Resource Name (ARN) for the resource that you want to remove tags // from. To get the ARN for a resource, use the applicable Get or List command: // - // - // * GetResolverEndpoint + // * + // GetResolverEndpoint // (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_GetResolverEndpoint.html) // - // - // * GetResolverRule + // * + // GetResolverRule // (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_GetResolverRule.html) // - // - // * GetResolverRuleAssociation + // * + // GetResolverRuleAssociation // (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_GetResolverRuleAssociation.html) // - // - // * ListResolverEndpoints + // * + // ListResolverEndpoints // (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_ListResolverEndpoints.html) // - // - // * ListResolverRuleAssociations + // * + // ListResolverRuleAssociations // (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_ListResolverRuleAssociations.html) // - // - // * ListResolverRules + // * + // ListResolverRules // (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_ListResolverRules.html) // // This member is required. diff --git a/service/route53resolver/types/types.go b/service/route53resolver/types/types.go index 7510b122e40..3c2ad549e20 100644 --- a/service/route53resolver/types/types.go +++ b/service/route53resolver/types/types.go @@ -35,157 +35,155 @@ type Filter struct { // as CREATOR_REQUEST_ID. Uppercase values for Name are still supported. // ListResolverEndpoints Valid values for Name include the following: // - // * + // * // CreatorRequestId: The value that you specified when you created the Resolver // endpoint. // - // * Direction: Whether you want to return inbound or outbound - // Resolver endpoints. If you specify DIRECTION for Name, specify INBOUND or - // OUTBOUND for Values. + // * Direction: Whether you want to return inbound or outbound Resolver + // endpoints. 
If you specify DIRECTION for Name, specify INBOUND or OUTBOUND for + // Values. // - // * HostVpcId: The ID of the VPC that inbound DNS - // queries pass through on the way from your network to your VPCs in a region, or - // the VPC that outbound queries pass through on the way from your VPCs to your - // network. In a CreateResolverEndpoint + // * HostVpcId: The ID of the VPC that inbound DNS queries pass through on + // the way from your network to your VPCs in a region, or the VPC that outbound + // queries pass through on the way from your VPCs to your network. In a + // CreateResolverEndpoint // (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_CreateResolverEndpoint.html) // request, SubnetId indirectly identifies the VPC. In a GetResolverEndpoint // (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_GetResolverEndpoint.html) // request, the VPC ID for a Resolver endpoint is returned in the HostVPCId // element. // - // * IpAddressCount: The number of IP addresses that you have - // associated with the Resolver endpoint. + // * IpAddressCount: The number of IP addresses that you have associated + // with the Resolver endpoint. // - // * Name: The name of the Resolver - // endpoint. + // * Name: The name of the Resolver endpoint. // - // * SecurityGroupIds: The IDs of the VPC security groups that you - // specified when you created the Resolver endpoint. + // * + // SecurityGroupIds: The IDs of the VPC security groups that you specified when you + // created the Resolver endpoint. // - // * Status: The status of - // the Resolver endpoint. If you specify Status for Name, specify one of the - // following status codes for Values: CREATING, OPERATIONAL, UPDATING, - // AUTO_RECOVERING, ACTION_NEEDED, or DELETING. For more information, see Status in - // ResolverEndpoint + // * Status: The status of the Resolver endpoint. + // If you specify Status for Name, specify one of the following status codes for + // Values: CREATING, OPERATIONAL, UPDATING, AUTO_RECOVERING, ACTION_NEEDED, or + // DELETING. For more information, see Status in ResolverEndpoint // (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_ResolverEndpoint.html). // // ListResolverRules // Valid values for Name include the following: // - // * CreatorRequestId: The value - // that you specified when you created the Resolver rule. + // * CreatorRequestId: The value that + // you specified when you created the Resolver rule. // - // * DomainName: The - // domain name for which Resolver is forwarding DNS queries to your network. In the - // value that you specify for Values, include a trailing dot (.) after the domain - // name. For example, if the domain name is example.com, specify the following - // value. Note the "." after com: example.com. + // * DomainName: The domain name + // for which Resolver is forwarding DNS queries to your network. In the value that + // you specify for Values, include a trailing dot (.) after the domain name. For + // example, if the domain name is example.com, specify the following value. Note + // the "." after com: example.com. // - // * Name: The name of the - // Resolver rule. - // - // * ResolverEndpointId: The ID of the Resolver endpoint that - // the Resolver rule is associated with. You can filter on the Resolver endpoint - // only for rules that have a value of FORWARD for RuleType. + // * Name: The name of the Resolver rule. // - // * Status: The - // status of the Resolver rule. 
If you specify Status for Name, specify one of the - // following status codes for Values: COMPLETE, DELETING, UPDATING, or FAILED. + // * + // ResolverEndpointId: The ID of the Resolver endpoint that the Resolver rule is + // associated with. You can filter on the Resolver endpoint only for rules that + // have a value of FORWARD for RuleType. // + // * Status: The status of the Resolver + // rule. If you specify Status for Name, specify one of the following status codes + // for Values: COMPLETE, DELETING, UPDATING, or FAILED. // - // * Type: The type of the Resolver rule. If you specify TYPE for Name, specify - // FORWARD or SYSTEM for Values. + // * Type: The type of the + // Resolver rule. If you specify TYPE for Name, specify FORWARD or SYSTEM for + // Values. // - // ListResolverRuleAssociations Valid values for - // Name include the following: + // ListResolverRuleAssociations Valid values for Name include the + // following: // - // * Name: The name of the Resolver rule - // association. + // * Name: The name of the Resolver rule association. // - // * ResolverRuleId: The ID of the Resolver rule that is - // associated with one or more VPCs. + // * + // ResolverRuleId: The ID of the Resolver rule that is associated with one or more + // VPCs. // - // * Status: The status of the Resolver rule - // association. If you specify Status for Name, specify one of the following status - // codes for Values: CREATING, COMPLETE, DELETING, or FAILED. + // * Status: The status of the Resolver rule association. If you specify + // Status for Name, specify one of the following status codes for Values: CREATING, + // COMPLETE, DELETING, or FAILED. // - // * VPCId: The ID - // of the VPC that the Resolver rule is associated - // with. + // * VPCId: The ID of the VPC that the Resolver + // rule is associated with. // - // ListResolverQueryLogConfigs Valid values for Name include the - // following: + // ListResolverQueryLogConfigs Valid values for Name + // include the following: // - // * Arn: The ARN for the query logging configuration. + // * Arn: The ARN for the query logging configuration. // - // * + // * // AssociationCount: The number of VPCs that are associated with the query logging // configuration. // - // * CreationTime: The date and time that the query logging + // * CreationTime: The date and time that the query logging // configuration was created, in Unix time format and Coordinated Universal Time // (UTC). // - // * CreatorRequestId: A unique string that identifies the request that + // * CreatorRequestId: A unique string that identifies the request that // created the query logging configuration. // - // * Destination: The AWS service - // that you want to forward query logs to. Valid values include the following: - // + // * Destination: The AWS service that + // you want to forward query logs to. Valid values include the following: // // * S3 // - // * CloudWatchLogs - // - // * KinesisFirehose + // * + // CloudWatchLogs // - // * DestinationArn: - // The ARN of the location that Resolver is sending query logs to. This value can - // be the ARN for an S3 bucket, a CloudWatch Logs log group, or a Kinesis Data - // Firehose delivery stream. + // * KinesisFirehose // - // * Id: The ID of the query logging configuration + // * DestinationArn: The ARN of the location + // that Resolver is sending query logs to. This value can be the ARN for an S3 + // bucket, a CloudWatch Logs log group, or a Kinesis Data Firehose delivery + // stream. 
// + // * Id: The ID of the query logging configuration // - // * Name: The name of the query logging configuration + // * Name: The name of + // the query logging configuration // - // * OwnerId: The AWS - // account ID for the account that created the query logging configuration. + // * OwnerId: The AWS account ID for the account + // that created the query logging configuration. // - // * - // ShareStatus: An indication of whether the query logging configuration is shared - // with other AWS accounts, or was shared with the current account by another AWS - // account. Valid values include: NOT_SHARED, SHARED_WITH_ME, or SHARED_BY_ME. + // * ShareStatus: An indication of + // whether the query logging configuration is shared with other AWS accounts, or + // was shared with the current account by another AWS account. Valid values + // include: NOT_SHARED, SHARED_WITH_ME, or SHARED_BY_ME. // - // - // * Status: The status of the query logging configuration. If you specify Status - // for Name, specify the applicable status code for Values: CREATING, CREATED, - // DELETING, or FAILED. For more information, see Status + // * Status: The status of + // the query logging configuration. If you specify Status for Name, specify the + // applicable status code for Values: CREATING, CREATED, DELETING, or FAILED. For + // more information, see Status // (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_ResolverQueryLogConfig.html#Route53Resolver-Type-route53resolver_ResolverQueryLogConfig-Status). // // ListResolverQueryLogConfigAssociations // Valid values for Name include the following: // - // * CreationTime: The date and - // time that the VPC was associated with the query logging configuration, in Unix - // time format and Coordinated Universal Time (UTC). - // - // * Error: If the value of - // Status is FAILED, specify the cause: DESTINATION_NOT_FOUND or ACCESS_DENIED. + // * CreationTime: The date and time + // that the VPC was associated with the query logging configuration, in Unix time + // format and Coordinated Universal Time (UTC). // + // * Error: If the value of Status is + // FAILED, specify the cause: DESTINATION_NOT_FOUND or ACCESS_DENIED. // - // * Id: The ID of the query logging association. + // * Id: The ID + // of the query logging association. // - // * ResolverQueryLogConfigId: - // The ID of the query logging configuration that a VPC is associated with. + // * ResolverQueryLogConfigId: The ID of the + // query logging configuration that a VPC is associated with. // - // * - // ResourceId: The ID of the Amazon VPC that is associated with the query logging - // configuration. + // * ResourceId: The ID + // of the Amazon VPC that is associated with the query logging configuration. // - // * Status: The status of the query logging association. If - // you specify Status for Name, specify the applicable status code for Values: - // CREATING, CREATED, DELETING, or FAILED. For more information, see Status + // * + // Status: The status of the query logging association. If you specify Status for + // Name, specify the applicable status code for Values: CREATING, CREATED, + // DELETING, or FAILED. For more information, see Status // (https://docs.aws.amazon.com/API_route53resolver_ResolverQueryLogConfigAssociation.html#Route53Resolver-Type-route53resolver_ResolverQueryLogConfigAssociation-Status). 
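The Filter documentation above lists the names each List operation accepts; for ListResolverEndpoints these include CreatorRequestId, Direction, HostVpcId, IpAddressCount, Name, SecurityGroupIds, and Status. A hedged sketch of filtering endpoints by direction; the ListResolverEndpoints input and output shapes are assumed from this package.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/route53resolver"
	"github.com/aws/aws-sdk-go-v2/service/route53resolver/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := route53resolver.NewFromConfig(cfg)

	// Per the doc comment above, the DIRECTION filter takes INBOUND or OUTBOUND.
	out, err := client.ListResolverEndpoints(context.TODO(), &route53resolver.ListResolverEndpointsInput{
		Filters: []types.Filter{
			{Name: aws.String("Direction"), Values: []string{"INBOUND"}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, ep := range out.ResolverEndpoints {
		fmt.Println(aws.ToString(ep.Name), ep.Status)
	}
}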
Name *string @@ -291,9 +289,9 @@ type ResolverEndpoint struct { // Indicates whether the Resolver endpoint allows inbound or outbound DNS // queries: // - // * INBOUND: allows DNS queries to your VPC from your network + // * INBOUND: allows DNS queries to your VPC from your network // - // * + // * // OUTBOUND: allows DNS queries from your VPC to your network Direction ResolverEndpointDirection @@ -326,41 +324,41 @@ type ResolverEndpoint struct { // A code that specifies the current status of the Resolver endpoint. Valid values // include the following: // - // * CREATING: Resolver is creating and configuring one - // or more Amazon VPC network interfaces for this endpoint. + // * CREATING: Resolver is creating and configuring one or + // more Amazon VPC network interfaces for this endpoint. // - // * OPERATIONAL: The - // Amazon VPC network interfaces for this endpoint are correctly configured and - // able to pass inbound or outbound DNS queries between your network and - // Resolver. + // * OPERATIONAL: The Amazon + // VPC network interfaces for this endpoint are correctly configured and able to + // pass inbound or outbound DNS queries between your network and Resolver. // - // * UPDATING: Resolver is associating or disassociating one or more - // network interfaces with this endpoint. + // * + // UPDATING: Resolver is associating or disassociating one or more network + // interfaces with this endpoint. // - // * AUTO_RECOVERING: Resolver is - // trying to recover one or more of the network interfaces that are associated with - // this endpoint. During the recovery process, the endpoint functions with limited - // capacity because of the limit on the number of DNS queries per IP address (per - // network interface). For the current limit, see Limits on Route 53 Resolver + // * AUTO_RECOVERING: Resolver is trying to recover + // one or more of the network interfaces that are associated with this endpoint. + // During the recovery process, the endpoint functions with limited capacity + // because of the limit on the number of DNS queries per IP address (per network + // interface). For the current limit, see Limits on Route 53 Resolver // (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DNSLimitations.html#limits-api-entities-resolver). // - // - // * ACTION_NEEDED: This endpoint is unhealthy, and Resolver can't automatically + // * + // ACTION_NEEDED: This endpoint is unhealthy, and Resolver can't automatically // recover it. To resolve the problem, we recommend that you check each IP address // that you associated with the endpoint. For each IP address that isn't available, // add another IP address and then delete the IP address that isn't available. (An // endpoint must always include at least two IP addresses.) A status of // ACTION_NEEDED can have a variety of causes. Here are two common causes: // + // * One + // or more of the network interfaces that are associated with the endpoint were + // deleted using Amazon VPC. // - // * One or more of the network interfaces that are associated with the endpoint - // were deleted using Amazon VPC. + // * The network interface couldn't be created for some + // reason that's outside the control of Resolver. // - // * The network interface couldn't be - // created for some reason that's outside the control of Resolver. - // - // * DELETING: - // Resolver is deleting this endpoint and the associated network interfaces. + // * DELETING: Resolver is deleting + // this endpoint and the associated network interfaces. 
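The endpoint status codes documented above (CREATING, OPERATIONAL, UPDATING, AUTO_RECOVERING, ACTION_NEEDED, DELETING) drive how a caller should react. A sketch of that mapping; the Go constant names here are an assumption, inferred from the CamelCase convention this change set applies elsewhere rather than taken from this hunk.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/route53resolver/types"
)

// endpointNeedsAttention reports whether an endpoint is in a state the
// documentation above says an operator must act on or wait out.
func endpointNeedsAttention(s types.ResolverEndpointStatus) (bool, string) {
	switch s {
	case types.ResolverEndpointStatusOperational:
		return false, "endpoint is healthy"
	case types.ResolverEndpointStatusAutoRecovering:
		return true, "recovering; running with reduced per-IP query capacity"
	case types.ResolverEndpointStatusActionNeeded:
		return true, "unhealthy; replace unavailable IP addresses (keep at least two)"
	default:
		return true, fmt.Sprintf("transitional status: %s", s)
	}
}

func main() {
	fmt.Println(endpointNeedsAttention(types.ResolverEndpointStatusActionNeeded))
}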
Status ResolverEndpointStatus // A detailed description of the status of the Resolver endpoint. @@ -415,24 +413,24 @@ type ResolverQueryLogConfig struct { // The status of the specified query logging configuration. Valid values include // the following: // - // * CREATING: Resolver is creating the query logging + // * CREATING: Resolver is creating the query logging // configuration. // - // * CREATED: The query logging configuration was successfully + // * CREATED: The query logging configuration was successfully // created. Resolver is logging queries that originate in the specified VPC. // - // * + // * // DELETING: Resolver is deleting this query logging configuration. // - // * FAILED: + // * FAILED: // Resolver can't deliver logs to the location that is specified in the query // logging configuration. Here are two common causes: // - // * The specified - // destination (for example, an Amazon S3 bucket) was deleted. + // * The specified destination + // (for example, an Amazon S3 bucket) was deleted. // - // * - // Permissions don't allow sending logs to the destination. + // * Permissions don't allow + // sending logs to the destination. Status ResolverQueryLogConfigStatus } @@ -454,15 +452,15 @@ type ResolverQueryLogConfigAssociation struct { // If the value of Status is FAILED, the value of Error indicates the cause: // - // * + // * // DESTINATION_NOT_FOUND: The specified destination (for example, an Amazon S3 // bucket) was deleted. // - // * ACCESS_DENIED: Permissions don't allow sending logs - // to the destination. + // * ACCESS_DENIED: Permissions don't allow sending logs to + // the destination. // - // If the value of Status is a value other than FAILED, Error - // is null. + // If the value of Status is a value other than FAILED, Error is + // null. Error ResolverQueryLogConfigAssociationError // Contains additional information about the error. If the value or Error is null, @@ -482,19 +480,18 @@ type ResolverQueryLogConfigAssociation struct { // The status of the specified query logging association. Valid values include the // following: // - // * CREATING: Resolver is creating an association between an - // Amazon VPC and a query logging configuration. + // * CREATING: Resolver is creating an association between an Amazon + // VPC and a query logging configuration. // - // * CREATED: The association - // between an Amazon VPC and a query logging configuration was successfully - // created. Resolver is logging queries that originate in the specified VPC. + // * CREATED: The association between an + // Amazon VPC and a query logging configuration was successfully created. Resolver + // is logging queries that originate in the specified VPC. // - // * - // DELETING: Resolver is deleting this query logging association. + // * DELETING: Resolver is + // deleting this query logging association. // - // * FAILED: - // Resolver either couldn't create or couldn't delete the query logging - // association. + // * FAILED: Resolver either couldn't + // create or couldn't delete the query logging association. Status ResolverQueryLogConfigAssociationStatus } diff --git a/service/s3/api_op_AbortMultipartUpload.go b/service/s3/api_op_AbortMultipartUpload.go index 54542a7a4c9..bb05eae957f 100644 --- a/service/s3/api_op_AbortMultipartUpload.go +++ b/service/s3/api_op_AbortMultipartUpload.go @@ -26,23 +26,23 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). 
The // following operations are related to AbortMultipartUpload: // -// * +// * // CreateMultipartUpload // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) // -// -// * UploadPart +// * +// UploadPart // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) // -// * +// * // CompleteMultipartUpload // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) // -// -// * ListParts +// * +// ListParts // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) // -// * +// * // ListMultipartUploads // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) func (c *Client) AbortMultipartUpload(ctx context.Context, params *AbortMultipartUploadInput, optFns ...func(*Options)) (*AbortMultipartUploadOutput, error) { diff --git a/service/s3/api_op_CompleteMultipartUpload.go b/service/s3/api_op_CompleteMultipartUpload.go index fe73e513ef5..7eacaf65fa0 100644 --- a/service/s3/api_op_CompleteMultipartUpload.go +++ b/service/s3/api_op_CompleteMultipartUpload.go @@ -39,61 +39,60 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). // CompleteMultipartUpload has the following special errors: // -// * Error code: +// * Error code: // EntityTooSmall // -// * Description: Your proposed upload is smaller than the -// minimum allowed object size. Each part must be at least 5 MB in size, except the -// last part. +// * Description: Your proposed upload is smaller than the minimum +// allowed object size. Each part must be at least 5 MB in size, except the last +// part. // -// * 400 Bad Request +// * 400 Bad Request // -// * Error code: InvalidPart +// * Error code: InvalidPart // -// * -// Description: One or more of the specified parts could not be found. The part -// might not have been uploaded, or the specified entity tag might not have matched -// the part's entity tag. +// * Description: One or more +// of the specified parts could not be found. The part might not have been +// uploaded, or the specified entity tag might not have matched the part's entity +// tag. // -// * 400 Bad Request +// * 400 Bad Request // -// * Error code: -// InvalidPartOrder +// * Error code: InvalidPartOrder // -// * Description: The list of parts was not in ascending -// order. The parts list must be specified in order by part number. +// * Description: The list +// of parts was not in ascending order. The parts list must be specified in order +// by part number. // -// * 400 -// Bad Request +// * 400 Bad Request // -// * Error code: NoSuchUpload +// * Error code: NoSuchUpload // -// * Description: The -// specified multipart upload does not exist. The upload ID might be invalid, or -// the multipart upload might have been aborted or completed. +// * Description: +// The specified multipart upload does not exist. The upload ID might be invalid, +// or the multipart upload might have been aborted or completed. 
// -// * 404 Not +// * 404 Not // Found // // The following operations are related to CompleteMultipartUpload: // -// * +// * // CreateMultipartUpload // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) // -// -// * UploadPart +// * +// UploadPart // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) // -// * +// * // AbortMultipartUpload // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) // -// -// * ListParts +// * +// ListParts // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) // -// * +// * // ListMultipartUploads // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) func (c *Client) CompleteMultipartUpload(ctx context.Context, params *CompleteMultipartUploadInput, optFns ...func(*Options)) (*CompleteMultipartUploadOutput, error) { diff --git a/service/s3/api_op_CopyObject.go b/service/s3/api_op_CopyObject.go index d6cc537747f..4ed3f8dea02 100644 --- a/service/s3/api_op_CopyObject.go +++ b/service/s3/api_op_CopyObject.go @@ -61,37 +61,36 @@ import ( // such as whether the Etag matches or whether the object was modified before or // after a specified date, use the following request parameters: // -// * +// * // x-amz-copy-source-if-match // -// * x-amz-copy-source-if-none-match +// * x-amz-copy-source-if-none-match // -// * +// * // x-amz-copy-source-if-unmodified-since // -// * -// x-amz-copy-source-if-modified-since +// * x-amz-copy-source-if-modified-since // -// If both the x-amz-copy-source-if-match and -// x-amz-copy-source-if-unmodified-since headers are present in the request and -// evaluate as follows, Amazon S3 returns 200 OK and copies the data: +// If +// both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since +// headers are present in the request and evaluate as follows, Amazon S3 returns +// 200 OK and copies the data: // -// * -// x-amz-copy-source-if-match condition evaluates to true +// * x-amz-copy-source-if-match condition evaluates to +// true // -// * -// x-amz-copy-source-if-unmodified-since condition evaluates to false +// * x-amz-copy-source-if-unmodified-since condition evaluates to false // -// If both the -// x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since headers -// are present in the request and evaluate as follows, Amazon S3 returns the 412 -// Precondition Failed response code: +// If +// both the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since +// headers are present in the request and evaluate as follows, Amazon S3 returns +// the 412 Precondition Failed response code: // -// * x-amz-copy-source-if-none-match +// * x-amz-copy-source-if-none-match // condition evaluates to false // -// * x-amz-copy-source-if-modified-since -// condition evaluates to true +// * x-amz-copy-source-if-modified-since condition +// evaluates to true // // All headers with the x-amz- prefix, including // x-amz-copy-source, must be signed. Encryption The source object that you are @@ -137,10 +136,10 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html). 
The // following operations are related to CopyObject: // -// * PutObject +// * PutObject // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) // -// * +// * // GetObject // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) // @@ -189,13 +188,13 @@ type CopyObjectInput struct { // through an access point // (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-points.html): // - // * For + // * For // objects not accessed through an access point, specify the name of the source // bucket and the key of the source object, separated by a slash (/). For example, // to copy the object reports/january.pdf from the bucket awsexamplebucket, use // awsexamplebucket/reports/january.pdf. The value must be URL encoded. // - // * For + // * For // objects accessed through access points, specify the Amazon Resource Name (ARN) // of the object as accessed through the access point, in the format // arn:aws:s3:::accesspoint//object/. For example, to copy the object diff --git a/service/s3/api_op_CreateBucket.go b/service/s3/api_op_CreateBucket.go index 06f9c886cd0..04ce4df3efd 100644 --- a/service/s3/api_op_CreateBucket.go +++ b/service/s3/api_op_CreateBucket.go @@ -38,14 +38,14 @@ import ( // or groups that should be granted specific permissions on the bucket. There are // two ways to grant the appropriate permissions using the request headers. // -// * +// * // Specify a canned ACL using the x-amz-acl request header. Amazon S3 supports a // set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined // set of grantees and permissions. For more information, see Canned ACL // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). // -// -// * Specify access permissions explicitly using the x-amz-grant-read, +// * +// Specify access permissions explicitly using the x-amz-grant-read, // x-amz-grant-write, x-amz-grant-read-acp, x-amz-grant-write-acp, and // x-amz-grant-full-control headers. These headers map to the set of permissions // Amazon S3 supports in an ACL. For more information, see Access control list @@ -53,55 +53,52 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). You specify // each grantee as a type=value pair, where the type is one of the following: // +// * id +// – if the value specified is the canonical user ID of an AWS account // -// * id – if the value specified is the canonical user ID of an AWS account -// -// -// * uri – if you are granting permissions to a predefined group +// * uri – if +// you are granting permissions to a predefined group // -// * -// emailAddress – if the value specified is the email address of an AWS account -// Using email addresses to specify a grantee is only supported in the following -// AWS Regions: +// * emailAddress – if the +// value specified is the email address of an AWS account Using email addresses to +// specify a grantee is only supported in the following AWS Regions: // -// * US East (N. Virginia) +// * US East (N. +// Virginia) // -// * US West (N. -// California) +// * US West (N. 
California) // -// * US West (Oregon) +// * US West (Oregon) // -// * Asia Pacific +// * Asia Pacific // (Singapore) // -// * Asia Pacific (Sydney) +// * Asia Pacific (Sydney) // -// * Asia Pacific -// (Tokyo) +// * Asia Pacific (Tokyo) // -// * Europe (Ireland) +// * Europe +// (Ireland) // -// * South America (São -// Paulo) +// * South America (São Paulo) // -// For a list of all the Amazon S3 supported Regions and endpoints, -// see Regions and Endpoints +// For a list of all the Amazon S3 +// supported Regions and endpoints, see Regions and Endpoints // (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the AWS // General Reference. // -// For example, the following x-amz-grant-read header -// grants the AWS accounts identified by account IDs permissions to read object -// data and its metadata: x-amz-grant-read: id="11112222333", -// id="444455556666" +// For example, the following x-amz-grant-read header grants +// the AWS accounts identified by account IDs permissions to read object data and +// its metadata: x-amz-grant-read: id="11112222333", id="444455556666" // -// You can use either a canned ACL or specify access permissions -// explicitly. You cannot do both. The following operations are related to -// CreateBucket: +// You can use +// either a canned ACL or specify access permissions explicitly. You cannot do +// both. The following operations are related to CreateBucket: // -// * PutObject +// * PutObject // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) // -// * +// * // DeleteBucket // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) func (c *Client) CreateBucket(ctx context.Context, params *CreateBucketInput, optFns ...func(*Options)) (*CreateBucketOutput, error) { diff --git a/service/s3/api_op_CreateMultipartUpload.go b/service/s3/api_op_CreateMultipartUpload.go index dd46243ff9a..0d528f55051 100644 --- a/service/s3/api_op_CreateMultipartUpload.go +++ b/service/s3/api_op_CreateMultipartUpload.go @@ -62,15 +62,15 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html). // Access Permissions When copying an object, you can optionally specify the // accounts or groups that should be granted specific permissions on the new -// object. There are two ways to grant the permissions using the request headers: +// object. There are two ways to grant the permissions using the request +// headers: // -// -// * Specify a canned ACL with the x-amz-acl request header. For more information, -// see Canned ACL +// * Specify a canned ACL with the x-amz-acl request header. For more +// information, see Canned ACL // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). // -// -// * Specify access permissions explicitly with the x-amz-grant-read, +// * +// Specify access permissions explicitly with the x-amz-grant-read, // x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control // headers. These parameters map to the set of permissions that Amazon S3 supports // in an ACL. For more information, see Access Control List (ACL) Overview @@ -85,20 +85,20 @@ import ( // option you use depends on whether you want to use AWS managed encryption keys or // provide your own encryption key. 
// -// * Use encryption keys managed by Amazon S3 -// or customer master keys (CMKs) stored in AWS Key Management Service (AWS KMS) – -// If you want AWS to manage the keys used to encrypt data, specify the following +// * Use encryption keys managed by Amazon S3 or +// customer master keys (CMKs) stored in AWS Key Management Service (AWS KMS) – If +// you want AWS to manage the keys used to encrypt data, specify the following // headers in the request. // -// * x-amz-server-side-encryption +// * x-amz-server-side-encryption // -// * +// * // x-amz-server-side-encryption-aws-kms-key-id // -// * +// * // x-amz-server-side-encryption-context // -// If you specify +// If you specify // x-amz-server-side-encryption:aws:kms, but don't provide // x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the AWS managed CMK // in AWS KMS to protect the data. All GET and PUT requests for an object protected @@ -107,20 +107,20 @@ import ( // see Protecting Data Using Server-Side Encryption with CMKs stored in AWS KMS // (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). // -// -// * Use customer-provided encryption keys – If you want to manage your own +// * +// Use customer-provided encryption keys – If you want to manage your own // encryption keys, provide all the following headers in the request. // -// * +// * // x-amz-server-side-encryption-customer-algorithm // -// * +// * // x-amz-server-side-encryption-customer-key // -// * +// * // x-amz-server-side-encryption-customer-key-MD5 // -// For more information about +// For more information about // server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting // Data Using Server-Side Encryption with CMKs stored in AWS KMS // (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). @@ -136,13 +136,13 @@ import ( // this operation, you can grant access permissions using one of the following two // methods: // -// * Specify a canned ACL (x-amz-acl) — Amazon S3 supports a set of +// * Specify a canned ACL (x-amz-acl) — Amazon S3 supports a set of // predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of // grantees and permissions. For more information, see Canned ACL // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). // -// -// * Specify access permissions explicitly — To explicitly grant access permissions +// * +// Specify access permissions explicitly — To explicitly grant access permissions // to specific AWS accounts or groups, use the following headers. Each header maps // to specific permissions that Amazon S3 supports in an ACL. For more information, // see Access Control List (ACL) Overview @@ -150,81 +150,77 @@ import ( // header, you specify a list of grantees who get the specific permission. 
To grant // permissions explicitly, use: // -// * x-amz-grant-read +// * x-amz-grant-read // -// * -// x-amz-grant-write +// * x-amz-grant-write // -// * x-amz-grant-read-acp +// * +// x-amz-grant-read-acp // -// * -// x-amz-grant-write-acp +// * x-amz-grant-write-acp // -// * x-amz-grant-full-control +// * x-amz-grant-full-control // -// You specify each -// grantee as a type=value pair, where the type is one of the following: -// -// * -// id – if the value specified is the canonical user ID of an AWS account +// You +// specify each grantee as a type=value pair, where the type is one of the +// following: // +// * id – if the value specified is the canonical user ID of an AWS +// account // // * uri – if you are granting permissions to a predefined group // -// * +// * // emailAddress – if the value specified is the email address of an AWS account // Using email addresses to specify a grantee is only supported in the following // AWS Regions: // -// * US East (N. Virginia) +// * US East (N. Virginia) // -// * US West (N. -// California) +// * US West (N. California) // -// * US West (Oregon) +// * US West +// (Oregon) // -// * Asia Pacific -// (Singapore) +// * Asia Pacific (Singapore) // -// * Asia Pacific (Sydney) +// * Asia Pacific (Sydney) // -// * Asia Pacific +// * Asia Pacific // (Tokyo) // -// * Europe (Ireland) +// * Europe (Ireland) // -// * South America (São -// Paulo) +// * South America (São Paulo) // -// For a list of all the Amazon S3 supported Regions and endpoints, -// see Regions and Endpoints +// For a list of all the +// Amazon S3 supported Regions and endpoints, see Regions and Endpoints // (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the AWS // General Reference. // -// For example, the following x-amz-grant-read header -// grants the AWS accounts identified by account IDs permissions to read object -// data and its metadata: x-amz-grant-read: id="11112222333", -// id="444455556666" +// For example, the following x-amz-grant-read header grants +// the AWS accounts identified by account IDs permissions to read object data and +// its metadata: x-amz-grant-read: id="11112222333", id="444455556666" // -// The following operations are related to -// CreateMultipartUpload: +// The +// following operations are related to CreateMultipartUpload: // -// * UploadPart +// * UploadPart // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) // -// * +// * // CompleteMultipartUpload // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) // -// -// * AbortMultipartUpload +// * +// AbortMultipartUpload // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) // -// -// * ListParts +// * +// ListParts // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) // -// * +// * // ListMultipartUploads // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) func (c *Client) CreateMultipartUpload(ctx context.Context, params *CreateMultipartUploadInput, optFns ...func(*Options)) (*CreateMultipartUploadOutput, error) { diff --git a/service/s3/api_op_DeleteBucket.go b/service/s3/api_op_DeleteBucket.go index 3cd953a7012..022deddc19c 100644 --- a/service/s3/api_op_DeleteBucket.go +++ b/service/s3/api_op_DeleteBucket.go @@ -14,10 +14,10 @@ import ( // markers) in the bucket must be deleted before the bucket itself can be deleted. 
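Editor's note: the multipart-upload hunks above only reflow the generated doc comments for CreateMultipartUpload, UploadPart, CompleteMultipartUpload and AbortMultipartUpload; the call sequence itself is unchanged. A minimal sketch of that sequence, not part of the generated diff, using the v2 SDK modules and placeholder bucket/key names:

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Start a multipart upload. The SSE-KMS setting is optional and is shown
	// only because the reflowed comments above describe it; "my-bucket" and
	// "my-key" are placeholders.
	created, err := client.CreateMultipartUpload(ctx, &s3.CreateMultipartUploadInput{
		Bucket:               aws.String("my-bucket"),
		Key:                  aws.String("my-key"),
		ServerSideEncryption: types.ServerSideEncryptionAwsKms,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Parts would normally be sent with UploadPart (at least 5 MB each except
	// the last) and committed with CompleteMultipartUpload. This sketch aborts
	// instead, which stops the upload and frees any stored parts.
	_, err = client.AbortMultipartUpload(ctx, &s3.AbortMultipartUploadInput{
		Bucket:   aws.String("my-bucket"),
		Key:      aws.String("my-key"),
		UploadId: created.UploadId,
	})
	if err != nil {
		log.Fatal(err)
	}
}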
// Related Resources // -// * CreateBucket +// * CreateBucket // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) // -// * +// * // DeleteObject // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) func (c *Client) DeleteBucket(ctx context.Context, params *DeleteBucketInput, optFns ...func(*Options)) (*DeleteBucketOutput, error) { diff --git a/service/s3/api_op_DeleteBucketAnalyticsConfiguration.go b/service/s3/api_op_DeleteBucketAnalyticsConfiguration.go index dcb2ccf1b7e..46539166f96 100644 --- a/service/s3/api_op_DeleteBucketAnalyticsConfiguration.go +++ b/service/s3/api_op_DeleteBucketAnalyticsConfiguration.go @@ -24,16 +24,16 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html). // The following operations are related to DeleteBucketAnalyticsConfiguration: // -// -// * GetBucketAnalyticsConfiguration +// * +// GetBucketAnalyticsConfiguration // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html) // -// -// * ListBucketAnalyticsConfigurations +// * +// ListBucketAnalyticsConfigurations // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html) // -// -// * PutBucketAnalyticsConfiguration +// * +// PutBucketAnalyticsConfiguration // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html) func (c *Client) DeleteBucketAnalyticsConfiguration(ctx context.Context, params *DeleteBucketAnalyticsConfigurationInput, optFns ...func(*Options)) (*DeleteBucketAnalyticsConfigurationOutput, error) { if params == nil { diff --git a/service/s3/api_op_DeleteBucketCors.go b/service/s3/api_op_DeleteBucketCors.go index 586eedc2111..cb00ce1e156 100644 --- a/service/s3/api_op_DeleteBucketCors.go +++ b/service/s3/api_op_DeleteBucketCors.go @@ -17,10 +17,10 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon Simple // Storage Service Developer Guide. Related Resources: // -// * PutBucketCors +// * PutBucketCors // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html) // -// * +// * // RESTOPTIONSobject // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html) func (c *Client) DeleteBucketCors(ctx context.Context, params *DeleteBucketCorsInput, optFns ...func(*Options)) (*DeleteBucketCorsOutput, error) { diff --git a/service/s3/api_op_DeleteBucketEncryption.go b/service/s3/api_op_DeleteBucketEncryption.go index 9f568c25bba..ef640ab47a4 100644 --- a/service/s3/api_op_DeleteBucketEncryption.go +++ b/service/s3/api_op_DeleteBucketEncryption.go @@ -24,12 +24,12 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) in the // Amazon Simple Storage Service Developer Guide. 
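Editor's note: the delete-subresource operations touched above (DeleteBucketCors, DeleteBucketEncryption, and so on) all take only a bucket name. An illustrative sketch, assuming a *s3.Client built as in the earlier example; the helper name is mine:

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// removeBucketEncryption drops the default-encryption configuration from a
// bucket; the bucket name is supplied by the caller.
func removeBucketEncryption(ctx context.Context, client *s3.Client, bucket string) error {
	_, err := client.DeleteBucketEncryption(ctx, &s3.DeleteBucketEncryptionInput{
		Bucket: aws.String(bucket),
	})
	return err
}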
Related Resources // -// * +// * // PutBucketEncryption // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html) // -// -// * GetBucketEncryption +// * +// GetBucketEncryption // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html) func (c *Client) DeleteBucketEncryption(ctx context.Context, params *DeleteBucketEncryptionInput, optFns ...func(*Options)) (*DeleteBucketEncryptionOutput, error) { if params == nil { diff --git a/service/s3/api_op_DeleteBucketInventoryConfiguration.go b/service/s3/api_op_DeleteBucketInventoryConfiguration.go index 189a41b0846..b123e557601 100644 --- a/service/s3/api_op_DeleteBucketInventoryConfiguration.go +++ b/service/s3/api_op_DeleteBucketInventoryConfiguration.go @@ -23,16 +23,16 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html). // Operations related to DeleteBucketInventoryConfiguration include: // -// * +// * // GetBucketInventoryConfiguration // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html) // -// -// * PutBucketInventoryConfiguration +// * +// PutBucketInventoryConfiguration // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html) // -// -// * ListBucketInventoryConfigurations +// * +// ListBucketInventoryConfigurations // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html) func (c *Client) DeleteBucketInventoryConfiguration(ctx context.Context, params *DeleteBucketInventoryConfigurationInput, optFns ...func(*Options)) (*DeleteBucketInventoryConfigurationOutput, error) { if params == nil { diff --git a/service/s3/api_op_DeleteBucketLifecycle.go b/service/s3/api_op_DeleteBucketLifecycle.go index 9c6b6ac28ef..5ef4cb180b9 100644 --- a/service/s3/api_op_DeleteBucketLifecycle.go +++ b/service/s3/api_op_DeleteBucketLifecycle.go @@ -23,11 +23,11 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#intro-lifecycle-rules-actions). // Related actions include: // -// * PutBucketLifecycleConfiguration +// * PutBucketLifecycleConfiguration // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) // -// -// * GetBucketLifecycleConfiguration +// * +// GetBucketLifecycleConfiguration // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) func (c *Client) DeleteBucketLifecycle(ctx context.Context, params *DeleteBucketLifecycleInput, optFns ...func(*Options)) (*DeleteBucketLifecycleOutput, error) { if params == nil { diff --git a/service/s3/api_op_DeleteBucketMetricsConfiguration.go b/service/s3/api_op_DeleteBucketMetricsConfiguration.go index 9d37dc32772..8bfa685dff1 100644 --- a/service/s3/api_op_DeleteBucketMetricsConfiguration.go +++ b/service/s3/api_op_DeleteBucketMetricsConfiguration.go @@ -25,20 +25,20 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). 
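Editor's note: unlike the bucket-wide subresources, the metrics, inventory and analytics configurations reformatted above are addressed by the Id they were created with. A sketch (helper name mine, not part of the diff):

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// removeMetricsConfiguration deletes one metrics configuration identified by
// its Id.
func removeMetricsConfiguration(ctx context.Context, client *s3.Client, bucket, id string) error {
	_, err := client.DeleteBucketMetricsConfiguration(ctx, &s3.DeleteBucketMetricsConfigurationInput{
		Bucket: aws.String(bucket),
		Id:     aws.String(id),
	})
	return err
}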
// The following operations are related to DeleteBucketMetricsConfiguration: // -// * +// * // GetBucketMetricsConfiguration // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html) // -// -// * PutBucketMetricsConfiguration +// * +// PutBucketMetricsConfiguration // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) // -// -// * ListBucketMetricsConfigurations +// * +// ListBucketMetricsConfigurations // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html) // -// -// * Monitoring Metrics with Amazon CloudWatch +// * +// Monitoring Metrics with Amazon CloudWatch // (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) func (c *Client) DeleteBucketMetricsConfiguration(ctx context.Context, params *DeleteBucketMetricsConfigurationInput, optFns ...func(*Options)) (*DeleteBucketMetricsConfigurationOutput, error) { if params == nil { diff --git a/service/s3/api_op_DeleteBucketOwnershipControls.go b/service/s3/api_op_DeleteBucketOwnershipControls.go index 603c5d7ef94..78b98132c0a 100644 --- a/service/s3/api_op_DeleteBucketOwnershipControls.go +++ b/service/s3/api_op_DeleteBucketOwnershipControls.go @@ -18,10 +18,10 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html). // The following operations are related to DeleteBucketOwnershipControls: // -// * +// * // GetBucketOwnershipControls // -// * PutBucketOwnershipControls +// * PutBucketOwnershipControls func (c *Client) DeleteBucketOwnershipControls(ctx context.Context, params *DeleteBucketOwnershipControlsInput, optFns ...func(*Options)) (*DeleteBucketOwnershipControlsOutput, error) { if params == nil { params = &DeleteBucketOwnershipControlsInput{} diff --git a/service/s3/api_op_DeleteBucketPolicy.go b/service/s3/api_op_DeleteBucketPolicy.go index 8cb36c11b0d..ad0e3dd1fd4 100644 --- a/service/s3/api_op_DeleteBucketPolicy.go +++ b/service/s3/api_op_DeleteBucketPolicy.go @@ -25,10 +25,10 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). The // following operations are related to DeleteBucketPolicy // -// * CreateBucket +// * CreateBucket // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) // -// * +// * // DeleteObject // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) func (c *Client) DeleteBucketPolicy(ctx context.Context, params *DeleteBucketPolicyInput, optFns ...func(*Options)) (*DeleteBucketPolicyOutput, error) { diff --git a/service/s3/api_op_DeleteBucketReplication.go b/service/s3/api_op_DeleteBucketReplication.go index 05cfddacca0..9917bd2d534 100644 --- a/service/s3/api_op_DeleteBucketReplication.go +++ b/service/s3/api_op_DeleteBucketReplication.go @@ -24,11 +24,11 @@ import ( // S3 Developer Guide. 
The following operations are related to // DeleteBucketReplication: // -// * PutBucketReplication +// * PutBucketReplication // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html) // -// -// * GetBucketReplication +// * +// GetBucketReplication // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html) func (c *Client) DeleteBucketReplication(ctx context.Context, params *DeleteBucketReplicationInput, optFns ...func(*Options)) (*DeleteBucketReplicationOutput, error) { if params == nil { diff --git a/service/s3/api_op_DeleteBucketTagging.go b/service/s3/api_op_DeleteBucketTagging.go index fc7fb960cd0..59c2eff94c6 100644 --- a/service/s3/api_op_DeleteBucketTagging.go +++ b/service/s3/api_op_DeleteBucketTagging.go @@ -15,11 +15,11 @@ import ( // owner has this permission and can grant this permission to others. The following // operations are related to DeleteBucketTagging: // -// * GetBucketTagging +// * GetBucketTagging // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html) // -// -// * PutBucketTagging +// * +// PutBucketTagging // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html) func (c *Client) DeleteBucketTagging(ctx context.Context, params *DeleteBucketTaggingInput, optFns ...func(*Options)) (*DeleteBucketTaggingOutput, error) { if params == nil { diff --git a/service/s3/api_op_DeleteBucketWebsite.go b/service/s3/api_op_DeleteBucketWebsite.go index 1e51cad3c5e..29d6d0af41e 100644 --- a/service/s3/api_op_DeleteBucketWebsite.go +++ b/service/s3/api_op_DeleteBucketWebsite.go @@ -24,11 +24,11 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html). The // following operations are related to DeleteBucketWebsite: // -// * GetBucketWebsite +// * GetBucketWebsite // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketWebsite.html) // -// -// * PutBucketWebsite +// * +// PutBucketWebsite // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html) func (c *Client) DeleteBucketWebsite(ctx context.Context, params *DeleteBucketWebsiteInput, optFns ...func(*Options)) (*DeleteBucketWebsiteOutput, error) { if params == nil { diff --git a/service/s3/api_op_DeleteObject.go b/service/s3/api_op_DeleteObject.go index 83c54cbd22d..1ed65a40b80 100644 --- a/service/s3/api_op_DeleteObject.go +++ b/service/s3/api_op_DeleteObject.go @@ -32,7 +32,7 @@ import ( // the s3:DeleteObject, s3:DeleteObjectVersion, and s3:PutLifeCycleConfiguration // actions. The following operation is related to DeleteObject: // -// * PutObject +// * PutObject // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) func (c *Client) DeleteObject(ctx context.Context, params *DeleteObjectInput, optFns ...func(*Options)) (*DeleteObjectOutput, error) { if params == nil { diff --git a/service/s3/api_op_DeleteObjectTagging.go b/service/s3/api_op_DeleteObjectTagging.go index 6db18301357..8929d51d40f 100644 --- a/service/s3/api_op_DeleteObjectTagging.go +++ b/service/s3/api_op_DeleteObjectTagging.go @@ -19,11 +19,11 @@ import ( // s3:DeleteObjectVersionTagging action. 
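Editor's note: DeleteObjectTagging, whose comments are reflowed above, optionally targets a specific object version. An illustrative sketch under the same assumptions as the earlier examples:

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// removeObjectTags deletes the tag set of one object; pass versionID as ""
// to target the current version.
func removeObjectTags(ctx context.Context, client *s3.Client, bucket, key, versionID string) error {
	in := &s3.DeleteObjectTaggingInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	}
	if versionID != "" {
		in.VersionId = aws.String(versionID)
	}
	_, err := client.DeleteObjectTagging(ctx, in)
	return err
}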
The following operations are related to // DeleteBucketMetricsConfiguration: // -// * PutObjectTagging +// * PutObjectTagging // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html) // -// -// * GetObjectTagging +// * +// GetObjectTagging // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) func (c *Client) DeleteObjectTagging(ctx context.Context, params *DeleteObjectTaggingInput, optFns ...func(*Options)) (*DeleteObjectTaggingOutput, error) { if params == nil { diff --git a/service/s3/api_op_DeleteObjects.go b/service/s3/api_op_DeleteObjects.go index 03ee1c94814..c9333809653 100644 --- a/service/s3/api_op_DeleteObjects.go +++ b/service/s3/api_op_DeleteObjects.go @@ -38,22 +38,22 @@ import ( // not been altered in transit. The following operations are related to // DeleteObjects: // -// * CreateMultipartUpload +// * CreateMultipartUpload // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) // -// -// * UploadPart +// * +// UploadPart // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) // -// * +// * // CompleteMultipartUpload // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) // -// -// * ListParts +// * +// ListParts // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) // -// * +// * // AbortMultipartUpload // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) func (c *Client) DeleteObjects(ctx context.Context, params *DeleteObjectsInput, optFns ...func(*Options)) (*DeleteObjectsOutput, error) { diff --git a/service/s3/api_op_DeletePublicAccessBlock.go b/service/s3/api_op_DeletePublicAccessBlock.go index f87fd177cd1..f7d51d30080 100644 --- a/service/s3/api_op_DeletePublicAccessBlock.go +++ b/service/s3/api_op_DeletePublicAccessBlock.go @@ -19,20 +19,20 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). The // following operations are related to DeletePublicAccessBlock: // -// * Using Amazon -// S3 Block Public Access +// * Using Amazon S3 +// Block Public Access // (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) // -// -// * GetPublicAccessBlock +// * +// GetPublicAccessBlock // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) // -// -// * PutPublicAccessBlock +// * +// PutPublicAccessBlock // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html) // -// -// * GetBucketPolicyStatus +// * +// GetBucketPolicyStatus // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html) func (c *Client) DeletePublicAccessBlock(ctx context.Context, params *DeletePublicAccessBlockInput, optFns ...func(*Options)) (*DeletePublicAccessBlockOutput, error) { if params == nil { diff --git a/service/s3/api_op_GetBucketAccelerateConfiguration.go b/service/s3/api_op_GetBucketAccelerateConfiguration.go index 1c29f786676..208a1d9df33 100644 --- a/service/s3/api_op_GetBucketAccelerateConfiguration.go +++ b/service/s3/api_op_GetBucketAccelerateConfiguration.go @@ -33,7 +33,7 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) in // the Amazon Simple Storage Service Developer Guide. 
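Editor's note: the DeleteObjects hunk above describes batch deletion with per-key error reporting. A minimal sketch of that pattern (helper name mine):

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// deleteKeys removes up to 1,000 keys in one request and reports per-key
// failures returned in the response.
func deleteKeys(ctx context.Context, client *s3.Client, bucket string, keys []string) error {
	ids := make([]types.ObjectIdentifier, 0, len(keys))
	for _, k := range keys {
		ids = append(ids, types.ObjectIdentifier{Key: aws.String(k)})
	}
	out, err := client.DeleteObjects(ctx, &s3.DeleteObjectsInput{
		Bucket: aws.String(bucket),
		Delete: &types.Delete{Objects: ids},
	})
	if err != nil {
		return err
	}
	for _, e := range out.Errors {
		fmt.Printf("could not delete %s: %s\n", aws.ToString(e.Key), aws.ToString(e.Message))
	}
	return nil
}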
Related Resources // -// * +// * // PutBucketAccelerateConfiguration // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html) func (c *Client) GetBucketAccelerateConfiguration(ctx context.Context, params *GetBucketAccelerateConfigurationInput, optFns ...func(*Options)) (*GetBucketAccelerateConfigurationOutput, error) { diff --git a/service/s3/api_op_GetBucketAcl.go b/service/s3/api_op_GetBucketAcl.go index fe17b117b83..78aa19fcd89 100644 --- a/service/s3/api_op_GetBucketAcl.go +++ b/service/s3/api_op_GetBucketAcl.go @@ -17,7 +17,7 @@ import ( // granted to the anonymous user, you can return the ACL of the bucket without // using an authorization header. Related Resources // -// * ListObjects +// * ListObjects // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html) func (c *Client) GetBucketAcl(ctx context.Context, params *GetBucketAclInput, optFns ...func(*Options)) (*GetBucketAclOutput, error) { if params == nil { diff --git a/service/s3/api_op_GetBucketAnalyticsConfiguration.go b/service/s3/api_op_GetBucketAnalyticsConfiguration.go index ede401c4b37..c0ede2518c2 100644 --- a/service/s3/api_op_GetBucketAnalyticsConfiguration.go +++ b/service/s3/api_op_GetBucketAnalyticsConfiguration.go @@ -25,16 +25,16 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html) // in the Amazon Simple Storage Service Developer Guide. Related Resources // -// * +// * // DeleteBucketAnalyticsConfiguration // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html) // -// -// * ListBucketAnalyticsConfigurations +// * +// ListBucketAnalyticsConfigurations // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html) // -// -// * PutBucketAnalyticsConfiguration +// * +// PutBucketAnalyticsConfiguration // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html) func (c *Client) GetBucketAnalyticsConfiguration(ctx context.Context, params *GetBucketAnalyticsConfigurationInput, optFns ...func(*Options)) (*GetBucketAnalyticsConfigurationOutput, error) { if params == nil { diff --git a/service/s3/api_op_GetBucketCors.go b/service/s3/api_op_GetBucketCors.go index e2c3e4fa369..b04dc6f16a9 100644 --- a/service/s3/api_op_GetBucketCors.go +++ b/service/s3/api_op_GetBucketCors.go @@ -18,10 +18,10 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html). The following // operations are related to GetBucketCors: // -// * PutBucketCors +// * PutBucketCors // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html) // -// * +// * // DeleteBucketCors // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html) func (c *Client) GetBucketCors(ctx context.Context, params *GetBucketCorsInput, optFns ...func(*Options)) (*GetBucketCorsOutput, error) { diff --git a/service/s3/api_op_GetBucketEncryption.go b/service/s3/api_op_GetBucketEncryption.go index b82818a737f..6a427ecbfdc 100644 --- a/service/s3/api_op_GetBucketEncryption.go +++ b/service/s3/api_op_GetBucketEncryption.go @@ -25,12 +25,11 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). 
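Editor's note: a sketch of reading the GetBucketEncryption output documented above; the nested field names are taken from the generated types package as I recall them for this SDK version and should be treated as an assumption:

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printBucketEncryption prints the default-encryption algorithm configured on
// a bucket, if any.
func printBucketEncryption(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.GetBucketEncryption(ctx, &s3.GetBucketEncryptionInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return err
	}
	if out.ServerSideEncryptionConfiguration == nil {
		return nil
	}
	for _, rule := range out.ServerSideEncryptionConfiguration.Rules {
		if def := rule.ApplyServerSideEncryptionByDefault; def != nil {
			fmt.Println("default algorithm:", def.SSEAlgorithm)
		}
	}
	return nil
}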
The // following operations are related to GetBucketEncryption: // -// * -// PutBucketEncryption +// * PutBucketEncryption // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html) // -// -// * DeleteBucketEncryption +// * +// DeleteBucketEncryption // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html) func (c *Client) GetBucketEncryption(ctx context.Context, params *GetBucketEncryptionInput, optFns ...func(*Options)) (*GetBucketEncryptionOutput, error) { if params == nil { diff --git a/service/s3/api_op_GetBucketInventoryConfiguration.go b/service/s3/api_op_GetBucketInventoryConfiguration.go index f176399dd79..173715f0d61 100644 --- a/service/s3/api_op_GetBucketInventoryConfiguration.go +++ b/service/s3/api_op_GetBucketInventoryConfiguration.go @@ -23,16 +23,16 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html). The // following operations are related to GetBucketInventoryConfiguration: // -// * +// * // DeleteBucketInventoryConfiguration // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html) // -// -// * ListBucketInventoryConfigurations +// * +// ListBucketInventoryConfigurations // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html) // -// -// * PutBucketInventoryConfiguration +// * +// PutBucketInventoryConfiguration // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html) func (c *Client) GetBucketInventoryConfiguration(ctx context.Context, params *GetBucketInventoryConfigurationInput, optFns ...func(*Options)) (*GetBucketInventoryConfigurationOutput, error) { if params == nil { diff --git a/service/s3/api_op_GetBucketLifecycleConfiguration.go b/service/s3/api_op_GetBucketLifecycleConfiguration.go index ccb74ff5018..1462a1712a9 100644 --- a/service/s3/api_op_GetBucketLifecycleConfiguration.go +++ b/service/s3/api_op_GetBucketLifecycleConfiguration.go @@ -32,29 +32,29 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). // GetBucketLifecycleConfiguration has the following special error: // -// * Error -// code: NoSuchLifecycleConfiguration +// * Error code: +// NoSuchLifecycleConfiguration // -// * Description: The lifecycle -// configuration does not exist. +// * Description: The lifecycle configuration does +// not exist. 
// -// * HTTP Status Code: 404 Not Found +// * HTTP Status Code: 404 Not Found // -// -// * SOAP Fault Code Prefix: Client +// * SOAP Fault Code Prefix: +// Client // // The following operations are related to // GetBucketLifecycleConfiguration: // -// * GetBucketLifecycle +// * GetBucketLifecycle // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html) // -// -// * PutBucketLifecycle +// * +// PutBucketLifecycle // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html) // -// -// * DeleteBucketLifecycle +// * +// DeleteBucketLifecycle // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html) func (c *Client) GetBucketLifecycleConfiguration(ctx context.Context, params *GetBucketLifecycleConfigurationInput, optFns ...func(*Options)) (*GetBucketLifecycleConfigurationOutput, error) { if params == nil { diff --git a/service/s3/api_op_GetBucketLocation.go b/service/s3/api_op_GetBucketLocation.go index 7769d697cbd..9d0bd580d41 100644 --- a/service/s3/api_op_GetBucketLocation.go +++ b/service/s3/api_op_GetBucketLocation.go @@ -18,10 +18,10 @@ import ( // this implementation of the operation, you must be the bucket owner. The // following operations are related to GetBucketLocation: // -// * GetObject +// * GetObject // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) // -// * +// * // CreateBucket // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) func (c *Client) GetBucketLocation(ctx context.Context, params *GetBucketLocationInput, optFns ...func(*Options)) (*GetBucketLocationOutput, error) { diff --git a/service/s3/api_op_GetBucketLogging.go b/service/s3/api_op_GetBucketLogging.go index 0f30f400e13..6d1c8f29951 100644 --- a/service/s3/api_op_GetBucketLogging.go +++ b/service/s3/api_op_GetBucketLogging.go @@ -15,10 +15,10 @@ import ( // and modify that status. To use GET, you must be the bucket owner. The following // operations are related to GetBucketLogging: // -// * CreateBucket +// * CreateBucket // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) // -// * +// * // PutBucketLogging // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLogging.html) func (c *Client) GetBucketLogging(ctx context.Context, params *GetBucketLoggingInput, optFns ...func(*Options)) (*GetBucketLoggingOutput, error) { diff --git a/service/s3/api_op_GetBucketMetricsConfiguration.go b/service/s3/api_op_GetBucketMetricsConfiguration.go index b1f4396a9fb..b57da06786b 100644 --- a/service/s3/api_op_GetBucketMetricsConfiguration.go +++ b/service/s3/api_op_GetBucketMetricsConfiguration.go @@ -26,20 +26,20 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). 
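Editor's note: the GetBucketLifecycleConfiguration comments above call out the NoSuchLifecycleConfiguration error code. One way to handle it in caller code is via the smithy APIError interface; note that the smithy-go module path has varied across SDK versions, so the import below is an assumption, not part of this patch:

package example

import (
	"context"
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/smithy-go"
)

// listLifecycleRules prints the rule IDs on a bucket and treats the
// NoSuchLifecycleConfiguration error code as "no rules configured".
func listLifecycleRules(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.GetBucketLifecycleConfiguration(ctx, &s3.GetBucketLifecycleConfigurationInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		var apiErr smithy.APIError
		if errors.As(err, &apiErr) && apiErr.ErrorCode() == "NoSuchLifecycleConfiguration" {
			fmt.Println("bucket has no lifecycle configuration")
			return nil
		}
		return err
	}
	for _, rule := range out.Rules {
		fmt.Println("rule:", aws.ToString(rule.ID))
	}
	return nil
}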
// The following operations are related to GetBucketMetricsConfiguration: // -// * +// * // PutBucketMetricsConfiguration // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) // -// -// * DeleteBucketMetricsConfiguration +// * +// DeleteBucketMetricsConfiguration // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html) // -// -// * ListBucketMetricsConfigurations +// * +// ListBucketMetricsConfigurations // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html) // -// -// * Monitoring Metrics with Amazon CloudWatch +// * +// Monitoring Metrics with Amazon CloudWatch // (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) func (c *Client) GetBucketMetricsConfiguration(ctx context.Context, params *GetBucketMetricsConfigurationInput, optFns ...func(*Options)) (*GetBucketMetricsConfigurationOutput, error) { if params == nil { diff --git a/service/s3/api_op_GetBucketNotificationConfiguration.go b/service/s3/api_op_GetBucketNotificationConfiguration.go index ba84f21ffc2..1a1462f2724 100644 --- a/service/s3/api_op_GetBucketNotificationConfiguration.go +++ b/service/s3/api_op_GetBucketNotificationConfiguration.go @@ -24,7 +24,7 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). The // following operation is related to GetBucketNotification: // -// * +// * // PutBucketNotification // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketNotification.html) func (c *Client) GetBucketNotificationConfiguration(ctx context.Context, params *GetBucketNotificationConfigurationInput, optFns ...func(*Options)) (*GetBucketNotificationConfigurationOutput, error) { diff --git a/service/s3/api_op_GetBucketOwnershipControls.go b/service/s3/api_op_GetBucketOwnershipControls.go index 2c50d9955d6..c02eff95d13 100644 --- a/service/s3/api_op_GetBucketOwnershipControls.go +++ b/service/s3/api_op_GetBucketOwnershipControls.go @@ -19,10 +19,10 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html). // The following operations are related to GetBucketOwnershipControls: // -// * +// * // PutBucketOwnershipControls // -// * DeleteBucketOwnershipControls +// * DeleteBucketOwnershipControls func (c *Client) GetBucketOwnershipControls(ctx context.Context, params *GetBucketOwnershipControlsInput, optFns ...func(*Options)) (*GetBucketOwnershipControlsOutput, error) { if params == nil { params = &GetBucketOwnershipControlsInput{} diff --git a/service/s3/api_op_GetBucketPolicy.go b/service/s3/api_op_GetBucketPolicy.go index 78e0b10675e..242a89e8a34 100644 --- a/service/s3/api_op_GetBucketPolicy.go +++ b/service/s3/api_op_GetBucketPolicy.go @@ -24,7 +24,7 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). 
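Editor's note: GetBucketPolicy, reflowed above, returns the policy document as a JSON string. A minimal sketch (helper name mine):

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printBucketPolicy fetches and prints the bucket policy document.
func printBucketPolicy(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.GetBucketPolicy(ctx, &s3.GetBucketPolicyInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return err
	}
	fmt.Println(aws.ToString(out.Policy))
	return nil
}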
The // following operation is related to GetBucketPolicy: // -// * GetObject +// * GetObject // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) func (c *Client) GetBucketPolicy(ctx context.Context, params *GetBucketPolicyInput, optFns ...func(*Options)) (*GetBucketPolicyOutput, error) { if params == nil { diff --git a/service/s3/api_op_GetBucketPolicyStatus.go b/service/s3/api_op_GetBucketPolicyStatus.go index 9a5aed8a935..2f39e7ee416 100644 --- a/service/s3/api_op_GetBucketPolicyStatus.go +++ b/service/s3/api_op_GetBucketPolicyStatus.go @@ -21,20 +21,20 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status). // The following operations are related to GetBucketPolicyStatus: // -// * Using -// Amazon S3 Block Public Access +// * Using Amazon +// S3 Block Public Access // (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) // -// -// * GetPublicAccessBlock +// * +// GetPublicAccessBlock // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) // -// -// * PutPublicAccessBlock +// * +// PutPublicAccessBlock // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html) // -// -// * DeletePublicAccessBlock +// * +// DeletePublicAccessBlock // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) func (c *Client) GetBucketPolicyStatus(ctx context.Context, params *GetBucketPolicyStatusInput, optFns ...func(*Options)) (*GetBucketPolicyStatusOutput, error) { if params == nil { diff --git a/service/s3/api_op_GetBucketReplication.go b/service/s3/api_op_GetBucketReplication.go index 7700e0d749e..a189590951a 100644 --- a/service/s3/api_op_GetBucketReplication.go +++ b/service/s3/api_op_GetBucketReplication.go @@ -27,12 +27,12 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList) // The following operations are related to GetBucketReplication: // -// * +// * // PutBucketReplication // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html) // -// -// * DeleteBucketReplication +// * +// DeleteBucketReplication // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html) func (c *Client) GetBucketReplication(ctx context.Context, params *GetBucketReplicationInput, optFns ...func(*Options)) (*GetBucketReplicationOutput, error) { if params == nil { diff --git a/service/s3/api_op_GetBucketRequestPayment.go b/service/s3/api_op_GetBucketRequestPayment.go index dfa112e53f4..be17ac23b61 100644 --- a/service/s3/api_op_GetBucketRequestPayment.go +++ b/service/s3/api_op_GetBucketRequestPayment.go @@ -17,7 +17,7 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html). The // following operations are related to GetBucketRequestPayment: // -// * ListObjects +// * ListObjects // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html) func (c *Client) GetBucketRequestPayment(ctx context.Context, params *GetBucketRequestPaymentInput, optFns ...func(*Options)) (*GetBucketRequestPaymentOutput, error) { if params == nil { diff --git a/service/s3/api_op_GetBucketTagging.go b/service/s3/api_op_GetBucketTagging.go index 50f204bd21f..b57eb147f27 100644 --- a/service/s3/api_op_GetBucketTagging.go +++ b/service/s3/api_op_GetBucketTagging.go @@ -16,20 +16,20 @@ import ( // bucket owner has this permission and can grant this permission to others. 
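Editor's note: a sketch of GetBucketTagging, whose comments continue below; a bucket with no tags returns the NoSuchTagSetError error code described there. Helper name and assumptions as in the earlier examples:

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printBucketTags lists the bucket's tag set as key=value pairs.
func printBucketTags(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.GetBucketTagging(ctx, &s3.GetBucketTaggingInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return err
	}
	for _, tag := range out.TagSet {
		fmt.Printf("%s=%s\n", aws.ToString(tag.Key), aws.ToString(tag.Value))
	}
	return nil
}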
// GetBucketTagging has the following special error: // -// * Error code: +// * Error code: // NoSuchTagSetError // -// * Description: There is no tag set associated with -// the bucket. +// * Description: There is no tag set associated with the +// bucket. // // The following operations are related to GetBucketTagging: // -// * +// * // PutBucketTagging // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html) // -// -// * DeleteBucketTagging +// * +// DeleteBucketTagging // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html) func (c *Client) GetBucketTagging(ctx context.Context, params *GetBucketTaggingInput, optFns ...func(*Options)) (*GetBucketTaggingOutput, error) { if params == nil { diff --git a/service/s3/api_op_GetBucketVersioning.go b/service/s3/api_op_GetBucketVersioning.go index 25aae103460..76137864fc3 100644 --- a/service/s3/api_op_GetBucketVersioning.go +++ b/service/s3/api_op_GetBucketVersioning.go @@ -17,14 +17,15 @@ import ( // bucket owner must use an authentication device to change the versioning state of // the bucket. The following operations are related to GetBucketVersioning: // -// * -// GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// * +// GetObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) // -// -// * PutObject +// * +// PutObject // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) // -// * +// * // DeleteObject // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) func (c *Client) GetBucketVersioning(ctx context.Context, params *GetBucketVersioningInput, optFns ...func(*Options)) (*GetBucketVersioningOutput, error) { diff --git a/service/s3/api_op_GetBucketWebsite.go b/service/s3/api_op_GetBucketWebsite.go index 978a6915b23..f3623a455d8 100644 --- a/service/s3/api_op_GetBucketWebsite.go +++ b/service/s3/api_op_GetBucketWebsite.go @@ -21,11 +21,11 @@ import ( // policy granting them the S3:GetBucketWebsite permission. The following // operations are related to DeleteBucketWebsite: // -// * DeleteBucketWebsite +// * DeleteBucketWebsite // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketWebsite.html) // -// -// * PutBucketWebsite +// * +// PutBucketWebsite // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html) func (c *Client) GetBucketWebsite(ctx context.Context, params *GetBucketWebsiteInput, optFns ...func(*Options)) (*GetBucketWebsiteOutput, error) { if params == nil { diff --git a/service/s3/api_op_GetObject.go b/service/s3/api_op_GetObject.go index a405a68f36f..a6a960902a8 100644 --- a/service/s3/api_op_GetObject.go +++ b/service/s3/api_op_GetObject.go @@ -49,12 +49,12 @@ import ( // object in Amazon S3, then when you GET the object, you must use the following // headers: // -// * x-amz-server-side-encryption-customer-algorithm +// * x-amz-server-side-encryption-customer-algorithm // -// * +// * // x-amz-server-side-encryption-customer-key // -// * +// * // x-amz-server-side-encryption-customer-key-MD5 // // For more information about SSE-C, @@ -72,11 +72,11 @@ import ( // the object you request does not exist, the error Amazon S3 returns depends on // whether you also have the s3:ListBucket permission. // -// * If you have the +// * If you have the // s3:ListBucket permission on the bucket, Amazon S3 will return an HTTP status // code 404 ("no such key") error. 
// -// * If you don’t have the s3:ListBucket +// * If you don’t have the s3:ListBucket // permission, Amazon S3 will return an HTTP status code 403 ("access denied") // error. // @@ -100,37 +100,36 @@ import ( // request, either using an Authorization header or a presigned URL, when using // these parameters. They cannot be used with an unsigned (anonymous) request. // +// * +// response-content-type // -// * response-content-type +// * response-content-language // -// * response-content-language +// * response-expires // -// * -// response-expires +// * +// response-cache-control // -// * response-cache-control +// * response-content-disposition // -// * -// response-content-disposition +// * +// response-content-encoding // -// * response-content-encoding -// -// Additional -// Considerations about Request Headers If both of the If-Match and -// If-Unmodified-Since headers are present in the request as follows: If-Match -// condition evaluates to true, and; If-Unmodified-Since condition evaluates to -// false; then, S3 returns 200 OK and the data requested. If both of the -// If-None-Match and If-Modified-Since headers are present in the request as -// follows: If-None-Match condition evaluates to false, and; If-Modified-Since -// condition evaluates to true; then, S3 returns 304 Not Modified response code. -// For more information about conditional requests, see RFC 7232 +// Additional Considerations about Request Headers If +// both of the If-Match and If-Unmodified-Since headers are present in the request +// as follows: If-Match condition evaluates to true, and; If-Unmodified-Since +// condition evaluates to false; then, S3 returns 200 OK and the data requested. If +// both of the If-None-Match and If-Modified-Since headers are present in the +// request as follows: If-None-Match condition evaluates to false, and; +// If-Modified-Since condition evaluates to true; then, S3 returns 304 Not Modified +// response code. For more information about conditional requests, see RFC 7232 // (https://tools.ietf.org/html/rfc7232). The following operations are related to // GetObject: // -// * ListBuckets +// * ListBuckets // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html) // -// * +// * // GetObjectAcl // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html) func (c *Client) GetObject(ctx context.Context, params *GetObjectInput, optFns ...func(*Options)) (*GetObjectOutput, error) { diff --git a/service/s3/api_op_GetObjectAcl.go b/service/s3/api_op_GetObjectAcl.go index ae13b96f374..204b6835c07 100644 --- a/service/s3/api_op_GetObjectAcl.go +++ b/service/s3/api_op_GetObjectAcl.go @@ -18,14 +18,14 @@ import ( // version, use the versionId subresource. 
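Editor's note: the GetObject hunks above describe the response-* override parameters and the streamed body. A minimal sketch of a download with one override; the body must be closed by the caller:

package example

import (
	"context"
	"io"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// fetchObject downloads an object, overriding the returned Content-Type with
// the response-content-type parameter discussed in the comments above.
func fetchObject(ctx context.Context, client *s3.Client, bucket, key string) ([]byte, error) {
	out, err := client.GetObject(ctx, &s3.GetObjectInput{
		Bucket:              aws.String(bucket),
		Key:                 aws.String(key),
		ResponseContentType: aws.String("application/octet-stream"),
	})
	if err != nil {
		return nil, err
	}
	defer out.Body.Close()
	return io.ReadAll(out.Body)
}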
The following operations are related to // GetObjectAcl: // -// * GetObject +// * GetObject // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) // -// * +// * // DeleteObject // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) // -// * +// * // PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) func (c *Client) GetObjectAcl(ctx context.Context, params *GetObjectAclInput, optFns ...func(*Options)) (*GetObjectAclOutput, error) { if params == nil { diff --git a/service/s3/api_op_GetObjectTagging.go b/service/s3/api_op_GetObjectTagging.go index adf8e65d5e2..df734b84778 100644 --- a/service/s3/api_op_GetObjectTagging.go +++ b/service/s3/api_op_GetObjectTagging.go @@ -23,7 +23,7 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). The // following operation is related to GetObjectTagging: // -// * PutObjectTagging +// * PutObjectTagging // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html) func (c *Client) GetObjectTagging(ctx context.Context, params *GetObjectTaggingInput, optFns ...func(*Options)) (*GetObjectTaggingOutput, error) { if params == nil { diff --git a/service/s3/api_op_GetObjectTorrent.go b/service/s3/api_op_GetObjectTorrent.go index 6936df71b5e..4cbfe049dd0 100644 --- a/service/s3/api_op_GetObjectTorrent.go +++ b/service/s3/api_op_GetObjectTorrent.go @@ -22,7 +22,7 @@ import ( // supported by Amazon S3 on Outposts. The following operation is related to // GetObjectTorrent: // -// * GetObject +// * GetObject // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) func (c *Client) GetObjectTorrent(ctx context.Context, params *GetObjectTorrentInput, optFns ...func(*Options)) (*GetObjectTorrentOutput, error) { if params == nil { diff --git a/service/s3/api_op_GetPublicAccessBlock.go b/service/s3/api_op_GetPublicAccessBlock.go index 4afb0d5e8e8..925ff5dcb7b 100644 --- a/service/s3/api_op_GetPublicAccessBlock.go +++ b/service/s3/api_op_GetPublicAccessBlock.go @@ -26,20 +26,20 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status). 
// The following operations are related to GetPublicAccessBlock: // -// * Using -// Amazon S3 Block Public Access +// * Using Amazon S3 +// Block Public Access // (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) // -// -// * PutPublicAccessBlock +// * +// PutPublicAccessBlock // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html) // -// -// * GetPublicAccessBlock +// * +// GetPublicAccessBlock // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) // -// -// * DeletePublicAccessBlock +// * +// DeletePublicAccessBlock // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) func (c *Client) GetPublicAccessBlock(ctx context.Context, params *GetPublicAccessBlockInput, optFns ...func(*Options)) (*GetPublicAccessBlockOutput, error) { if params == nil { diff --git a/service/s3/api_op_HeadObject.go b/service/s3/api_op_HeadObject.go index b9ff8fd6b0a..46565ab6235 100644 --- a/service/s3/api_op_HeadObject.go +++ b/service/s3/api_op_HeadObject.go @@ -21,13 +21,13 @@ import ( // (SSE-C) when you store the object in Amazon S3, then when you retrieve the // metadata from the object, you must use the following headers: // -// * +// * // x-amz-server-side-encryption-customer-algorithm // -// * +// * // x-amz-server-side-encryption-customer-key // -// * +// * // x-amz-server-side-encryption-customer-key-MD5 // // For more information about SSE-C, @@ -42,49 +42,49 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html). // Consider the following when using request headers: // -// * Consideration 1 – If -// both of the If-Match and If-Unmodified-Since headers are present in the request -// as follows: +// * Consideration 1 – If both +// of the If-Match and If-Unmodified-Since headers are present in the request as +// follows: // -// * If-Match condition evaluates to true, and; +// * If-Match condition evaluates to true, and; // -// * -// If-Unmodified-Since condition evaluates to false; +// * If-Unmodified-Since +// condition evaluates to false; // -// Then Amazon S3 returns -// 200 OK and the data requested. +// Then Amazon S3 returns 200 OK and the data +// requested. // -// * Consideration 2 – If both of the -// If-None-Match and If-Modified-Since headers are present in the request as -// follows: +// * Consideration 2 – If both of the If-None-Match and +// If-Modified-Since headers are present in the request as follows: // -// * If-None-Match condition evaluates to false, and; +// * +// If-None-Match condition evaluates to false, and; // -// * -// If-Modified-Since condition evaluates to true; +// * If-Modified-Since condition +// evaluates to true; // -// Then Amazon S3 returns the -// 304 Not Modified response code. +// Then Amazon S3 returns the 304 Not Modified response +// code. // -// For more information about conditional -// requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232). Permissions You -// need the s3:GetObject permission for this operation. For more information, see -// Specifying Permissions in a Policy +// For more information about conditional requests, see RFC 7232 +// (https://tools.ietf.org/html/rfc7232). Permissions You need the s3:GetObject +// permission for this operation. For more information, see Specifying Permissions +// in a Policy // (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). 
If // the object you request does not exist, the error Amazon S3 returns depends on // whether you also have the s3:ListBucket permission. // -// * If you have the +// * If you have the // s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code // 404 ("no such key") error. // -// * If you don’t have the s3:ListBucket -// permission, Amazon S3 returns an HTTP status code 403 ("access denied") -// error. +// * If you don’t have the s3:ListBucket permission, +// Amazon S3 returns an HTTP status code 403 ("access denied") error. // -// The following operation is related to HeadObject: +// The +// following operation is related to HeadObject: // -// * GetObject +// * GetObject // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) func (c *Client) HeadObject(ctx context.Context, params *HeadObjectInput, optFns ...func(*Options)) (*HeadObjectOutput, error) { if params == nil { @@ -271,8 +271,8 @@ type HeadObjectOutput struct { // object metadata (HeadObject) from these buckets, Amazon S3 will return the // x-amz-replication-status header in the response as follows: // - // * If requesting - // an object from the source bucket — Amazon S3 will return the + // * If requesting an + // object from the source bucket — Amazon S3 will return the // x-amz-replication-status header if the object in your request is eligible for // replication. For example, suppose that in your replication configuration, you // specify object prefix TaxDocs requesting Amazon S3 to replicate objects with key @@ -281,13 +281,12 @@ type HeadObjectOutput struct { // this key name prefix, Amazon S3 will return the x-amz-replication-status header // with value PENDING, COMPLETED or FAILED indicating object replication status. // + // * + // If requesting an object from the destination bucket — Amazon S3 will return the + // x-amz-replication-status header with value REPLICA if the object in your request + // is a replica that Amazon S3 created. // - // * If requesting an object from the destination bucket — Amazon S3 will return - // the x-amz-replication-status header with value REPLICA if the object in your - // request is a replica that Amazon S3 created. - // - // For more information, see - // Replication + // For more information, see Replication // (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). ReplicationStatus types.ReplicationStatus diff --git a/service/s3/api_op_ListBucketAnalyticsConfigurations.go b/service/s3/api_op_ListBucketAnalyticsConfigurations.go index 11107bcf0f7..b22868d7de9 100644 --- a/service/s3/api_op_ListBucketAnalyticsConfigurations.go +++ b/service/s3/api_op_ListBucketAnalyticsConfigurations.go @@ -32,16 +32,16 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html). 
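To go with the HeadObject hunks above, here is a minimal metadata-probe sketch under the same assumptions (recent aws-sdk-go-v2 release, placeholder names). Whether a missing key surfaces as 404 or 403 depends on the s3:ListBucket permission, exactly as the reflowed comment describes.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// HEAD returns metadata only; no object body is transferred.
	// For SSE-C objects, the SSECustomerAlgorithm/SSECustomerKey/SSECustomerKeyMD5
	// input fields would also need to be set, mirroring the headers listed above.
	head, err := client.HeadObject(ctx, &s3.HeadObjectInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Key:    aws.String("example-key"),    // placeholder
	})
	if err != nil {
		// A missing key is reported as 404 (with s3:ListBucket) or 403 (without).
		log.Fatal(err)
	}
	fmt.Println("etag:", aws.ToString(head.ETag))
}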
// The following operations are related to ListBucketAnalyticsConfigurations: // -// -// * GetBucketAnalyticsConfiguration +// * +// GetBucketAnalyticsConfiguration // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html) // -// -// * DeleteBucketAnalyticsConfiguration +// * +// DeleteBucketAnalyticsConfiguration // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html) // -// -// * PutBucketAnalyticsConfiguration +// * +// PutBucketAnalyticsConfiguration // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html) func (c *Client) ListBucketAnalyticsConfigurations(ctx context.Context, params *ListBucketAnalyticsConfigurationsInput, optFns ...func(*Options)) (*ListBucketAnalyticsConfigurationsOutput, error) { if params == nil { diff --git a/service/s3/api_op_ListBucketInventoryConfigurations.go b/service/s3/api_op_ListBucketInventoryConfigurations.go index 5e25fc533b2..62094ad6412 100644 --- a/service/s3/api_op_ListBucketInventoryConfigurations.go +++ b/service/s3/api_op_ListBucketInventoryConfigurations.go @@ -31,16 +31,16 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) The // following operations are related to ListBucketInventoryConfigurations: // -// * +// * // GetBucketInventoryConfiguration // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html) // -// -// * DeleteBucketInventoryConfiguration +// * +// DeleteBucketInventoryConfiguration // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html) // -// -// * PutBucketInventoryConfiguration +// * +// PutBucketInventoryConfiguration // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html) func (c *Client) ListBucketInventoryConfigurations(ctx context.Context, params *ListBucketInventoryConfigurationsInput, optFns ...func(*Options)) (*ListBucketInventoryConfigurationsOutput, error) { if params == nil { diff --git a/service/s3/api_op_ListBucketMetricsConfigurations.go b/service/s3/api_op_ListBucketMetricsConfigurations.go index f193c81edbe..0960926ff45 100644 --- a/service/s3/api_op_ListBucketMetricsConfigurations.go +++ b/service/s3/api_op_ListBucketMetricsConfigurations.go @@ -33,16 +33,16 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). 
// The following operations are related to ListBucketMetricsConfigurations: // -// * +// * // PutBucketMetricsConfiguration // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) // -// -// * GetBucketMetricsConfiguration +// * +// GetBucketMetricsConfiguration // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html) // -// -// * DeleteBucketMetricsConfiguration +// * +// DeleteBucketMetricsConfiguration // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html) func (c *Client) ListBucketMetricsConfigurations(ctx context.Context, params *ListBucketMetricsConfigurationsInput, optFns ...func(*Options)) (*ListBucketMetricsConfigurationsOutput, error) { if params == nil { diff --git a/service/s3/api_op_ListMultipartUploads.go b/service/s3/api_op_ListMultipartUploads.go index 40802320663..9daa57d9b94 100644 --- a/service/s3/api_op_ListMultipartUploads.go +++ b/service/s3/api_op_ListMultipartUploads.go @@ -32,23 +32,23 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). The // following operations are related to ListMultipartUploads: // -// * +// * // CreateMultipartUpload // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) // -// -// * UploadPart +// * +// UploadPart // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) // -// * +// * // CompleteMultipartUpload // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) // -// -// * ListParts +// * +// ListParts // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) // -// * +// * // AbortMultipartUpload // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) func (c *Client) ListMultipartUploads(ctx context.Context, params *ListMultipartUploadsInput, optFns ...func(*Options)) (*ListMultipartUploadsOutput, error) { diff --git a/service/s3/api_op_ListObjectVersions.go b/service/s3/api_op_ListObjectVersions.go index 32f22531732..dfe2bda6c5c 100644 --- a/service/s3/api_op_ListObjectVersions.go +++ b/service/s3/api_op_ListObjectVersions.go @@ -19,17 +19,18 @@ import ( // bucket. This action is not supported by Amazon S3 on Outposts. The following // operations are related to ListObjectVersions: // -// * ListObjectsV2 +// * ListObjectsV2 // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html) // -// * -// GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// * +// GetObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) // -// -// * PutObject +// * +// PutObject // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) // -// * +// * // DeleteObject // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) func (c *Client) ListObjectVersions(ctx context.Context, params *ListObjectVersionsInput, optFns ...func(*Options)) (*ListObjectVersionsOutput, error) { diff --git a/service/s3/api_op_ListObjects.go b/service/s3/api_op_ListObjects.go index 2feeb66661b..99d7479be4c 100644 --- a/service/s3/api_op_ListObjects.go +++ b/service/s3/api_op_ListObjects.go @@ -21,22 +21,23 @@ import ( // developing applications. For backward compatibility, Amazon S3 continues to // support ListObjects. 
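Alongside the ListObjectVersions hunk above, a sketch of listing versions in a versioning-enabled bucket might look like this; the bucket name and prefix are placeholders, and it assumes a recent aws-sdk-go-v2 release.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// List object versions under a prefix; delete markers are reported
	// separately in out.DeleteMarkers.
	out, err := client.ListObjectVersions(ctx, &s3.ListObjectVersionsInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Prefix: aws.String("reports/"),       // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, v := range out.Versions {
		fmt.Println(aws.ToString(v.Key), aws.ToString(v.VersionId))
	}
}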
The following operations are related to ListObjects: // -// * +// * // ListObjectsV2 // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html) // -// * -// GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// * +// GetObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) // -// -// * PutObject +// * +// PutObject // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) // -// * +// * // CreateBucket // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) // -// * +// * // ListBuckets // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html) func (c *Client) ListObjects(ctx context.Context, params *ListObjectsInput, optFns ...func(*Options)) (*ListObjectsOutput, error) { diff --git a/service/s3/api_op_ListObjectsV2.go b/service/s3/api_op_ListObjectsV2.go index 4db3c35d10b..e8ec6c0649b 100644 --- a/service/s3/api_op_ListObjectsV2.go +++ b/service/s3/api_op_ListObjectsV2.go @@ -32,14 +32,15 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html). The // following operations are related to ListObjectsV2: // -// * GetObject +// * GetObject // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) // -// * -// PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// * +// PutObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) // -// -// * CreateBucket +// * +// CreateBucket // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) func (c *Client) ListObjectsV2(ctx context.Context, params *ListObjectsV2Input, optFns ...func(*Options)) (*ListObjectsV2Output, error) { if params == nil { diff --git a/service/s3/api_op_ListParts.go b/service/s3/api_op_ListParts.go index 6f2d5e3a437..4055f02c66d 100644 --- a/service/s3/api_op_ListParts.go +++ b/service/s3/api_op_ListParts.go @@ -30,23 +30,23 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). The // following operations are related to ListParts: // -// * CreateMultipartUpload +// * CreateMultipartUpload // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) // -// -// * UploadPart +// * +// UploadPart // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) // -// * +// * // CompleteMultipartUpload // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) // -// -// * AbortMultipartUpload +// * +// AbortMultipartUpload // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) // -// -// * ListMultipartUploads +// * +// ListMultipartUploads // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) func (c *Client) ListParts(ctx context.Context, params *ListPartsInput, optFns ...func(*Options)) (*ListPartsOutput, error) { if params == nil { diff --git a/service/s3/api_op_PutBucketAccelerateConfiguration.go b/service/s3/api_op_PutBucketAccelerateConfiguration.go index 69c3610a0c3..64661d39c08 100644 --- a/service/s3/api_op_PutBucketAccelerateConfiguration.go +++ b/service/s3/api_op_PutBucketAccelerateConfiguration.go @@ -24,10 +24,10 @@ import ( // Transfer Acceleration state of a bucket can be set to one of the following two // values: // -// * Enabled – Enables accelerated data transfers to the bucket. +// * Enabled – Enables accelerated data transfers to the bucket. // -// -// * Suspended – Disables accelerated data transfers to the bucket. 
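For the ListObjects/ListObjectsV2 hunks above, a pagination sketch that follows NextContinuationToken could look like this (recent aws-sdk-go-v2 release assumed; bucket name is a placeholder). Looping on the token keeps the sketch independent of whether a generated paginator helper is available in the targeted SDK build.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Page through all keys by following NextContinuationToken until it is nil.
	var token *string
	for {
		page, err := client.ListObjectsV2(ctx, &s3.ListObjectsV2Input{
			Bucket:            aws.String("example-bucket"), // placeholder
			ContinuationToken: token,
		})
		if err != nil {
			log.Fatal(err)
		}
		for _, obj := range page.Contents {
			fmt.Println(aws.ToString(obj.Key))
		}
		if page.NextContinuationToken == nil {
			break
		}
		token = page.NextContinuationToken
	}
}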
+// * +// Suspended – Disables accelerated data transfers to the bucket. // // The // GetBucketAccelerateConfiguration @@ -41,12 +41,12 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html). // The following operations are related to PutBucketAccelerateConfiguration: // -// * +// * // GetBucketAccelerateConfiguration // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html) // -// -// * CreateBucket +// * +// CreateBucket // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) func (c *Client) PutBucketAccelerateConfiguration(ctx context.Context, params *PutBucketAccelerateConfigurationInput, optFns ...func(*Options)) (*PutBucketAccelerateConfigurationOutput, error) { if params == nil { diff --git a/service/s3/api_op_PutBucketAcl.go b/service/s3/api_op_PutBucketAcl.go index 42de71c5dc6..4ab7dc986d0 100644 --- a/service/s3/api_op_PutBucketAcl.go +++ b/service/s3/api_op_PutBucketAcl.go @@ -17,31 +17,31 @@ import ( // the ACL of a bucket, you must have WRITE_ACP permission. You can use one of the // following two ways to set a bucket's permissions: // -// * Specify the ACL in the +// * Specify the ACL in the // request body // -// * Specify permissions using request headers +// * Specify permissions using request headers // -// You cannot -// specify access permission using both the body and the request headers. Depending -// on your application needs, you may choose to set the ACL on a bucket using -// either the request body or the headers. For example, if you have an existing -// application that updates a bucket ACL using the request body, then you can -// continue to use that approach. Access Permissions +// You cannot specify +// access permission using both the body and the request headers. Depending on your +// application needs, you may choose to set the ACL on a bucket using either the +// request body or the headers. For example, if you have an existing application +// that updates a bucket ACL using the request body, then you can continue to use +// that approach. Access Permissions // -// You can set access -// permissions using one of the following methods: +// You can set access permissions using one of +// the following methods: // -// * Specify a canned ACL with -// the x-amz-acl request header. Amazon S3 supports a set of predefined ACLs, known -// as canned ACLs. Each canned ACL has a predefined set of grantees and -// permissions. Specify the canned ACL name as the value of x-amz-acl. If you use -// this header, you cannot use other access control-specific headers in your -// request. For more information, see Canned ACL +// * Specify a canned ACL with the x-amz-acl request +// header. Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each +// canned ACL has a predefined set of grantees and permissions. Specify the canned +// ACL name as the value of x-amz-acl. If you use this header, you cannot use other +// access control-specific headers in your request. For more information, see +// Canned ACL // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). // -// -// * Specify access permissions explicitly with the x-amz-grant-read, +// * +// Specify access permissions explicitly with the x-amz-grant-read, // x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control // headers. When using these headers, you specify explicit access permissions and // grantees (AWS accounts or Amazon S3 groups) who will receive the permission. 
If @@ -51,102 +51,99 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). You specify // each grantee as a type=value pair, where the type is one of the following: // +// * id +// – if the value specified is the canonical user ID of an AWS account // -// * id – if the value specified is the canonical user ID of an AWS account -// +// * uri – if +// you are granting permissions to a predefined group // -// * uri – if you are granting permissions to a predefined group +// * emailAddress – if the +// value specified is the email address of an AWS account Using email addresses to +// specify a grantee is only supported in the following AWS Regions: // -// * -// emailAddress – if the value specified is the email address of an AWS account -// Using email addresses to specify a grantee is only supported in the following -// AWS Regions: +// * US East (N. +// Virginia) // -// * US East (N. Virginia) +// * US West (N. California) // -// * US West (N. -// California) +// * US West (Oregon) // -// * US West (Oregon) -// -// * Asia Pacific +// * Asia Pacific // (Singapore) // -// * Asia Pacific (Sydney) +// * Asia Pacific (Sydney) // -// * Asia Pacific -// (Tokyo) +// * Asia Pacific (Tokyo) // -// * Europe (Ireland) +// * Europe +// (Ireland) // -// * South America (São -// Paulo) +// * South America (São Paulo) // -// For a list of all the Amazon S3 supported Regions and endpoints, -// see Regions and Endpoints +// For a list of all the Amazon S3 +// supported Regions and endpoints, see Regions and Endpoints // (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the AWS // General Reference. // -// For example, the following x-amz-grant-write header -// grants create, overwrite, and delete objects permission to LogDelivery group -// predefined by Amazon S3 and two AWS accounts identified by their email -// addresses. x-amz-grant-write: -// uri="http://acs.amazonaws.com/groups/s3/LogDelivery", id="111122223333", -// id="555566667777" +// For example, the following x-amz-grant-write header grants +// create, overwrite, and delete objects permission to LogDelivery group predefined +// by Amazon S3 and two AWS accounts identified by their email addresses. +// x-amz-grant-write: uri="http://acs.amazonaws.com/groups/s3/LogDelivery", +// id="111122223333", id="555566667777" // -// You can use either a canned ACL or specify access permissions -// explicitly. You cannot do both. Grantee Values +// You can use either a canned ACL or specify +// access permissions explicitly. You cannot do both. Grantee Values // -// You can specify the person -// (grantee) to whom you're assigning access rights (using request elements) in the -// following ways: +// You can +// specify the person (grantee) to whom you're assigning access rights (using +// request elements) in the following ways: // -// * By the person's ID: <>ID<><>GranteesEmail<> DisplayName -// is optional and ignored in the request +// * By the person's ID: +// <>ID<><>GranteesEmail<> DisplayName is optional and ignored in the request // -// * By URI: -// <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> +// * +// By URI: <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> // -// * By Email -// address: <>Grantees@email.com<>lt;/Grantee> The grantee is resolved to the +// * By +// Email address: <>Grantees@email.com<>lt;/Grantee> The grantee is resolved to the // CanonicalUser and, in a response to a GET Object acl request, appears as the // CanonicalUser. 
Using email addresses to specify a grantee is only supported in // the following AWS Regions: // -// * US East (N. Virginia) +// * US East (N. Virginia) // -// * US West -// (N. California) -// -// * US West (Oregon) +// * US West (N. +// California) // -// * Asia Pacific -// (Singapore) +// * US West (Oregon) // -// * Asia Pacific (Sydney) +// * Asia Pacific (Singapore) // -// * Asia Pacific (Tokyo) +// * Asia Pacific +// (Sydney) // +// * Asia Pacific (Tokyo) // // * Europe (Ireland) // -// * South America (São Paulo) +// * South America (São +// Paulo) // -// For a list of all -// the Amazon S3 supported Regions and endpoints, see Regions and Endpoints +// For a list of all the Amazon S3 supported Regions and endpoints, see +// Regions and Endpoints // (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the AWS // General Reference. // // Related Resources // -// * CreateBucket +// * CreateBucket // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) // -// * +// * // DeleteBucket // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) // -// * +// * // GetObjectAcl // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html) func (c *Client) PutBucketAcl(ctx context.Context, params *PutBucketAclInput, optFns ...func(*Options)) (*PutBucketAclOutput, error) { diff --git a/service/s3/api_op_PutBucketAnalyticsConfiguration.go b/service/s3/api_op_PutBucketAnalyticsConfiguration.go index 8f1952eeb1f..20a5faf5329 100644 --- a/service/s3/api_op_PutBucketAnalyticsConfiguration.go +++ b/service/s3/api_op_PutBucketAnalyticsConfiguration.go @@ -38,43 +38,42 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). // Special Errors // -// * HTTP Error: HTTP 400 Bad Request +// * HTTP Error: HTTP 400 Bad Request // -// * Code: -// InvalidArgument +// * Code: InvalidArgument // -// * Cause: Invalid argument. +// * +// Cause: Invalid argument. // -// * HTTP Error: HTTP -// 400 Bad Request +// * HTTP Error: HTTP 400 Bad Request // -// * Code: TooManyConfigurations +// * Code: +// TooManyConfigurations // -// * Cause: You are -// attempting to create a new configuration but have already reached the -// 1,000-configuration limit. +// * Cause: You are attempting to create a new configuration +// but have already reached the 1,000-configuration limit. // -// * HTTP Error: HTTP 403 Forbidden +// * HTTP Error: HTTP 403 +// Forbidden // -// * -// Code: AccessDenied +// * Code: AccessDenied // -// * Cause: You are not the owner of the specified +// * Cause: You are not the owner of the specified // bucket, or you do not have the s3:PutAnalyticsConfiguration bucket permission to // set the configuration on the bucket. 
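The PutBucketAcl hunks above describe both canned ACLs and explicit grant headers; since the two cannot be mixed in one request, here is a sketch of the simpler canned-ACL path (recent aws-sdk-go-v2 release assumed; bucket name is a placeholder).

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Apply the "private" canned ACL (maps to the x-amz-acl request header).
	// Explicit grants would instead use the GrantRead/GrantWrite/... fields.
	_, err = client.PutBucketAcl(ctx, &s3.PutBucketAclInput{
		Bucket: aws.String("example-bucket"), // placeholder
		ACL:    types.BucketCannedACLPrivate,
	})
	if err != nil {
		log.Fatal(err)
	}
}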
// // Related Resources // -// * +// * // GetBucketAnalyticsConfiguration // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html) // -// -// * DeleteBucketAnalyticsConfiguration +// * +// DeleteBucketAnalyticsConfiguration // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html) // -// -// * ListBucketAnalyticsConfigurations +// * +// ListBucketAnalyticsConfigurations // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html) func (c *Client) PutBucketAnalyticsConfiguration(ctx context.Context, params *PutBucketAnalyticsConfigurationInput, optFns ...func(*Options)) (*PutBucketAnalyticsConfigurationOutput, error) { if params == nil { diff --git a/service/s3/api_op_PutBucketCors.go b/service/s3/api_op_PutBucketCors.go index 49f38172ef8..caf7db2f4ad 100644 --- a/service/s3/api_op_PutBucketCors.go +++ b/service/s3/api_op_PutBucketCors.go @@ -27,32 +27,31 @@ import ( // CORSRule rule that matches the incoming browser request to enable a cross-origin // request. For a rule to match, the following conditions must be met: // -// * The +// * The // request's Origin header must match AllowedOrigin elements. // -// * The request -// method (for example, GET, PUT, HEAD, and so on) or the -// Access-Control-Request-Method header in case of a pre-flight OPTIONS request -// must be one of the AllowedMethod elements. +// * The request method +// (for example, GET, PUT, HEAD, and so on) or the Access-Control-Request-Method +// header in case of a pre-flight OPTIONS request must be one of the AllowedMethod +// elements. // -// * Every header specified in the -// Access-Control-Request-Headers request header of a pre-flight request must match -// an AllowedHeader element. +// * Every header specified in the Access-Control-Request-Headers +// request header of a pre-flight request must match an AllowedHeader element. // -// For more information about CORS, go to Enabling -// Cross-Origin Resource Sharing +// For +// more information about CORS, go to Enabling Cross-Origin Resource Sharing // (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon Simple // Storage Service Developer Guide. Related Resources // -// * GetBucketCors +// * GetBucketCors // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketCors.html) // -// * +// * // DeleteBucketCors // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html) // -// -// * RESTOPTIONSobject +// * +// RESTOPTIONSobject // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html) func (c *Client) PutBucketCors(ctx context.Context, params *PutBucketCorsInput, optFns ...func(*Options)) (*PutBucketCorsOutput, error) { if params == nil { diff --git a/service/s3/api_op_PutBucketEncryption.go b/service/s3/api_op_PutBucketEncryption.go index 38f3ee974cf..7e4ecce1f2c 100644 --- a/service/s3/api_op_PutBucketEncryption.go +++ b/service/s3/api_op_PutBucketEncryption.go @@ -29,12 +29,12 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) in the // Amazon Simple Storage Service Developer Guide. 
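To accompany the PutBucketCors hunk above, a sketch of installing a single CORS rule might look like the following; the origin and bucket name are placeholders, and a recent aws-sdk-go-v2 release is assumed.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// One CORS rule: browsers at https://example.com may GET and PUT objects.
	_, err = client.PutBucketCors(ctx, &s3.PutBucketCorsInput{
		Bucket: aws.String("example-bucket"), // placeholder
		CORSConfiguration: &types.CORSConfiguration{
			CORSRules: []types.CORSRule{{
				AllowedOrigins: []string{"https://example.com"}, // placeholder origin
				AllowedMethods: []string{"GET", "PUT"},
				AllowedHeaders: []string{"*"},
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}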
Related Resources // -// * +// * // GetBucketEncryption // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html) // -// -// * DeleteBucketEncryption +// * +// DeleteBucketEncryption // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html) func (c *Client) PutBucketEncryption(ctx context.Context, params *PutBucketEncryptionInput, optFns ...func(*Options)) (*PutBucketEncryptionOutput, error) { if params == nil { diff --git a/service/s3/api_op_PutBucketInventoryConfiguration.go b/service/s3/api_op_PutBucketInventoryConfiguration.go index 1a325f9c1bd..571ab7b649b 100644 --- a/service/s3/api_op_PutBucketInventoryConfiguration.go +++ b/service/s3/api_op_PutBucketInventoryConfiguration.go @@ -38,43 +38,42 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) in the // Amazon Simple Storage Service Developer Guide. Special Errors // -// * HTTP 400 -// Bad Request Error +// * HTTP 400 Bad +// Request Error // -// * Code: InvalidArgument +// * Code: InvalidArgument // -// * Cause: Invalid -// Argument +// * Cause: Invalid Argument // -// * HTTP 400 Bad Request Error +// * HTTP 400 +// Bad Request Error // -// * Code: -// TooManyConfigurations +// * Code: TooManyConfigurations // -// * Cause: You are attempting to create a new -// configuration but have already reached the 1,000-configuration limit. +// * Cause: You are attempting to +// create a new configuration but have already reached the 1,000-configuration +// limit. // -// * -// HTTP 403 Forbidden Error +// * HTTP 403 Forbidden Error // -// * Code: AccessDenied +// * Code: AccessDenied // -// * Cause: You are -// not the owner of the specified bucket, or you do not have the +// * Cause: You are not +// the owner of the specified bucket, or you do not have the // s3:PutInventoryConfiguration bucket permission to set the configuration on the // bucket. // // Related Resources // -// * GetBucketInventoryConfiguration +// * GetBucketInventoryConfiguration // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html) // -// -// * DeleteBucketInventoryConfiguration +// * +// DeleteBucketInventoryConfiguration // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html) // -// -// * ListBucketInventoryConfigurations +// * +// ListBucketInventoryConfigurations // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html) func (c *Client) PutBucketInventoryConfiguration(ctx context.Context, params *PutBucketInventoryConfigurationInput, optFns ...func(*Options)) (*PutBucketInventoryConfigurationOutput, error) { if params == nil { diff --git a/service/s3/api_op_PutBucketLifecycleConfiguration.go b/service/s3/api_op_PutBucketLifecycleConfiguration.go index 0deea29f1ca..c7a50817767 100644 --- a/service/s3/api_op_PutBucketLifecycleConfiguration.go +++ b/service/s3/api_op_PutBucketLifecycleConfiguration.go @@ -25,22 +25,21 @@ import ( // lifecycle configuration is specified as XML consisting of one or more rules. // Each rule consists of the following: // -// * Filter identifying a subset of -// objects to which the rule applies. The filter can be based on a key name prefix, -// object tags, or a combination of both. +// * Filter identifying a subset of objects +// to which the rule applies. The filter can be based on a key name prefix, object +// tags, or a combination of both. // -// * Status whether the rule is in -// effect. +// * Status whether the rule is in effect. 
// -// * One or more lifecycle transition and expiration actions that you -// want Amazon S3 to perform on the objects identified by the filter. If the state -// of your bucket is versioning-enabled or versioning-suspended, you can have many -// versions of the same object (one current version and zero or more noncurrent -// versions). Amazon S3 provides predefined actions that you can specify for -// current and noncurrent object versions. +// * One +// or more lifecycle transition and expiration actions that you want Amazon S3 to +// perform on the objects identified by the filter. If the state of your bucket is +// versioning-enabled or versioning-suspended, you can have many versions of the +// same object (one current version and zero or more noncurrent versions). Amazon +// S3 provides predefined actions that you can specify for current and noncurrent +// object versions. // -// For more information, see Object -// Lifecycle Management +// For more information, see Object Lifecycle Management // (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) and // Lifecycle Configuration Elements // (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html). @@ -55,11 +54,11 @@ import ( // objects from your bucket, you must deny them permissions for the following // actions: // -// * s3:DeleteObject +// * s3:DeleteObject // -// * s3:DeleteObjectVersion +// * s3:DeleteObjectVersion // -// * +// * // s3:PutLifecycleConfiguration // // For more information about permissions, see @@ -67,16 +66,16 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). The // following are related to PutBucketLifecycleConfiguration: // -// * Examples of +// * Examples of // Lifecycle Configuration // (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-configuration-examples.html) // -// -// * GetBucketLifecycleConfiguration +// * +// GetBucketLifecycleConfiguration // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) // -// -// * DeleteBucketLifecycle +// * +// DeleteBucketLifecycle // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html) func (c *Client) PutBucketLifecycleConfiguration(ctx context.Context, params *PutBucketLifecycleConfigurationInput, optFns ...func(*Options)) (*PutBucketLifecycleConfigurationOutput, error) { if params == nil { diff --git a/service/s3/api_op_PutBucketLogging.go b/service/s3/api_op_PutBucketLogging.go index db60dee8f93..7da5dd3f268 100644 --- a/service/s3/api_op_PutBucketLogging.go +++ b/service/s3/api_op_PutBucketLogging.go @@ -18,17 +18,17 @@ import ( // to all logs. You use the Grantee request element to grant access to other // people. The Permissions request element specifies the kind of access the grantee // has to the logs. Grantee Values You can specify the person (grantee) to whom -// you're assigning access rights (using request elements) in the following ways: +// you're assigning access rights (using request elements) in the following +// ways: // +// * By the person's ID: <>ID<><>GranteesEmail<> DisplayName is optional +// and ignored in the request. // -// * By the person's ID: <>ID<><>GranteesEmail<> DisplayName is optional and -// ignored in the request. -// -// * By Email address: <>Grantees@email.com<> The +// * By Email address: <>Grantees@email.com<> The // grantee is resolved to the CanonicalUser and, in a response to a GET Object acl // request, appears as the CanonicalUser. 
// -// * By URI: +// * By URI: // <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> // // To enable @@ -43,18 +43,18 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html). The // following operations are related to PutBucketLogging: // -// * PutObject +// * PutObject // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) // -// * +// * // DeleteBucket // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) // -// * +// * // CreateBucket // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) // -// * +// * // GetBucketLogging // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html) func (c *Client) PutBucketLogging(ctx context.Context, params *PutBucketLoggingInput, optFns ...func(*Options)) (*PutBucketLoggingOutput, error) { diff --git a/service/s3/api_op_PutBucketMetricsConfiguration.go b/service/s3/api_op_PutBucketMetricsConfiguration.go index a2acfea5428..8ef174e1833 100644 --- a/service/s3/api_op_PutBucketMetricsConfiguration.go +++ b/service/s3/api_op_PutBucketMetricsConfiguration.go @@ -28,29 +28,28 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). // The following operations are related to PutBucketMetricsConfiguration: // -// * +// * // DeleteBucketMetricsConfiguration // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html) // -// -// * PutBucketMetricsConfiguration +// * +// PutBucketMetricsConfiguration // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) // -// -// * ListBucketMetricsConfigurations +// * +// ListBucketMetricsConfigurations // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html) // // GetBucketLifecycle // has the following special error: // -// * Error code: TooManyConfigurations -// +// * Error code: TooManyConfigurations // -// * Description: You are attempting to create a new configuration but have already +// * +// Description: You are attempting to create a new configuration but have already // reached the 1,000-configuration limit. // -// * HTTP Status Code: HTTP 400 Bad -// Request +// * HTTP Status Code: HTTP 400 Bad Request func (c *Client) PutBucketMetricsConfiguration(ctx context.Context, params *PutBucketMetricsConfigurationInput, optFns ...func(*Options)) (*PutBucketMetricsConfigurationOutput, error) { if params == nil { params = &PutBucketMetricsConfigurationInput{} diff --git a/service/s3/api_op_PutBucketNotificationConfiguration.go b/service/s3/api_op_PutBucketNotificationConfiguration.go index 87859799a90..7b8325f7fc5 100644 --- a/service/s3/api_op_PutBucketNotificationConfiguration.go +++ b/service/s3/api_op_PutBucketNotificationConfiguration.go @@ -44,7 +44,7 @@ import ( // the test notification sent to the topic. 
The following operation is related to // PutBucketNotificationConfiguration: // -// * GetBucketNotificationConfiguration +// * GetBucketNotificationConfiguration // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html) func (c *Client) PutBucketNotificationConfiguration(ctx context.Context, params *PutBucketNotificationConfigurationInput, optFns ...func(*Options)) (*PutBucketNotificationConfigurationOutput, error) { if params == nil { diff --git a/service/s3/api_op_PutBucketOwnershipControls.go b/service/s3/api_op_PutBucketOwnershipControls.go index faae83f0e60..e9433149e0f 100644 --- a/service/s3/api_op_PutBucketOwnershipControls.go +++ b/service/s3/api_op_PutBucketOwnershipControls.go @@ -19,10 +19,10 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html). // The following operations are related to GetBucketOwnershipControls: // -// * +// * // GetBucketOwnershipControls // -// * DeleteBucketOwnershipControls +// * DeleteBucketOwnershipControls func (c *Client) PutBucketOwnershipControls(ctx context.Context, params *PutBucketOwnershipControlsInput, optFns ...func(*Options)) (*PutBucketOwnershipControlsOutput, error) { if params == nil { params = &PutBucketOwnershipControlsInput{} diff --git a/service/s3/api_op_PutBucketPolicy.go b/service/s3/api_op_PutBucketPolicy.go index 7b33488d9a9..0ac4dca4056 100644 --- a/service/s3/api_op_PutBucketPolicy.go +++ b/service/s3/api_op_PutBucketPolicy.go @@ -24,10 +24,10 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). The // following operations are related to PutBucketPolicy: // -// * CreateBucket +// * CreateBucket // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) // -// * +// * // DeleteBucket // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) func (c *Client) PutBucketPolicy(ctx context.Context, params *PutBucketPolicyInput, optFns ...func(*Options)) (*PutBucketPolicyOutput, error) { diff --git a/service/s3/api_op_PutBucketReplication.go b/service/s3/api_op_PutBucketReplication.go index 2507987a98f..54f867c722d 100644 --- a/service/s3/api_op_PutBucketReplication.go +++ b/service/s3/api_op_PutBucketReplication.go @@ -58,12 +58,12 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList) // The following operations are related to PutBucketReplication: // -// * +// * // GetBucketReplication // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html) // -// -// * DeleteBucketReplication +// * +// DeleteBucketReplication // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html) func (c *Client) PutBucketReplication(ctx context.Context, params *PutBucketReplicationInput, optFns ...func(*Options)) (*PutBucketReplicationOutput, error) { if params == nil { diff --git a/service/s3/api_op_PutBucketRequestPayment.go b/service/s3/api_op_PutBucketRequestPayment.go index 040838a692d..db03c834db8 100644 --- a/service/s3/api_op_PutBucketRequestPayment.go +++ b/service/s3/api_op_PutBucketRequestPayment.go @@ -18,10 +18,10 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html). 
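For the PutBucketPolicy hunk above, a sketch of attaching a bucket policy could look like this. The policy document (a deny-insecure-transport statement) and the bucket ARN are placeholders chosen for illustration; a recent aws-sdk-go-v2 release is assumed.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Deny all S3 actions over plain HTTP; the bucket ARN is a placeholder.
	policy := `{
	  "Version": "2012-10-17",
	  "Statement": [{
	    "Sid": "DenyInsecureTransport",
	    "Effect": "Deny",
	    "Principal": "*",
	    "Action": "s3:*",
	    "Resource": ["arn:aws:s3:::example-bucket", "arn:aws:s3:::example-bucket/*"],
	    "Condition": {"Bool": {"aws:SecureTransport": "false"}}
	  }]
	}`

	_, err = client.PutBucketPolicy(ctx, &s3.PutBucketPolicyInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Policy: aws.String(policy),
	})
	if err != nil {
		log.Fatal(err)
	}
}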
The // following operations are related to PutBucketRequestPayment: // -// * CreateBucket +// * CreateBucket // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) // -// * +// * // GetBucketRequestPayment // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketRequestPayment.html) func (c *Client) PutBucketRequestPayment(ctx context.Context, params *PutBucketRequestPaymentInput, optFns ...func(*Options)) (*PutBucketRequestPaymentOutput, error) { diff --git a/service/s3/api_op_PutBucketTagging.go b/service/s3/api_op_PutBucketTagging.go index bd6af9a3fb0..3993faf5df8 100644 --- a/service/s3/api_op_PutBucketTagging.go +++ b/service/s3/api_op_PutBucketTagging.go @@ -32,42 +32,41 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). // PutBucketTagging has the following special errors: // -// * Error code: +// * Error code: // InvalidTagError // -// * Description: The tag provided was not a valid tag. -// This error can occur if the tag did not pass input validation. For information -// about tag restrictions, see User-Defined Tag Restrictions +// * Description: The tag provided was not a valid tag. This error +// can occur if the tag did not pass input validation. For information about tag +// restrictions, see User-Defined Tag Restrictions // (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html) // and AWS-Generated Cost Allocation Tag Restrictions // (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/aws-tag-restrictions.html). // +// * +// Error code: MalformedXMLError // -// * Error code: MalformedXMLError +// * Description: The XML provided does not match +// the schema. // -// * Description: The XML provided does -// not match the schema. +// * Error code: OperationAbortedError // -// * Error code: OperationAbortedError +// * Description: A conflicting +// conditional operation is currently in progress against this resource. Please try +// again. // -// * -// Description: A conflicting conditional operation is currently in progress -// against this resource. Please try again. +// * Error code: InternalError // -// * Error code: InternalError +// * Description: The service was unable to +// apply the provided tag to the bucket. // +// The following operations are related to +// PutBucketTagging: // -// * Description: The service was unable to apply the provided tag to the -// bucket. -// -// The following operations are related to PutBucketTagging: -// -// * -// GetBucketTagging +// * GetBucketTagging // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html) // -// -// * DeleteBucketTagging +// * +// DeleteBucketTagging // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html) func (c *Client) PutBucketTagging(ctx context.Context, params *PutBucketTaggingInput, optFns ...func(*Options)) (*PutBucketTaggingOutput, error) { if params == nil { diff --git a/service/s3/api_op_PutBucketVersioning.go b/service/s3/api_op_PutBucketVersioning.go index 84d6c741d91..43f7f8603f6 100644 --- a/service/s3/api_op_PutBucketVersioning.go +++ b/service/s3/api_op_PutBucketVersioning.go @@ -32,14 +32,14 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-and-other-bucket-config). 
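Next to the PutBucketTagging hunk above, a sketch of replacing a bucket's tag set might look like this; tag keys/values and the bucket name are placeholders, and a recent aws-sdk-go-v2 release is assumed.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// PutBucketTagging replaces the whole tag set, so include every tag you
	// want to keep; tags that fail validation surface as InvalidTagError above.
	_, err = client.PutBucketTagging(ctx, &s3.PutBucketTaggingInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Tagging: &types.Tagging{
			TagSet: []types.Tag{
				{Key: aws.String("team"), Value: aws.String("storage")},   // placeholder tags
				{Key: aws.String("env"), Value: aws.String("production")},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}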
// Related Resources // -// * CreateBucket +// * CreateBucket // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) // -// * +// * // DeleteBucket // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) // -// * +// * // GetBucketVersioning // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html) func (c *Client) PutBucketVersioning(ctx context.Context, params *PutBucketVersioningInput, optFns ...func(*Options)) (*PutBucketVersioningOutput, error) { diff --git a/service/s3/api_op_PutBucketWebsite.go b/service/s3/api_op_PutBucketWebsite.go index 151ca55b5a2..0f834136f96 100644 --- a/service/s3/api_op_PutBucketWebsite.go +++ b/service/s3/api_op_PutBucketWebsite.go @@ -23,16 +23,16 @@ import ( // bucket policy that grants them the S3:PutBucketWebsite permission. To redirect // all website requests sent to the bucket's website endpoint, you add a website // configuration with the following elements. Because all requests are sent to -// another website, you don't need to provide index document name for the bucket. -// +// another website, you don't need to provide index document name for the +// bucket. // // * WebsiteConfiguration // -// * RedirectAllRequestsTo +// * RedirectAllRequestsTo // -// * HostName +// * HostName // -// * +// * // Protocol // // If you want granular control over redirects, you can use the following @@ -41,46 +41,45 @@ import ( // configuration must provide an index document for the bucket, because some // requests might not be redirected. // -// * WebsiteConfiguration +// * WebsiteConfiguration // -// * -// IndexDocument +// * IndexDocument // -// * Suffix +// * +// Suffix // -// * ErrorDocument +// * ErrorDocument // -// * Key +// * Key // -// * -// RoutingRules +// * RoutingRules // -// * RoutingRule +// * RoutingRule // -// * Condition +// * Condition // -// * +// * // HttpErrorCodeReturnedEquals // -// * KeyPrefixEquals +// * KeyPrefixEquals // -// * Redirect -// -// * -// Protocol +// * Redirect // -// * HostName +// * Protocol // -// * ReplaceKeyPrefixWith +// * +// HostName // -// * ReplaceKeyWith +// * ReplaceKeyPrefixWith // +// * ReplaceKeyWith // // * HttpRedirectCode // -// Amazon S3 has a limitation of 50 routing rules per website -// configuration. If you require more than 50 routing rules, you can use object -// redirect. For more information, see Configuring an Object Redirect +// Amazon +// S3 has a limitation of 50 routing rules per website configuration. If you +// require more than 50 routing rules, you can use object redirect. For more +// information, see Configuring an Object Redirect // (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html) in // the Amazon Simple Storage Service Developer Guide. func (c *Client) PutBucketWebsite(ctx context.Context, params *PutBucketWebsiteInput, optFns ...func(*Options)) (*PutBucketWebsiteOutput, error) { diff --git a/service/s3/api_op_PutObject.go b/service/s3/api_op_PutObject.go index ebf787ff936..adbd9fc70e8 100644 --- a/service/s3/api_op_PutObject.go +++ b/service/s3/api_op_PutObject.go @@ -64,10 +64,10 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html). 
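For the PutBucketVersioning hunks above, enabling versioning on a bucket could be sketched as follows (recent aws-sdk-go-v2 release assumed; bucket name is a placeholder). Switching the status to Suspended later stops new versions from being created but leaves existing ones in place.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Turn on versioning for the bucket.
	_, err = client.PutBucketVersioning(ctx, &s3.PutBucketVersioningInput{
		Bucket: aws.String("example-bucket"), // placeholder
		VersioningConfiguration: &types.VersioningConfiguration{
			Status: types.BucketVersioningStatusEnabled,
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}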
// Related Resources // -// * CopyObject +// * CopyObject // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) // -// * +// * // DeleteObject // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) func (c *Client) PutObject(ctx context.Context, params *PutObjectInput, optFns ...func(*Options)) (*PutObjectOutput, error) { diff --git a/service/s3/api_op_PutObjectAcl.go b/service/s3/api_op_PutObjectAcl.go index 5549302154f..7448a5130b2 100644 --- a/service/s3/api_op_PutObjectAcl.go +++ b/service/s3/api_op_PutObjectAcl.go @@ -28,16 +28,16 @@ import ( // You can set access permissions // using one of the following methods: // -// * Specify a canned ACL with the -// x-amz-acl request header. Amazon S3 supports a set of predefined ACLs, known as -// canned ACLs. Each canned ACL has a predefined set of grantees and permissions. -// Specify the canned ACL name as the value of x-amz-acl. If you use this header, -// you cannot use other access control-specific headers in your request. For more +// * Specify a canned ACL with the x-amz-acl +// request header. Amazon S3 supports a set of predefined ACLs, known as canned +// ACLs. Each canned ACL has a predefined set of grantees and permissions. Specify +// the canned ACL name as the value of x-amz-acl. If you use this header, you +// cannot use other access control-specific headers in your request. For more // information, see Canned ACL // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). // -// -// * Specify access permissions explicitly with the x-amz-grant-read, +// * +// Specify access permissions explicitly with the x-amz-grant-read, // x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control // headers. When using these headers, you specify explicit access permissions and // grantees (AWS accounts or Amazon S3 groups) who will receive the permission. If @@ -47,44 +47,42 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). You specify // each grantee as a type=value pair, where the type is one of the following: // +// * id +// – if the value specified is the canonical user ID of an AWS account // -// * id – if the value specified is the canonical user ID of an AWS account -// +// * uri – if +// you are granting permissions to a predefined group // -// * uri – if you are granting permissions to a predefined group +// * emailAddress – if the +// value specified is the email address of an AWS account Using email addresses to +// specify a grantee is only supported in the following AWS Regions: // -// * -// emailAddress – if the value specified is the email address of an AWS account -// Using email addresses to specify a grantee is only supported in the following -// AWS Regions: +// * US East (N. +// Virginia) // -// * US East (N. Virginia) +// * US West (N. California) // -// * US West (N. 
-// California) +// * US West (Oregon) // -// * US West (Oregon) -// -// * Asia Pacific +// * Asia Pacific // (Singapore) // -// * Asia Pacific (Sydney) +// * Asia Pacific (Sydney) // -// * Asia Pacific -// (Tokyo) +// * Asia Pacific (Tokyo) // -// * Europe (Ireland) +// * Europe +// (Ireland) // -// * South America (São -// Paulo) +// * South America (São Paulo) // -// For a list of all the Amazon S3 supported Regions and endpoints, -// see Regions and Endpoints +// For a list of all the Amazon S3 +// supported Regions and endpoints, see Regions and Endpoints // (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the AWS // General Reference. // -// For example, the following x-amz-grant-read header -// grants list objects permission to the two AWS accounts identified by their email +// For example, the following x-amz-grant-read header grants +// list objects permission to the two AWS accounts identified by their email // addresses. x-amz-grant-read: emailAddress="xyz@amazon.com", // emailAddress="abc@amazon.com" // @@ -95,39 +93,39 @@ import ( // person (grantee) to whom you're assigning access rights (using request elements) // in the following ways: // -// * By the person's ID: <>ID<><>GranteesEmail<> +// * By the person's ID: <>ID<><>GranteesEmail<> // DisplayName is optional and ignored in the request. // -// * By URI: +// * By URI: // <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> // -// * By Email +// * By Email // address: <>Grantees@email.com<>lt;/Grantee> The grantee is resolved to the // CanonicalUser and, in a response to a GET Object acl request, appears as the // CanonicalUser. Using email addresses to specify a grantee is only supported in // the following AWS Regions: // -// * US East (N. Virginia) +// * US East (N. Virginia) // -// * US West -// (N. California) -// -// * US West (Oregon) +// * US West (N. +// California) // -// * Asia Pacific -// (Singapore) +// * US West (Oregon) // -// * Asia Pacific (Sydney) +// * Asia Pacific (Singapore) // -// * Asia Pacific (Tokyo) +// * Asia Pacific +// (Sydney) // +// * Asia Pacific (Tokyo) // // * Europe (Ireland) // -// * South America (São Paulo) +// * South America (São +// Paulo) // -// For a list of all -// the Amazon S3 supported Regions and endpoints, see Regions and Endpoints +// For a list of all the Amazon S3 supported Regions and endpoints, see +// Regions and Endpoints // (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the AWS // General Reference. // @@ -136,10 +134,10 @@ import ( // the ACL of a different version, use the versionId subresource. Related // Resources // -// * CopyObject +// * CopyObject // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) // -// * +// * // GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) func (c *Client) PutObjectAcl(ctx context.Context, params *PutObjectAclInput, optFns ...func(*Options)) (*PutObjectAclOutput, error) { if params == nil { diff --git a/service/s3/api_op_PutObjectLegalHold.go b/service/s3/api_op_PutObjectLegalHold.go index 039a36d0a05..119c29ef4a5 100644 --- a/service/s3/api_op_PutObjectLegalHold.go +++ b/service/s3/api_op_PutObjectLegalHold.go @@ -14,7 +14,7 @@ import ( // Applies a Legal Hold configuration to the specified object. This action is not // supported by Amazon S3 on Outposts. 
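Tying together the PutObject and PutObjectAcl hunks above, a sketch of uploading a small object and then applying a canned ACL to it might look like this; the bucket, key, and body content are placeholders, and a recent aws-sdk-go-v2 release is assumed.

package main

import (
	"context"
	"log"
	"strings"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Upload a small object from an in-memory reader.
	_, err = client.PutObject(ctx, &s3.PutObjectInput{
		Bucket:      aws.String("example-bucket"),  // placeholder
		Key:         aws.String("notes/hello.txt"), // placeholder
		Body:        strings.NewReader("hello from the sketch"),
		ContentType: aws.String("text/plain"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Then set a canned ACL on the new object (the x-amz-acl path).
	_, err = client.PutObjectAcl(ctx, &s3.PutObjectAclInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("notes/hello.txt"),
		ACL:    types.ObjectCannedACLPrivate,
	})
	if err != nil {
		log.Fatal(err)
	}
}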
Related Resources // -// * Locking Objects +// * Locking Objects // (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) func (c *Client) PutObjectLegalHold(ctx context.Context, params *PutObjectLegalHoldInput, optFns ...func(*Options)) (*PutObjectLegalHoldOutput, error) { if params == nil { diff --git a/service/s3/api_op_PutObjectLockConfiguration.go b/service/s3/api_op_PutObjectLockConfiguration.go index 09bc8a4a5a9..08dd5806a1f 100644 --- a/service/s3/api_op_PutObjectLockConfiguration.go +++ b/service/s3/api_op_PutObjectLockConfiguration.go @@ -16,8 +16,8 @@ import ( // placed in the specified bucket. DefaultRetention requires either Days or Years. // You can't specify both at the same time. Related Resources // -// * Locking -// Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) +// * Locking Objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) func (c *Client) PutObjectLockConfiguration(ctx context.Context, params *PutObjectLockConfigurationInput, optFns ...func(*Options)) (*PutObjectLockConfigurationOutput, error) { if params == nil { params = &PutObjectLockConfigurationInput{} diff --git a/service/s3/api_op_PutObjectRetention.go b/service/s3/api_op_PutObjectRetention.go index dfe4a6a32d3..35a72fe41ac 100644 --- a/service/s3/api_op_PutObjectRetention.go +++ b/service/s3/api_op_PutObjectRetention.go @@ -14,7 +14,7 @@ import ( // Places an Object Retention configuration on an object. This action is not // supported by Amazon S3 on Outposts. Related Resources // -// * Locking Objects +// * Locking Objects // (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) func (c *Client) PutObjectRetention(ctx context.Context, params *PutObjectRetentionInput, optFns ...func(*Options)) (*PutObjectRetentionOutput, error) { if params == nil { diff --git a/service/s3/api_op_PutObjectTagging.go b/service/s3/api_op_PutObjectTagging.go index c7083b7f17c..e5ce7661a82 100644 --- a/service/s3/api_op_PutObjectTagging.go +++ b/service/s3/api_op_PutObjectTagging.go @@ -29,34 +29,33 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). Special // Errors // -// * Code: InvalidTagError +// * Code: InvalidTagError // -// * Cause: The tag provided was -// not a valid tag. This error can occur if the tag did not pass input validation. -// For more information, see Object Tagging +// * Cause: The tag provided was not a valid tag. +// This error can occur if the tag did not pass input validation. For more +// information, see Object Tagging // (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). // +// * Code: +// MalformedXMLError // -// * Code: MalformedXMLError +// * Cause: The XML provided does not match the schema. // -// * Cause: The XML provided does not match the -// schema. +// * Code: +// OperationAbortedError // -// * Code: OperationAbortedError +// * Cause: A conflicting conditional operation is currently +// in progress against this resource. Please try again. // -// * Cause: A conflicting -// conditional operation is currently in progress against this resource. Please try -// again. +// * Code: InternalError // -// * Code: InternalError +// * +// Cause: The service was unable to apply the provided tag to the object. // -// * Cause: The service was unable -// to apply the provided tag to the object. 
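For the PutObjectTagging hunk above, replacing an object's tag set could be sketched as follows; bucket, key, and tag values are placeholders, and a recent aws-sdk-go-v2 release is assumed.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Replace the object's tag set; invalid tags surface as the
	// InvalidTagError / MalformedXMLError cases listed above.
	_, err = client.PutObjectTagging(ctx, &s3.PutObjectTaggingInput{
		Bucket: aws.String("example-bucket"),  // placeholder
		Key:    aws.String("notes/hello.txt"), // placeholder
		Tagging: &types.Tagging{
			TagSet: []types.Tag{{Key: aws.String("classification"), Value: aws.String("internal")}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}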
+// Related +// Resources // -// Related Resources -// -// * -// GetObjectTagging +// * GetObjectTagging // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) func (c *Client) PutObjectTagging(ctx context.Context, params *PutObjectTaggingInput, optFns ...func(*Options)) (*PutObjectTaggingOutput, error) { if params == nil { diff --git a/service/s3/api_op_PutPublicAccessBlock.go b/service/s3/api_op_PutPublicAccessBlock.go index 0d93d4e6aae..37a4b3c05ef 100644 --- a/service/s3/api_op_PutPublicAccessBlock.go +++ b/service/s3/api_op_PutPublicAccessBlock.go @@ -26,19 +26,19 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status). // Related Resources // -// * GetPublicAccessBlock +// * GetPublicAccessBlock // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) // -// -// * DeletePublicAccessBlock +// * +// DeletePublicAccessBlock // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) // -// -// * GetBucketPolicyStatus +// * +// GetBucketPolicyStatus // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html) // -// -// * Using Amazon S3 Block Public Access +// * +// Using Amazon S3 Block Public Access // (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) func (c *Client) PutPublicAccessBlock(ctx context.Context, params *PutPublicAccessBlockInput, optFns ...func(*Options)) (*PutPublicAccessBlockOutput, error) { if params == nil { diff --git a/service/s3/api_op_RestoreObject.go b/service/s3/api_op_RestoreObject.go index 5f53c12d9d6..327f91d87bd 100644 --- a/service/s3/api_op_RestoreObject.go +++ b/service/s3/api_op_RestoreObject.go @@ -15,14 +15,14 @@ import ( // supported by Amazon S3 on Outposts. This action performs the following types of // requests: // -// * select - Perform a select query on an archived object +// * select - Perform a select query on an archived object // -// * -// restore an archive - Restore an archived object +// * restore an +// archive - Restore an archived object // -// To use this operation, you must -// have permissions to perform the s3:RestoreObject action. The bucket owner has -// this permission by default and can grant this permission to others. For more +// To use this operation, you must have +// permissions to perform the s3:RestoreObject action. The bucket owner has this +// permission by default and can grant this permission to others. For more // information about permissions, see Permissions Related to Bucket Subresource // Operations // (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) @@ -39,75 +39,75 @@ import ( // in the Amazon Simple Storage Service Developer Guide. When making a select // request, do the following: // -// * Define an output location for the select -// query's output. This must be an Amazon S3 bucket in the same AWS Region as the -// bucket that contains the archive object that is being queried. The AWS account -// that initiates the job must have permissions to write to the S3 bucket. You can +// * Define an output location for the select query's +// output. This must be an Amazon S3 bucket in the same AWS Region as the bucket +// that contains the archive object that is being queried. The AWS account that +// initiates the job must have permissions to write to the S3 bucket. 
You can // specify the storage class and encryption for the output objects stored in the // bucket. For more information about output, see Querying Archived Objects // (https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html) // in the Amazon Simple Storage Service Developer Guide. For more information about // the S3 structure in the request body, see the following: // -// * PutObject +// * PutObject // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) // -// * -// Managing Access with ACLs +// * Managing +// Access with ACLs // (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html) in the // Amazon Simple Storage Service Developer Guide // -// * Protecting Data Using +// * Protecting Data Using // Server-Side Encryption // (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) in // the Amazon Simple Storage Service Developer Guide // -// * Define the SQL -// expression for the SELECT type of restoration for your query in the request -// body's SelectParameters structure. You can use expressions like the following +// * Define the SQL expression +// for the SELECT type of restoration for your query in the request body's +// SelectParameters structure. You can use expressions like the following // examples. // -// * The following expression returns all records from the -// specified object. SELECT * FROM Object +// * The following expression returns all records from the specified +// object. SELECT * FROM Object // -// * Assuming that you are not -// using any headers for data stored in the object, you can specify columns with -// positional headers. SELECT s._1, s._2 FROM Object s WHERE s._3 > 100 +// * Assuming that you are not using any headers for +// data stored in the object, you can specify columns with positional headers. +// SELECT s._1, s._2 FROM Object s WHERE s._3 > 100 // -// * -// If you have headers and you set the fileHeaderInfo in the CSV structure in the -// request body to USE, you can specify headers in the query. (If you set the -// fileHeaderInfo field to IGNORE, the first row is skipped for the query.) You -// cannot mix ordinal positions with header column names. SELECT s.Id, s.FirstName, -// s.SSN FROM S3Object s +// * If you have headers and you +// set the fileHeaderInfo in the CSV structure in the request body to USE, you can +// specify headers in the query. (If you set the fileHeaderInfo field to IGNORE, +// the first row is skipped for the query.) You cannot mix ordinal positions with +// header column names. SELECT s.Id, s.FirstName, s.SSN FROM S3Object s // -// For more information about using SQL with S3 Glacier -// Select restore, see SQL Reference for Amazon S3 Select and S3 Glacier Select +// For more +// information about using SQL with S3 Glacier Select restore, see SQL Reference +// for Amazon S3 Select and S3 Glacier Select // (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) // in the Amazon Simple Storage Service Developer Guide. When making a select // request, you can also do the following: // -// * To expedite your queries, specify -// the Expedited tier. For more information about tiers, see "Restoring Archives," +// * To expedite your queries, specify the +// Expedited tier. For more information about tiers, see "Restoring Archives," // later in this topic. 
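To connect the SELECT-restore walkthrough above to code, the following is a minimal sketch of a SELECT-type RestoreObject request, not part of the generated change. The bucket names, prefix, and SQL expression are placeholders (the expression is taken from the positional-header example in the doc comment), and it assumes an already-constructed *s3.Client.

package sketches

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// selectRestore issues a SELECT-type restore against an archived CSV object and
// writes the query results to a separate results bucket in the same Region.
func selectRestore(ctx context.Context, client *s3.Client) error {
	_, err := client.RestoreObject(ctx, &s3.RestoreObjectInput{
		Bucket: aws.String("archive-bucket"),    // placeholder source bucket
		Key:    aws.String("archived/data.csv"), // placeholder archived object
		RestoreRequest: &types.RestoreRequest{
			Type: types.RestoreRequestTypeSelect,
			Tier: types.TierStandard,
			SelectParameters: &types.SelectParameters{
				Expression:          aws.String("SELECT s._1, s._2 FROM Object s WHERE s._3 > 100"),
				ExpressionType:      types.ExpressionTypeSql,
				InputSerialization:  &types.InputSerialization{CSV: &types.CSVInput{}},
				OutputSerialization: &types.OutputSerialization{CSV: &types.CSVOutput{}},
			},
			OutputLocation: &types.OutputLocation{
				S3: &types.S3Location{
					BucketName: aws.String("results-bucket"),  // placeholder output bucket
					Prefix:     aws.String("select-results/"), // placeholder output prefix
				},
			},
		},
	})
	return err
}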
// -// * Specify details about the data serialization format -// of both the input object that is being queried and the serialization of the +// * Specify details about the data serialization format of +// both the input object that is being queried and the serialization of the // CSV-encoded query results. // // The following are additional important facts about // the select feature: // -// * The output results are new Amazon S3 objects. Unlike +// * The output results are new Amazon S3 objects. Unlike // archive retrievals, they are stored until explicitly deleted-manually or through // a lifecycle policy. // -// * You can issue more than one select request on the -// same Amazon S3 object. Amazon S3 doesn't deduplicate requests, so avoid issuing +// * You can issue more than one select request on the same +// Amazon S3 object. Amazon S3 doesn't deduplicate requests, so avoid issuing // duplicate requests. // -// * Amazon S3 accepts a select request even if the object -// has already been restored. A select request doesn’t return error response +// * Amazon S3 accepts a select request even if the object has +// already been restored. A select request doesn’t return error response // 409. // // Restoring Archives Objects in the GLACIER and DEEP_ARCHIVE storage classes @@ -124,31 +124,30 @@ import ( // the following data access tier options in the Tier element of the request // body: // -// * Expedited - Expedited retrievals allow you to quickly access your -// data stored in the GLACIER storage class when occasional urgent requests for a -// subset of archives are required. For all but the largest archived objects (250 -// MB+), data accessed using Expedited retrievals are typically made available -// within 1–5 minutes. Provisioned capacity ensures that retrieval capacity for -// Expedited retrievals is available when you need it. Expedited retrievals and -// provisioned capacity are not available for the DEEP_ARCHIVE storage class. -// -// -// * Standard - S3 Standard retrievals allow you to access any of your archived -// objects within several hours. This is the default option for the GLACIER and -// DEEP_ARCHIVE retrieval requests that do not specify the retrieval option. S3 -// Standard retrievals typically complete within 3-5 hours from the GLACIER storage -// class and typically complete within 12 hours from the DEEP_ARCHIVE storage -// class. -// -// * Bulk - Bulk retrievals are Amazon S3 Glacier’s lowest-cost -// retrieval option, enabling you to retrieve large amounts, even petabytes, of -// data inexpensively in a day. Bulk retrievals typically complete within 5-12 -// hours from the GLACIER storage class and typically complete within 48 hours from -// the DEEP_ARCHIVE storage class. -// -// For more information about archive retrieval -// options and provisioned capacity for Expedited data access, see Restoring -// Archived Objects +// * Expedited - Expedited retrievals allow you to quickly access your data +// stored in the GLACIER storage class when occasional urgent requests for a subset +// of archives are required. For all but the largest archived objects (250 MB+), +// data accessed using Expedited retrievals are typically made available within 1–5 +// minutes. Provisioned capacity ensures that retrieval capacity for Expedited +// retrievals is available when you need it. Expedited retrievals and provisioned +// capacity are not available for the DEEP_ARCHIVE storage class. 
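As a companion to the Expedited tier description just above, here is a hedged sketch of a plain archive-retrieval restore that requests the Expedited tier for two days. It is not part of the generated change; names are placeholders, and the non-pointer Days scalar reflects the field shapes generated for this version of the client.

package sketches

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// expeditedRestore makes an archived GLACIER object available for two days
// using the Expedited retrieval tier. As noted above, Expedited is not
// available for the DEEP_ARCHIVE storage class.
func expeditedRestore(ctx context.Context, client *s3.Client) error {
	_, err := client.RestoreObject(ctx, &s3.RestoreObjectInput{
		Bucket: aws.String("archive-bucket"),     // placeholder bucket
		Key:    aws.String("archived/report.gz"), // placeholder key
		RestoreRequest: &types.RestoreRequest{
			Days: 2, // lifetime of the temporary restored copy, in days
			GlacierJobParameters: &types.GlacierJobParameters{
				Tier: types.TierExpedited,
			},
		},
	})
	return err
}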
+// +// * Standard - S3 +// Standard retrievals allow you to access any of your archived objects within +// several hours. This is the default option for the GLACIER and DEEP_ARCHIVE +// retrieval requests that do not specify the retrieval option. S3 Standard +// retrievals typically complete within 3-5 hours from the GLACIER storage class +// and typically complete within 12 hours from the DEEP_ARCHIVE storage class. +// +// * +// Bulk - Bulk retrievals are Amazon S3 Glacier’s lowest-cost retrieval option, +// enabling you to retrieve large amounts, even petabytes, of data inexpensively in +// a day. Bulk retrievals typically complete within 5-12 hours from the GLACIER +// storage class and typically complete within 48 hours from the DEEP_ARCHIVE +// storage class. +// +// For more information about archive retrieval options and +// provisioned capacity for Expedited data access, see Restoring Archived Objects // (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) in the // Amazon Simple Storage Service Developer Guide. You can use Amazon S3 restore // speed upgrade to change the restore speed to a faster speed while it is in @@ -183,51 +182,50 @@ import ( // Amazon Simple Storage Service Developer Guide. Responses A successful operation // returns either the 200 OK or 202 Accepted status code. // -// * If the object copy -// is not previously restored, then Amazon S3 returns 202 Accepted in the -// response. +// * If the object copy is +// not previously restored, then Amazon S3 returns 202 Accepted in the response. // -// * If the object copy is previously restored, Amazon S3 returns -// 200 OK in the response. +// * +// If the object copy is previously restored, Amazon S3 returns 200 OK in the +// response. // // Special Errors // -// * Code: -// RestoreAlreadyInProgress +// * Code: RestoreAlreadyInProgress // -// * Cause: Object restore is already in -// progress. (This error does not apply to SELECT type requests.) +// * Cause: Object +// restore is already in progress. (This error does not apply to SELECT type +// requests.) // -// * HTTP -// Status Code: 409 Conflict +// * HTTP Status Code: 409 Conflict // -// * SOAP Fault Code Prefix: Client +// * SOAP Fault Code Prefix: +// Client // -// * -// Code: GlacierExpeditedRetrievalNotAvailable +// * Code: GlacierExpeditedRetrievalNotAvailable // -// * Cause: S3 Glacier +// * Cause: S3 Glacier // expedited retrievals are currently not available. Try again later. (Returned if // there is insufficient capacity to process the Expedited request. This error // applies only to Expedited retrievals and not to S3 Standard or Bulk // retrievals.) 
// -// * HTTP Status Code: 503 +// * HTTP Status Code: 503 // -// * SOAP Fault Code Prefix: -// N/A +// * SOAP Fault Code Prefix: N/A // -// Related Resources +// Related +// Resources // -// * PutBucketLifecycleConfiguration +// * PutBucketLifecycleConfiguration // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) // -// -// * GetBucketNotificationConfiguration +// * +// GetBucketNotificationConfiguration // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html) // -// -// * SQL Reference for Amazon S3 Select and S3 Glacier Select +// * +// SQL Reference for Amazon S3 Select and S3 Glacier Select // (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) // in the Amazon Simple Storage Service Developer Guide func (c *Client) RestoreObject(ctx context.Context, params *RestoreObjectInput, optFns ...func(*Options)) (*RestoreObjectOutput, error) { diff --git a/service/s3/api_op_SelectObjectContent.go b/service/s3/api_op_SelectObjectContent.go index 806905f73c0..e29b1379b8e 100644 --- a/service/s3/api_op_SelectObjectContent.go +++ b/service/s3/api_op_SelectObjectContent.go @@ -33,25 +33,25 @@ import ( // use Amazon S3 Select to query objects that have the following format // properties: // -// * CSV, JSON, and Parquet - Objects must be in CSV, JSON, or -// Parquet format. +// * CSV, JSON, and Parquet - Objects must be in CSV, JSON, or Parquet +// format. // -// * UTF-8 - UTF-8 is the only encoding type Amazon S3 Select -// supports. +// * UTF-8 - UTF-8 is the only encoding type Amazon S3 Select supports. // -// * GZIP or BZIP2 - CSV and JSON files can be compressed using GZIP -// or BZIP2. GZIP and BZIP2 are the only compression formats that Amazon S3 Select -// supports for CSV and JSON files. Amazon S3 Select supports columnar compression -// for Parquet using GZIP or Snappy. Amazon S3 Select does not support whole-object -// compression for Parquet objects. +// * +// GZIP or BZIP2 - CSV and JSON files can be compressed using GZIP or BZIP2. GZIP +// and BZIP2 are the only compression formats that Amazon S3 Select supports for +// CSV and JSON files. Amazon S3 Select supports columnar compression for Parquet +// using GZIP or Snappy. Amazon S3 Select does not support whole-object compression +// for Parquet objects. // -// * Server-side encryption - Amazon S3 -// Select supports querying objects that are protected with server-side encryption. -// For objects that are encrypted with customer-provided encryption keys (SSE-C), -// you must use HTTPS, and you must use the headers that are documented in the -// GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html). -// For more information about SSE-C, see Server-Side Encryption (Using -// Customer-Provided Encryption Keys) +// * Server-side encryption - Amazon S3 Select supports +// querying objects that are protected with server-side encryption. For objects +// that are encrypted with customer-provided encryption keys (SSE-C), you must use +// HTTPS, and you must use the headers that are documented in the GetObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html). For more +// information about SSE-C, see Server-Side Encryption (Using Customer-Provided +// Encryption Keys) // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) // in the Amazon Simple Storage Service Developer Guide. 
For objects that are // encrypted with Amazon S3 managed encryption keys (SSE-S3) and customer master @@ -72,16 +72,16 @@ import ( // following GetObject functionality. For more information, see GetObject // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html). // -// * -// Range: Although you can specify a scan range for an Amazon S3 Select request -// (see SelectObjectContentRequest - ScanRange +// * Range: +// Although you can specify a scan range for an Amazon S3 Select request (see +// SelectObjectContentRequest - ScanRange // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_SelectObjectContent.html#AmazonS3-SelectObjectContent-request-ScanRange) // in the request parameters), you cannot specify the range of bytes of an object // to return. // -// * GLACIER, DEEP_ARCHIVE and REDUCED_REDUNDANCY storage classes: -// You cannot specify the GLACIER, DEEP_ARCHIVE, or REDUCED_REDUNDANCY storage -// classes. For more information, about storage classes see Storage Classes +// * GLACIER, DEEP_ARCHIVE and REDUCED_REDUNDANCY storage classes: You +// cannot specify the GLACIER, DEEP_ARCHIVE, or REDUCED_REDUNDANCY storage classes. +// For more information, about storage classes see Storage Classes // (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html#storage-class-intro) // in the Amazon Simple Storage Service Developer Guide. // @@ -91,15 +91,15 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#SelectObjectContentErrorCodeList) // Related Resources // -// * GetObject +// * GetObject // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) // -// * +// * // GetBucketLifecycleConfiguration // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) // -// -// * PutBucketLifecycleConfiguration +// * +// PutBucketLifecycleConfiguration // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) func (c *Client) SelectObjectContent(ctx context.Context, params *SelectObjectContentInput, optFns ...func(*Options)) (*SelectObjectContentOutput, error) { if params == nil { @@ -184,14 +184,14 @@ type SelectObjectContentInput struct { // 14.35.1 about how to specify the start and end of the range. ScanRangemay be // used in the following ways: // - // * 50100 - process only the records starting - // between the bytes 50 and 100 (inclusive, counting from zero) + // * 50100 - process only the records starting between + // the bytes 50 and 100 (inclusive, counting from zero) // - // * 50 - process - // only the records starting after the byte 50 + // * 50 - process only the + // records starting after the byte 50 // - // * 50 - process only the records - // within the last 50 bytes of the file. + // * 50 - process only the records within the + // last 50 bytes of the file. ScanRange *types.ScanRange } diff --git a/service/s3/api_op_UploadPart.go b/service/s3/api_op_UploadPart.go index cadad5fd147..17a8e2604fc 100644 --- a/service/s3/api_op_UploadPart.go +++ b/service/s3/api_op_UploadPart.go @@ -65,47 +65,47 @@ import ( // in your initiate multipart upload request, you must provide identical encryption // information in each part upload using the following headers. 
// -// * +// * // x-amz-server-side-encryption-customer-algorithm // -// * +// * // x-amz-server-side-encryption-customer-key // -// * +// * // x-amz-server-side-encryption-customer-key-MD5 // // Special Errors // -// * Code: +// * Code: // NoSuchUpload // -// * Cause: The specified multipart upload does not exist. -// The upload ID might be invalid, or the multipart upload might have been aborted -// or completed. +// * Cause: The specified multipart upload does not exist. The upload +// ID might be invalid, or the multipart upload might have been aborted or +// completed. // -// * HTTP Status Code: 404 Not Found +// * HTTP Status Code: 404 Not Found // -// * SOAP Fault -// Code Prefix: Client +// * SOAP Fault Code Prefix: +// Client // // Related Resources // -// * CreateMultipartUpload +// * CreateMultipartUpload // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) // -// -// * CompleteMultipartUpload +// * +// CompleteMultipartUpload // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) // -// -// * AbortMultipartUpload +// * +// AbortMultipartUpload // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) // -// -// * ListParts +// * +// ListParts // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) // -// * +// * // ListMultipartUploads // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) func (c *Client) UploadPart(ctx context.Context, params *UploadPartInput, optFns ...func(*Options)) (*UploadPartOutput, error) { diff --git a/service/s3/api_op_UploadPartCopy.go b/service/s3/api_op_UploadPartCopy.go index 2ce0b19886e..435646bcbba 100644 --- a/service/s3/api_op_UploadPartCopy.go +++ b/service/s3/api_op_UploadPartCopy.go @@ -28,25 +28,25 @@ import ( // request. For more information about using the UploadPartCopy operation, see the // following: // -// * For conceptual information about multipart uploads, see -// Uploading Objects Using Multipart Upload +// * For conceptual information about multipart uploads, see Uploading +// Objects Using Multipart Upload // (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) in the // Amazon Simple Storage Service Developer Guide. // -// * For information about +// * For information about // permissions required to use the multipart upload API, see Multipart Upload API // and Permissions // (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) in the // Amazon Simple Storage Service Developer Guide. // -// * For information about -// copying objects using a single atomic operation vs. the multipart upload, see -// Operations on Objects +// * For information about copying +// objects using a single atomic operation vs. the multipart upload, see Operations +// on Objects // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html) in the // Amazon Simple Storage Service Developer Guide. 
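For reference alongside the UploadPart documentation above, the following sketch shows the minimal multipart flow around it: initiate, upload a single part, then complete. It is illustrative only and not part of the generated change; bucket and key are placeholders, a real upload would loop over parts of at least 5 MiB (except the last) and abort on failure, and the non-pointer PartNumber scalar reflects the shapes generated for this version of the client.

package sketches

import (
	"bytes"
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// uploadOnePart initiates a multipart upload, uploads data as part 1, and
// completes the upload using the ETag returned by UploadPart.
func uploadOnePart(ctx context.Context, client *s3.Client, data []byte) error {
	bucket, key := aws.String("example-bucket"), aws.String("example-object") // placeholders

	create, err := client.CreateMultipartUpload(ctx, &s3.CreateMultipartUploadInput{
		Bucket: bucket, Key: key,
	})
	if err != nil {
		return err
	}

	part, err := client.UploadPart(ctx, &s3.UploadPartInput{
		Bucket:     bucket,
		Key:        key,
		UploadId:   create.UploadId,
		PartNumber: 1,
		Body:       bytes.NewReader(data),
	})
	if err != nil {
		return err
	}

	_, err = client.CompleteMultipartUpload(ctx, &s3.CompleteMultipartUploadInput{
		Bucket:   bucket,
		Key:      key,
		UploadId: create.UploadId,
		MultipartUpload: &types.CompletedMultipartUpload{
			Parts: []types.CompletedPart{{ETag: part.ETag, PartNumber: 1}},
		},
	})
	return err
}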
// -// * For information about -// using server-side encryption with customer-provided encryption keys with the +// * For information about using +// server-side encryption with customer-provided encryption keys with the // UploadPartCopy operation, see CopyObject // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) and // UploadPart @@ -58,13 +58,13 @@ import ( // x-amz-copy-source-if-unmodified-since, and // x-amz-copy-source-if-modified-since: // -// * Consideration 1 - If both of the +// * Consideration 1 - If both of the // x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since headers are // present in the request as follows: x-amz-copy-source-if-match condition // evaluates to true, and; x-amz-copy-source-if-unmodified-since condition // evaluates to false; Amazon S3 returns 200 OK and copies the data. // -// * +// * // Consideration 2 - If both of the x-amz-copy-source-if-none-match and // x-amz-copy-source-if-modified-since headers are present in the request as // follows: x-amz-copy-source-if-none-match condition evaluates to false, and; @@ -83,46 +83,45 @@ import ( // the versionId subresource as shown in the following example: x-amz-copy-source: // /bucket/object?versionId=version id Special Errors // -// * Code: -// NoSuchUpload +// * Code: NoSuchUpload // -// * Cause: The specified multipart upload does not exist. -// The upload ID might be invalid, or the multipart upload might have been aborted -// or completed. +// * +// Cause: The specified multipart upload does not exist. The upload ID might be +// invalid, or the multipart upload might have been aborted or completed. // -// * HTTP Status Code: 404 Not Found +// * HTTP +// Status Code: 404 Not Found // -// * Code: -// InvalidRequest +// * Code: InvalidRequest // -// * Cause: The specified copy source is not supported as a -// byte-range copy source. +// * Cause: The specified copy +// source is not supported as a byte-range copy source. // -// * HTTP Status Code: 400 Bad Request +// * HTTP Status Code: 400 +// Bad Request // -// Related -// Resources +// Related Resources // -// * CreateMultipartUpload +// * CreateMultipartUpload // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) // -// -// * UploadPart +// * +// UploadPart // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) // -// * +// * // CompleteMultipartUpload // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) // -// -// * AbortMultipartUpload +// * +// AbortMultipartUpload // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) // -// -// * ListParts +// * +// ListParts // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) // -// * +// * // ListMultipartUploads // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) func (c *Client) UploadPartCopy(ctx context.Context, params *UploadPartCopyInput, optFns ...func(*Options)) (*UploadPartCopyOutput, error) { @@ -167,13 +166,13 @@ type UploadPartCopyInput struct { // through an access point // (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-points.html): // - // * For + // * For // objects not accessed through an access point, specify the name of the source // bucket and key of the source object, separated by a slash (/). For example, to // copy the object reports/january.pdf from the bucket awsexamplebucket, use // awsexamplebucket/reports/january.pdf. The value must be URL encoded. 
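Tying the CopySource format described just above to code, here is a hedged sketch of UploadPartCopy copying part 1 from an existing object named in bucket/key form. It is not part of the generated change; the destination names are placeholders, the source value reuses the doc comment's awsexamplebucket example, and the upload ID is assumed to come from a prior CreateMultipartUpload call.

package sketches

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// copyPart copies part 1 of an in-progress multipart upload from an existing
// source object, using the URL-encoded bucket/key CopySource form shown above.
func copyPart(ctx context.Context, client *s3.Client, uploadID *string) error {
	_, err := client.UploadPartCopy(ctx, &s3.UploadPartCopyInput{
		Bucket:     aws.String("destination-bucket"), // placeholder destination bucket
		Key:        aws.String("copied-object"),      // placeholder destination key
		UploadId:   uploadID,
		PartNumber: 1,
		CopySource: aws.String("awsexamplebucket/reports/january.pdf"), // example source from the doc above
	})
	return err
}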
// - // * For + // * For // objects accessed through access points, specify the Amazon Resource Name (ARN) // of the object as accessed through the access point, in the format // arn:aws:s3:::accesspoint//object/. For example, to copy the object diff --git a/service/s3/types/enums.go b/service/s3/types/enums.go index 8574bbcd23a..610c5e31c23 100644 --- a/service/s3/types/enums.go +++ b/service/s3/types/enums.go @@ -126,9 +126,9 @@ type BucketLogsPermission string // Enum values for BucketLogsPermission const ( - BucketLogsPermissionFull_control BucketLogsPermission = "FULL_CONTROL" - BucketLogsPermissionRead BucketLogsPermission = "READ" - BucketLogsPermissionWrite BucketLogsPermission = "WRITE" + BucketLogsPermissionFullControl BucketLogsPermission = "FULL_CONTROL" + BucketLogsPermissionRead BucketLogsPermission = "READ" + BucketLogsPermissionWrite BucketLogsPermission = "WRITE" ) // Values returns all known values for BucketLogsPermission. Note that this can be @@ -657,14 +657,14 @@ type ObjectStorageClass string // Enum values for ObjectStorageClass const ( - ObjectStorageClassStandard ObjectStorageClass = "STANDARD" - ObjectStorageClassReduced_redundancy ObjectStorageClass = "REDUCED_REDUNDANCY" - ObjectStorageClassGlacier ObjectStorageClass = "GLACIER" - ObjectStorageClassStandard_ia ObjectStorageClass = "STANDARD_IA" - ObjectStorageClassOnezone_ia ObjectStorageClass = "ONEZONE_IA" - ObjectStorageClassIntelligent_tiering ObjectStorageClass = "INTELLIGENT_TIERING" - ObjectStorageClassDeep_archive ObjectStorageClass = "DEEP_ARCHIVE" - ObjectStorageClassOutposts ObjectStorageClass = "OUTPOSTS" + ObjectStorageClassStandard ObjectStorageClass = "STANDARD" + ObjectStorageClassReducedRedundancy ObjectStorageClass = "REDUCED_REDUNDANCY" + ObjectStorageClassGlacier ObjectStorageClass = "GLACIER" + ObjectStorageClassStandardIa ObjectStorageClass = "STANDARD_IA" + ObjectStorageClassOnezoneIa ObjectStorageClass = "ONEZONE_IA" + ObjectStorageClassIntelligentTiering ObjectStorageClass = "INTELLIGENT_TIERING" + ObjectStorageClassDeepArchive ObjectStorageClass = "DEEP_ARCHIVE" + ObjectStorageClassOutposts ObjectStorageClass = "OUTPOSTS" ) // Values returns all known values for ObjectStorageClass. Note that this can be @@ -737,11 +737,11 @@ type Permission string // Enum values for Permission const ( - PermissionFull_control Permission = "FULL_CONTROL" - PermissionWrite Permission = "WRITE" - PermissionWrite_acp Permission = "WRITE_ACP" - PermissionRead Permission = "READ" - PermissionRead_acp Permission = "READ_ACP" + PermissionFullControl Permission = "FULL_CONTROL" + PermissionWrite Permission = "WRITE" + PermissionWriteAcp Permission = "WRITE_ACP" + PermissionRead Permission = "READ" + PermissionReadAcp Permission = "READ_ACP" ) // Values returns all known values for Permission. 
Note that this can be expanded @@ -939,14 +939,14 @@ type StorageClass string // Enum values for StorageClass const ( - StorageClassStandard StorageClass = "STANDARD" - StorageClassReduced_redundancy StorageClass = "REDUCED_REDUNDANCY" - StorageClassStandard_ia StorageClass = "STANDARD_IA" - StorageClassOnezone_ia StorageClass = "ONEZONE_IA" - StorageClassIntelligent_tiering StorageClass = "INTELLIGENT_TIERING" - StorageClassGlacier StorageClass = "GLACIER" - StorageClassDeep_archive StorageClass = "DEEP_ARCHIVE" - StorageClassOutposts StorageClass = "OUTPOSTS" + StorageClassStandard StorageClass = "STANDARD" + StorageClassReducedRedundancy StorageClass = "REDUCED_REDUNDANCY" + StorageClassStandardIa StorageClass = "STANDARD_IA" + StorageClassOnezoneIa StorageClass = "ONEZONE_IA" + StorageClassIntelligentTiering StorageClass = "INTELLIGENT_TIERING" + StorageClassGlacier StorageClass = "GLACIER" + StorageClassDeepArchive StorageClass = "DEEP_ARCHIVE" + StorageClassOutposts StorageClass = "OUTPOSTS" ) // Values returns all known values for StorageClass. Note that this can be expanded @@ -969,7 +969,7 @@ type StorageClassAnalysisSchemaVersion string // Enum values for StorageClassAnalysisSchemaVersion const ( - StorageClassAnalysisSchemaVersionV_1 StorageClassAnalysisSchemaVersion = "V_1" + StorageClassAnalysisSchemaVersionV1 StorageClassAnalysisSchemaVersion = "V_1" ) // Values returns all known values for StorageClassAnalysisSchemaVersion. Note that @@ -1024,11 +1024,11 @@ type TransitionStorageClass string // Enum values for TransitionStorageClass const ( - TransitionStorageClassGlacier TransitionStorageClass = "GLACIER" - TransitionStorageClassStandard_ia TransitionStorageClass = "STANDARD_IA" - TransitionStorageClassOnezone_ia TransitionStorageClass = "ONEZONE_IA" - TransitionStorageClassIntelligent_tiering TransitionStorageClass = "INTELLIGENT_TIERING" - TransitionStorageClassDeep_archive TransitionStorageClass = "DEEP_ARCHIVE" + TransitionStorageClassGlacier TransitionStorageClass = "GLACIER" + TransitionStorageClassStandardIa TransitionStorageClass = "STANDARD_IA" + TransitionStorageClassOnezoneIa TransitionStorageClass = "ONEZONE_IA" + TransitionStorageClassIntelligentTiering TransitionStorageClass = "INTELLIGENT_TIERING" + TransitionStorageClassDeepArchive TransitionStorageClass = "DEEP_ARCHIVE" ) // Values returns all known values for TransitionStorageClass. Note that this can diff --git a/service/s3/types/types.go b/service/s3/types/types.go index 936e9e2c9d9..392886f96a9 100644 --- a/service/s3/types/types.go +++ b/service/s3/types/types.go @@ -313,16 +313,16 @@ type CSVInput struct { // Describes the first line of input. Valid values are: // - // * NONE: First line is - // not a header. + // * NONE: First line is not + // a header. // - // * IGNORE: First line is a header, but you can't use the - // header values to indicate the column in an expression. You can use column - // position (such as _1, _2, …) to indicate the column (SELECT s._1 FROM OBJECT - // s). + // * IGNORE: First line is a header, but you can't use the header values + // to indicate the column in an expression. You can use column position (such as + // _1, _2, …) to indicate the column (SELECT s._1 FROM OBJECT s). // - // * Use: First line is a header, and you can use the header value to - // identify a column in an expression (SELECT "name" FROM OBJECT). + // * Use: First + // line is a header, and you can use the header value to identify a column in an + // expression (SELECT "name" FROM OBJECT). 
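Since the FileHeaderInfo values above determine whether header names can appear in a query, here is a minimal sketch of a SelectObjectContent request that queries by header name, which requires USE. It is not part of the generated change: bucket, key, and the expression are placeholders, it assumes an already-constructed *s3.Client, and consuming the streamed records from the response is omitted.

package sketches

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// selectByHeader selects a single column by its CSV header name. FileHeaderInfo
// must be USE for header names to be valid in the expression; QuoteFields
// ASNEEDED quotes output fields only when required.
func selectByHeader(ctx context.Context, client *s3.Client) error {
	_, err := client.SelectObjectContent(ctx, &s3.SelectObjectContentInput{
		Bucket:         aws.String("example-bucket"), // placeholder bucket
		Key:            aws.String("people.csv"),     // placeholder CSV object
		Expression:     aws.String(`SELECT s."name" FROM S3Object s`),
		ExpressionType: types.ExpressionTypeSql,
		InputSerialization: &types.InputSerialization{
			CSV: &types.CSVInput{FileHeaderInfo: types.FileHeaderInfoUse},
		},
		OutputSerialization: &types.OutputSerialization{
			CSV: &types.CSVOutput{QuoteFields: types.QuoteFieldsAsneeded},
		},
	})
	return err
}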
FileHeaderInfo FileHeaderInfo // A single character used for escaping when the field delimiter is part of the @@ -359,11 +359,11 @@ type CSVOutput struct { // Indicates whether to use quotation marks around output fields. // - // * ALWAYS: - // Always use quotation marks for output fields. + // * ALWAYS: Always + // use quotation marks for output fields. // - // * ASNEEDED: Use quotation - // marks for output fields when needed. + // * ASNEEDED: Use quotation marks for + // output fields when needed. QuoteFields QuoteFields // A single character used to separate individual records in the output. Instead of @@ -556,866 +556,838 @@ type Error struct { // meant to be read and understood by programs that detect and handle errors by // type. Amazon S3 error codes // - // * Code: AccessDenied + // * Code: AccessDenied // - // * - // Description: Access Denied + // * Description: Access + // Denied // - // * HTTP Status Code: 403 Forbidden + // * HTTP Status Code: 403 Forbidden // - // * - // SOAP Fault Code Prefix: Client - // - // * Code: AccountProblem - // - // * - // Description: There is a problem with your AWS account that prevents the - // operation from completing successfully. Contact AWS Support for further - // assistance. + // * SOAP Fault Code Prefix: Client // - // * HTTP Status Code: 403 Forbidden + // * + // Code: AccountProblem // - // * SOAP Fault - // Code Prefix: Client + // * Description: There is a problem with your AWS account + // that prevents the operation from completing successfully. Contact AWS Support + // for further assistance. // - // * Code: AllAccessDisabled + // * HTTP Status Code: 403 Forbidden // - // * Description: - // All access to this Amazon S3 resource has been disabled. Contact AWS Support for - // further assistance. + // * SOAP Fault Code + // Prefix: Client // - // * HTTP Status Code: 403 Forbidden + // * Code: AllAccessDisabled // - // * SOAP - // Fault Code Prefix: Client + // * Description: All access to this + // Amazon S3 resource has been disabled. Contact AWS Support for further + // assistance. // - // * Code: AmbiguousGrantByEmailAddress + // * HTTP Status Code: 403 Forbidden // + // * SOAP Fault Code Prefix: + // Client // - // * Description: The email address you provided is associated with more than one - // account. + // * Code: AmbiguousGrantByEmailAddress // - // * HTTP Status Code: 400 Bad Request + // * Description: The email address + // you provided is associated with more than one account. // - // * SOAP Fault Code - // Prefix: Client + // * HTTP Status Code: 400 + // Bad Request // - // * Code: AuthorizationHeaderMalformed + // * SOAP Fault Code Prefix: Client // - // * - // Description: The authorization header you provided is invalid. + // * Code: + // AuthorizationHeaderMalformed // - // * HTTP - // Status Code: 400 Bad Request + // * Description: The authorization header you + // provided is invalid. // - // * HTTP Status Code: N/A + // * HTTP Status Code: 400 Bad Request // - // * Code: - // BadDigest + // * HTTP Status Code: + // N/A // - // * Description: The Content-MD5 you specified did not match - // what we received. + // * Code: BadDigest // - // * HTTP Status Code: 400 Bad Request + // * Description: The Content-MD5 you specified did not + // match what we received. // - // * SOAP - // Fault Code Prefix: Client + // * HTTP Status Code: 400 Bad Request // - // * Code: BucketAlreadyExists + // * SOAP Fault Code + // Prefix: Client // - // * - // Description: The requested bucket name is not available. 
The bucket namespace is - // shared by all users of the system. Please select a different name and try - // again. + // * Code: BucketAlreadyExists // - // * HTTP Status Code: 409 Conflict + // * Description: The requested bucket + // name is not available. The bucket namespace is shared by all users of the + // system. Please select a different name and try again. // - // * SOAP Fault Code - // Prefix: Client + // * HTTP Status Code: 409 + // Conflict // - // * Code: BucketAlreadyOwnedByYou + // * SOAP Fault Code Prefix: Client // - // * Description: - // The bucket you tried to create already exists, and you own it. Amazon S3 returns - // this error in all AWS Regions except in the North Virginia Region. For legacy - // compatibility, if you re-create an existing bucket that you already own in the - // North Virginia Region, Amazon S3 returns 200 OK and resets the bucket access - // control lists (ACLs). + // * Code: BucketAlreadyOwnedByYou // - // * Code: 409 Conflict (in all Regions except the - // North Virginia Region) + // * + // Description: The bucket you tried to create already exists, and you own it. + // Amazon S3 returns this error in all AWS Regions except in the North Virginia + // Region. For legacy compatibility, if you re-create an existing bucket that you + // already own in the North Virginia Region, Amazon S3 returns 200 OK and resets + // the bucket access control lists (ACLs). // - // * SOAP Fault Code Prefix: Client + // * Code: 409 Conflict (in all Regions + // except the North Virginia Region) // - // * - // Code: BucketNotEmpty + // * SOAP Fault Code Prefix: Client // - // * Description: The bucket you tried to delete is - // not empty. + // * Code: + // BucketNotEmpty // - // * HTTP Status Code: 409 Conflict + // * Description: The bucket you tried to delete is not empty. // - // * SOAP Fault Code - // Prefix: Client + // * + // HTTP Status Code: 409 Conflict // - // * Code: CredentialsNotSupported + // * SOAP Fault Code Prefix: Client // - // * Description: - // This request does not support credentials. + // * Code: + // CredentialsNotSupported // - // * HTTP Status Code: 400 Bad - // Request + // * Description: This request does not support + // credentials. // - // * SOAP Fault Code Prefix: Client + // * HTTP Status Code: 400 Bad Request // - // * Code: - // CrossLocationLoggingProhibited + // * SOAP Fault Code Prefix: + // Client // - // * Description: Cross-location logging - // not allowed. Buckets in one geographic location cannot log information to a - // bucket in another location. + // * Code: CrossLocationLoggingProhibited // - // * HTTP Status Code: 403 Forbidden + // * Description: Cross-location + // logging not allowed. Buckets in one geographic location cannot log information + // to a bucket in another location. // + // * HTTP Status Code: 403 Forbidden // - // * SOAP Fault Code Prefix: Client + // * SOAP + // Fault Code Prefix: Client // - // * Code: EntityTooSmall + // * Code: EntityTooSmall // - // * - // Description: Your proposed upload is smaller than the minimum allowed object - // size. + // * Description: Your proposed + // upload is smaller than the minimum allowed object size. // - // * HTTP Status Code: 400 Bad Request + // * HTTP Status Code: 400 + // Bad Request // - // * SOAP Fault Code - // Prefix: Client + // * SOAP Fault Code Prefix: Client // - // * Code: EntityTooLarge + // * Code: EntityTooLarge // - // * Description: Your - // proposed upload exceeds the maximum allowed object size. 
+ // * + // Description: Your proposed upload exceeds the maximum allowed object size. // - // * HTTP Status - // Code: 400 Bad Request + // * + // HTTP Status Code: 400 Bad Request // - // * SOAP Fault Code Prefix: Client + // * SOAP Fault Code Prefix: Client // - // * Code: + // * Code: // ExpiredToken // - // * Description: The provided token has expired. - // - // * - // HTTP Status Code: 400 Bad Request - // - // * SOAP Fault Code Prefix: Client + // * Description: The provided token has expired. // + // * HTTP Status + // Code: 400 Bad Request // - // * Code: IllegalVersioningConfigurationException + // * SOAP Fault Code Prefix: Client // - // * Description: - // Indicates that the versioning configuration specified in the request is - // invalid. + // * Code: + // IllegalVersioningConfigurationException // - // * HTTP Status Code: 400 Bad Request + // * Description: Indicates that the + // versioning configuration specified in the request is invalid. // - // * SOAP Fault Code - // Prefix: Client + // * HTTP Status + // Code: 400 Bad Request // - // * Code: IncompleteBody + // * SOAP Fault Code Prefix: Client // - // * Description: You did - // not provide the number of bytes specified by the Content-Length HTTP header + // * Code: + // IncompleteBody // + // * Description: You did not provide the number of bytes specified + // by the Content-Length HTTP header // // * HTTP Status Code: 400 Bad Request // - // * SOAP Fault Code Prefix: Client - // + // * SOAP + // Fault Code Prefix: Client // // * Code: IncorrectNumberOfFilesInPostRequest // - // * Description: POST - // requires exactly one file upload per request. + // * + // Description: POST requires exactly one file upload per request. // - // * HTTP Status Code: 400 - // Bad Request + // * HTTP Status + // Code: 400 Bad Request // - // * SOAP Fault Code Prefix: Client + // * SOAP Fault Code Prefix: Client // - // * Code: + // * Code: // InlineDataTooLarge // - // * Description: Inline data exceeds the maximum - // allowed size. + // * Description: Inline data exceeds the maximum allowed + // size. // - // * HTTP Status Code: 400 Bad Request + // * HTTP Status Code: 400 Bad Request // - // * SOAP Fault - // Code Prefix: Client + // * SOAP Fault Code Prefix: Client // - // * Code: InternalError + // * + // Code: InternalError // - // * Description: We - // encountered an internal error. Please try again. + // * Description: We encountered an internal error. Please try + // again. // - // * HTTP Status Code: - // 500 Internal Server Error + // * HTTP Status Code: 500 Internal Server Error // - // * SOAP Fault Code Prefix: Server + // * SOAP Fault Code Prefix: + // Server // - // * - // Code: InvalidAccessKeyId + // * Code: InvalidAccessKeyId // - // * Description: The AWS access key ID you + // * Description: The AWS access key ID you // provided does not exist in our records. // - // * HTTP Status Code: 403 - // Forbidden + // * HTTP Status Code: 403 Forbidden // - // * SOAP Fault Code Prefix: Client - // - // * Code: - // InvalidAddressingHeader + // * + // SOAP Fault Code Prefix: Client // - // * Description: You must specify the Anonymous - // role. + // * Code: InvalidAddressingHeader // - // * HTTP Status Code: N/A + // * Description: + // You must specify the Anonymous role. 
// - // * SOAP Fault Code Prefix: - // Client + // * HTTP Status Code: N/A // - // * Code: InvalidArgument + // * SOAP Fault Code + // Prefix: Client // - // * Description: Invalid - // Argument + // * Code: InvalidArgument // - // * HTTP Status Code: 400 Bad Request + // * Description: Invalid Argument // - // * SOAP Fault Code - // Prefix: Client + // * HTTP + // Status Code: 400 Bad Request // - // * Code: InvalidBucketName + // * SOAP Fault Code Prefix: Client // - // * Description: The - // specified bucket is not valid. + // * Code: + // InvalidBucketName // - // * HTTP Status Code: 400 Bad Request + // * Description: The specified bucket is not valid. // + // * HTTP + // Status Code: 400 Bad Request // // * SOAP Fault Code Prefix: Client // - // * Code: InvalidBucketState - // - // * - // Description: The request is not valid with the current state of the bucket. + // * Code: + // InvalidBucketState // + // * Description: The request is not valid with the current + // state of the bucket. // // * HTTP Status Code: 409 Conflict // - // * SOAP Fault Code Prefix: Client - // + // * SOAP Fault Code + // Prefix: Client // // * Code: InvalidDigest // - // * Description: The Content-MD5 you specified is - // not valid. + // * Description: The Content-MD5 you + // specified is not valid. // - // * HTTP Status Code: 400 Bad Request + // * HTTP Status Code: 400 Bad Request // - // * SOAP Fault - // Code Prefix: Client + // * SOAP Fault Code + // Prefix: Client // - // * Code: InvalidEncryptionAlgorithmError + // * Code: InvalidEncryptionAlgorithmError // - // * - // Description: The encryption request you specified is not valid. The valid value - // is AES256. + // * Description: The + // encryption request you specified is not valid. The valid value is AES256. // - // * HTTP Status Code: 400 Bad Request + // * + // HTTP Status Code: 400 Bad Request // - // * SOAP Fault - // Code Prefix: Client + // * SOAP Fault Code Prefix: Client // - // * Code: InvalidLocationConstraint + // * Code: + // InvalidLocationConstraint // - // * - // Description: The specified location constraint is not valid. For more - // information about Regions, see How to Select a Region for Your Buckets + // * Description: The specified location constraint is + // not valid. For more information about Regions, see How to Select a Region for + // Your Buckets // (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro). // + // * + // HTTP Status Code: 400 Bad Request // - // * HTTP Status Code: 400 Bad Request - // - // * SOAP Fault Code Prefix: Client - // + // * SOAP Fault Code Prefix: Client // - // * Code: InvalidObjectState + // * Code: + // InvalidObjectState // - // * Description: The operation is not valid - // for the current state of the object. + // * Description: The operation is not valid for the current + // state of the object. // - // * HTTP Status Code: 403 - // Forbidden + // * HTTP Status Code: 403 Forbidden // - // * SOAP Fault Code Prefix: Client + // * SOAP Fault Code + // Prefix: Client // - // * Code: - // InvalidPart + // * Code: InvalidPart // - // * Description: One or more of the specified parts could not - // be found. The part might not have been uploaded, or the specified entity tag - // might not have matched the part's entity tag. + // * Description: One or more of the specified + // parts could not be found. The part might not have been uploaded, or the + // specified entity tag might not have matched the part's entity tag. 
// - // * HTTP Status Code: 400 - // Bad Request + // * HTTP + // Status Code: 400 Bad Request // - // * SOAP Fault Code Prefix: Client + // * SOAP Fault Code Prefix: Client // - // * Code: + // * Code: // InvalidPartOrder // - // * Description: The list of parts was not in ascending - // order. Parts list must be specified in order by part number. - // - // * HTTP - // Status Code: 400 Bad Request + // * Description: The list of parts was not in ascending order. + // Parts list must be specified in order by part number. // - // * SOAP Fault Code Prefix: Client + // * HTTP Status Code: 400 + // Bad Request // + // * SOAP Fault Code Prefix: Client // // * Code: InvalidPayer // - // * Description: All access to this object has been - // disabled. Please contact AWS Support for further assistance. + // * + // Description: All access to this object has been disabled. Please contact AWS + // Support for further assistance. // - // * HTTP - // Status Code: 403 Forbidden + // * HTTP Status Code: 403 Forbidden // - // * SOAP Fault Code Prefix: Client + // * SOAP Fault + // Code Prefix: Client // - // * - // Code: InvalidPolicyDocument + // * Code: InvalidPolicyDocument // - // * Description: The content of the form does - // not meet the conditions specified in the policy document. + // * Description: The content + // of the form does not meet the conditions specified in the policy document. // - // * HTTP Status - // Code: 400 Bad Request + // * + // HTTP Status Code: 400 Bad Request // - // * SOAP Fault Code Prefix: Client + // * SOAP Fault Code Prefix: Client // - // * Code: + // * Code: // InvalidRange // - // * Description: The requested range cannot be satisfied. - // + // * Description: The requested range cannot be satisfied. // - // * HTTP Status Code: 416 Requested Range Not Satisfiable + // * HTTP + // Status Code: 416 Requested Range Not Satisfiable // - // * SOAP Fault - // Code Prefix: Client - // - // * Code: InvalidRequest + // * SOAP Fault Code Prefix: + // Client // - // * Description: - // Please use AWS4-HMAC-SHA256. + // * Code: InvalidRequest // - // * HTTP Status Code: 400 Bad Request + // * Description: Please use AWS4-HMAC-SHA256. // + // * + // HTTP Status Code: 400 Bad Request // // * Code: N/A // - // * Code: InvalidRequest + // * Code: InvalidRequest // - // * Description: SOAP - // requests must be made over an HTTPS connection. + // * + // Description: SOAP requests must be made over an HTTPS connection. // - // * HTTP Status Code: 400 - // Bad Request + // * HTTP Status + // Code: 400 Bad Request // - // * SOAP Fault Code Prefix: Client + // * SOAP Fault Code Prefix: Client // - // * Code: + // * Code: // InvalidRequest // - // * Description: Amazon S3 Transfer Acceleration is not - // supported for buckets with non-DNS compliant names. + // * Description: Amazon S3 Transfer Acceleration is not supported + // for buckets with non-DNS compliant names. // - // * HTTP Status Code: - // 400 Bad Request - // - // * Code: N/A + // * HTTP Status Code: 400 Bad + // Request // - // * Code: InvalidRequest + // * Code: N/A // - // * - // Description: Amazon S3 Transfer Acceleration is not supported for buckets with - // periods (.) in their names. + // * Code: InvalidRequest // - // * HTTP Status Code: 400 Bad Request + // * Description: Amazon S3 Transfer + // Acceleration is not supported for buckets with periods (.) in their names. 
// + // * + // HTTP Status Code: 400 Bad Request // // * Code: N/A // - // * Code: InvalidRequest + // * Code: InvalidRequest // - // * Description: Amazon S3 - // Transfer Accelerate endpoint only supports virtual style requests. + // * + // Description: Amazon S3 Transfer Accelerate endpoint only supports virtual style + // requests. // - // * - // HTTP Status Code: 400 Bad Request + // * HTTP Status Code: 400 Bad Request // - // * Code: N/A + // * Code: N/A // - // * Code: + // * Code: // InvalidRequest // - // * Description: Amazon S3 Transfer Accelerate is not - // configured on this bucket. - // - // * HTTP Status Code: 400 Bad Request + // * Description: Amazon S3 Transfer Accelerate is not configured + // on this bucket. // + // * HTTP Status Code: 400 Bad Request // // * Code: N/A // - // * Code: InvalidRequest + // * Code: + // InvalidRequest // - // * Description: Amazon S3 - // Transfer Accelerate is disabled on this bucket. + // * Description: Amazon S3 Transfer Accelerate is disabled on this + // bucket. // - // * HTTP Status Code: 400 - // Bad Request + // * HTTP Status Code: 400 Bad Request // - // * Code: N/A + // * Code: N/A // - // * Code: InvalidRequest + // * Code: + // InvalidRequest // - // * - // Description: Amazon S3 Transfer Acceleration is not supported on this bucket. - // Contact AWS Support for more information. + // * Description: Amazon S3 Transfer Acceleration is not supported + // on this bucket. Contact AWS Support for more information. // - // * HTTP Status Code: 400 Bad - // Request + // * HTTP Status Code: + // 400 Bad Request // - // * Code: N/A + // * Code: N/A // - // * Code: InvalidRequest + // * Code: InvalidRequest // - // * - // Description: Amazon S3 Transfer Acceleration cannot be enabled on this bucket. - // Contact AWS Support for more information. + // * Description: Amazon S3 + // Transfer Acceleration cannot be enabled on this bucket. Contact AWS Support for + // more information. // - // * HTTP Status Code: 400 Bad - // Request + // * HTTP Status Code: 400 Bad Request // - // * Code: N/A + // * Code: N/A // - // * Code: InvalidSecurity + // * Code: + // InvalidSecurity // - // * - // Description: The provided security credentials are not valid. + // * Description: The provided security credentials are not + // valid. // - // * HTTP - // Status Code: 403 Forbidden + // * HTTP Status Code: 403 Forbidden // - // * SOAP Fault Code Prefix: Client + // * SOAP Fault Code Prefix: Client // - // * + // * // Code: InvalidSOAPRequest // - // * Description: The SOAP request body is - // invalid. + // * Description: The SOAP request body is invalid. // - // * HTTP Status Code: 400 Bad Request + // * + // HTTP Status Code: 400 Bad Request // - // * SOAP Fault Code - // Prefix: Client + // * SOAP Fault Code Prefix: Client // - // * Code: InvalidStorageClass + // * Code: + // InvalidStorageClass // - // * Description: The - // storage class you specified is not valid. + // * Description: The storage class you specified is not + // valid. // - // * HTTP Status Code: 400 Bad - // Request + // * HTTP Status Code: 400 Bad Request // - // * SOAP Fault Code Prefix: Client + // * SOAP Fault Code Prefix: Client // - // * Code: - // InvalidTargetBucketForLogging + // * + // Code: InvalidTargetBucketForLogging // - // * Description: The target bucket for + // * Description: The target bucket for // logging does not exist, is not owned by you, or does not have the appropriate // grants for the log-delivery group. 
// - // * HTTP Status Code: 400 Bad - // Request - // - // * SOAP Fault Code Prefix: Client + // * HTTP Status Code: 400 Bad Request // - // * Code: - // InvalidToken + // * SOAP + // Fault Code Prefix: Client // - // * Description: The provided token is malformed or - // otherwise invalid. + // * Code: InvalidToken // - // * HTTP Status Code: 400 Bad Request + // * Description: The provided + // token is malformed or otherwise invalid. // - // * SOAP - // Fault Code Prefix: Client + // * HTTP Status Code: 400 Bad Request // - // * Code: InvalidURI + // * + // SOAP Fault Code Prefix: Client // - // * Description: - // Couldn't parse the specified URI. + // * Code: InvalidURI // - // * HTTP Status Code: 400 Bad Request + // * Description: Couldn't + // parse the specified URI. // + // * HTTP Status Code: 400 Bad Request // - // * SOAP Fault Code Prefix: Client + // * SOAP Fault Code + // Prefix: Client // - // * Code: KeyTooLongError + // * Code: KeyTooLongError // - // * - // Description: Your key is too long. + // * Description: Your key is too long. // - // * HTTP Status Code: 400 Bad - // Request + // * + // HTTP Status Code: 400 Bad Request // - // * SOAP Fault Code Prefix: Client + // * SOAP Fault Code Prefix: Client // - // * Code: + // * Code: // MalformedACLError // - // * Description: The XML you provided was not - // well-formed or did not validate against our published schema. + // * Description: The XML you provided was not well-formed or + // did not validate against our published schema. // - // * HTTP - // Status Code: 400 Bad Request - // - // * SOAP Fault Code Prefix: Client + // * HTTP Status Code: 400 Bad + // Request // + // * SOAP Fault Code Prefix: Client // // * Code: MalformedPOSTRequest // - // * Description: The body of your POST - // request is not well-formed multipart/form-data. - // - // * HTTP Status Code: 400 - // Bad Request - // - // * SOAP Fault Code Prefix: Client - // - // * Code: - // MalformedXML - // - // * Description: This happens when the user sends malformed - // XML (XML that doesn't conform to the published XSD) for the configuration. The - // error message is, "The XML you provided was not well-formed or did not validate - // against our published schema." + // * + // Description: The body of your POST request is not well-formed + // multipart/form-data. // - // * HTTP Status Code: 400 Bad Request - // - // - // * SOAP Fault Code Prefix: Client + // * HTTP Status Code: 400 Bad Request // - // * Code: MaxMessageLengthExceeded + // * SOAP Fault Code + // Prefix: Client // + // * Code: MalformedXML // - // * Description: Your request was too big. + // * Description: This happens when the user + // sends malformed XML (XML that doesn't conform to the published XSD) for the + // configuration. The error message is, "The XML you provided was not well-formed + // or did not validate against our published schema." // - // * HTTP Status Code: 400 Bad + // * HTTP Status Code: 400 Bad // Request // - // * SOAP Fault Code Prefix: Client + // * SOAP Fault Code Prefix: Client // - // * Code: - // MaxPostPreDataLengthExceededError + // * Code: MaxMessageLengthExceeded // - // * Description: Your POST request - // fields preceding the upload file were too large. + // * + // Description: Your request was too big. 
// - // * HTTP Status Code: - // 400 Bad Request - // - // * SOAP Fault Code Prefix: Client + // * HTTP Status Code: 400 Bad Request // - // * Code: - // MetadataTooLarge + // * + // SOAP Fault Code Prefix: Client // - // * Description: Your metadata headers exceed the - // maximum allowed metadata size. + // * Code: MaxPostPreDataLengthExceededError // - // * HTTP Status Code: 400 Bad Request + // * + // Description: Your POST request fields preceding the upload file were too + // large. // + // * HTTP Status Code: 400 Bad Request // // * SOAP Fault Code Prefix: Client // - // * Code: MethodNotAllowed + // * + // Code: MetadataTooLarge // - // * - // Description: The specified method is not allowed against this resource. + // * Description: Your metadata headers exceed the maximum + // allowed metadata size. // + // * HTTP Status Code: 400 Bad Request // - // * HTTP Status Code: 405 Method Not Allowed - // - // * SOAP Fault Code Prefix: - // Client - // - // * Code: MissingAttachment + // * SOAP Fault Code + // Prefix: Client // - // * Description: A SOAP - // attachment was expected, but none were found. + // * Code: MethodNotAllowed // - // * HTTP Status Code: N/A + // * Description: The specified method is + // not allowed against this resource. // + // * HTTP Status Code: 405 Method Not + // Allowed // // * SOAP Fault Code Prefix: Client // - // * Code: MissingContentLength + // * Code: MissingAttachment // + // * + // Description: A SOAP attachment was expected, but none were found. // - // * Description: You must provide the Content-Length HTTP header. + // * HTTP Status + // Code: N/A // - // * HTTP - // Status Code: 411 Length Required + // * SOAP Fault Code Prefix: Client // - // * SOAP Fault Code Prefix: Client + // * Code: MissingContentLength // + // * + // Description: You must provide the Content-Length HTTP header. // - // * Code: MissingRequestBodyError + // * HTTP Status + // Code: 411 Length Required // - // * Description: This happens when the - // user sends an empty XML document as a request. The error message is, "Request - // body is empty." + // * SOAP Fault Code Prefix: Client // - // * HTTP Status Code: 400 Bad Request + // * Code: + // MissingRequestBodyError // - // * SOAP - // Fault Code Prefix: Client + // * Description: This happens when the user sends an + // empty XML document as a request. The error message is, "Request body is + // empty." // - // * Code: MissingSecurityElement - // - // * - // Description: The SOAP 1.1 request is missing a security element. + // * HTTP Status Code: 400 Bad Request // - // * HTTP - // Status Code: 400 Bad Request + // * SOAP Fault Code Prefix: + // Client // - // * SOAP Fault Code Prefix: Client + // * Code: MissingSecurityElement // + // * Description: The SOAP 1.1 request is + // missing a security element. // - // * Code: MissingSecurityHeader + // * HTTP Status Code: 400 Bad Request // - // * Description: Your request is missing a - // required header. + // * SOAP Fault + // Code Prefix: Client // - // * HTTP Status Code: 400 Bad Request + // * Code: MissingSecurityHeader // - // * SOAP - // Fault Code Prefix: Client + // * Description: Your request + // is missing a required header. // - // * Code: NoLoggingStatusForKey + // * HTTP Status Code: 400 Bad Request // - // * - // Description: There is no such thing as a logging status subresource for a key. 
+ // * SOAP Fault + // Code Prefix: Client // + // * Code: NoLoggingStatusForKey // - // * HTTP Status Code: 400 Bad Request + // * Description: There is no + // such thing as a logging status subresource for a key. // - // * SOAP Fault Code Prefix: Client + // * HTTP Status Code: 400 + // Bad Request // + // * SOAP Fault Code Prefix: Client // // * Code: NoSuchBucket // - // * Description: The specified bucket does not - // exist. + // * + // Description: The specified bucket does not exist. // - // * HTTP Status Code: 404 Not Found + // * HTTP Status Code: 404 Not + // Found // - // * SOAP Fault Code - // Prefix: Client + // * SOAP Fault Code Prefix: Client // - // * Code: NoSuchBucketPolicy + // * Code: NoSuchBucketPolicy // - // * Description: The - // specified bucket does not have a bucket policy. + // * + // Description: The specified bucket does not have a bucket policy. // - // * HTTP Status Code: 404 - // Not Found + // * HTTP Status + // Code: 404 Not Found // - // * SOAP Fault Code Prefix: Client + // * SOAP Fault Code Prefix: Client // - // * Code: NoSuchKey + // * Code: NoSuchKey // + // * + // Description: The specified key does not exist. // - // * Description: The specified key does not exist. + // * HTTP Status Code: 404 Not + // Found // - // * HTTP Status Code: - // 404 Not Found - // - // * SOAP Fault Code Prefix: Client + // * SOAP Fault Code Prefix: Client // - // * Code: - // NoSuchLifecycleConfiguration + // * Code: NoSuchLifecycleConfiguration // - // * Description: The lifecycle configuration - // does not exist. + // * + // Description: The lifecycle configuration does not exist. // - // * HTTP Status Code: 404 Not Found + // * HTTP Status Code: + // 404 Not Found // - // * SOAP Fault - // Code Prefix: Client + // * SOAP Fault Code Prefix: Client // - // * Code: NoSuchUpload + // * Code: NoSuchUpload // - // * Description: The - // specified multipart upload does not exist. The upload ID might be invalid, or - // the multipart upload might have been aborted or completed. + // * + // Description: The specified multipart upload does not exist. The upload ID might + // be invalid, or the multipart upload might have been aborted or completed. // - // * HTTP - // Status Code: 404 Not Found + // * + // HTTP Status Code: 404 Not Found // - // * SOAP Fault Code Prefix: Client + // * SOAP Fault Code Prefix: Client // - // * - // Code: NoSuchVersion + // * Code: + // NoSuchVersion // - // * Description: Indicates that the version ID - // specified in the request does not match an existing version. + // * Description: Indicates that the version ID specified in the + // request does not match an existing version. // - // * HTTP - // Status Code: 404 Not Found + // * HTTP Status Code: 404 Not + // Found // - // * SOAP Fault Code Prefix: Client + // * SOAP Fault Code Prefix: Client // - // * - // Code: NotImplemented + // * Code: NotImplemented // - // * Description: A header you provided implies - // functionality that is not implemented. + // * Description: + // A header you provided implies functionality that is not implemented. // - // * HTTP Status Code: 501 Not - // Implemented + // * HTTP + // Status Code: 501 Not Implemented // - // * SOAP Fault Code Prefix: Server + // * SOAP Fault Code Prefix: Server // - // * Code: + // * Code: // NotSignedUp // - // * Description: Your account is not signed up for the Amazon - // S3 service. You must sign up before you can use Amazon S3. 
You can sign up at - // the following URL: https://aws.amazon.com/s3 + // * Description: Your account is not signed up for the Amazon S3 + // service. You must sign up before you can use Amazon S3. You can sign up at the + // following URL: https://aws.amazon.com/s3 // - // * HTTP Status Code: 403 - // Forbidden + // * HTTP Status Code: 403 Forbidden // - // * SOAP Fault Code Prefix: Client + // * + // SOAP Fault Code Prefix: Client // - // * Code: - // OperationAborted + // * Code: OperationAborted // - // * Description: A conflicting conditional operation is - // currently in progress against this resource. Try again. + // * Description: A + // conflicting conditional operation is currently in progress against this + // resource. Try again. // - // * HTTP Status - // Code: 409 Conflict + // * HTTP Status Code: 409 Conflict // - // * SOAP Fault Code Prefix: Client + // * SOAP Fault Code + // Prefix: Client // - // * Code: - // PermanentRedirect + // * Code: PermanentRedirect // - // * Description: The bucket you are attempting to - // access must be addressed using the specified endpoint. Send all future requests - // to this endpoint. + // * Description: The bucket you are + // attempting to access must be addressed using the specified endpoint. Send all + // future requests to this endpoint. // - // * HTTP Status Code: 301 Moved Permanently + // * HTTP Status Code: 301 Moved Permanently // - // * + // * // SOAP Fault Code Prefix: Client // - // * Code: PreconditionFailed - // - // * - // Description: At least one of the preconditions you specified did not hold. + // * Code: PreconditionFailed // + // * Description: At + // least one of the preconditions you specified did not hold. // - // * HTTP Status Code: 412 Precondition Failed + // * HTTP Status Code: + // 412 Precondition Failed // - // * SOAP Fault Code Prefix: - // Client - // - // * Code: Redirect + // * SOAP Fault Code Prefix: Client // - // * Description: Temporary redirect. + // * Code: Redirect // + // * + // Description: Temporary redirect. // // * HTTP Status Code: 307 Moved Temporarily // - // * SOAP Fault Code Prefix: - // Client - // - // * Code: RestoreAlreadyInProgress - // - // * Description: Object - // restore is already in progress. - // - // * HTTP Status Code: 409 Conflict - // - // - // * SOAP Fault Code Prefix: Client + // * + // SOAP Fault Code Prefix: Client // - // * Code: RequestIsNotMultiPartContent + // * Code: RestoreAlreadyInProgress // + // * Description: + // Object restore is already in progress. // - // * Description: Bucket POST must be of the enclosure-type multipart/form-data. + // * HTTP Status Code: 409 Conflict // + // * SOAP + // Fault Code Prefix: Client // - // * HTTP Status Code: 400 Bad Request + // * Code: RequestIsNotMultiPartContent // - // * SOAP Fault Code Prefix: Client + // * Description: + // Bucket POST must be of the enclosure-type multipart/form-data. // + // * HTTP Status + // Code: 400 Bad Request // - // * Code: RequestTimeout + // * SOAP Fault Code Prefix: Client // - // * Description: Your socket connection to the - // server was not read from or written to within the timeout period. + // * Code: + // RequestTimeout // - // * - // HTTP Status Code: 400 Bad Request + // * Description: Your socket connection to the server was not read + // from or written to within the timeout period. 
// - // * SOAP Fault Code Prefix: Client + // * HTTP Status Code: 400 Bad + // Request // + // * SOAP Fault Code Prefix: Client // // * Code: RequestTimeTooSkewed // - // * Description: The difference between the - // request time and the server's time is too large. - // - // * HTTP Status Code: - // 403 Forbidden + // * + // Description: The difference between the request time and the server's time is + // too large. // - // * SOAP Fault Code Prefix: Client + // * HTTP Status Code: 403 Forbidden // - // * Code: - // RequestTorrentOfBucketError + // * SOAP Fault Code Prefix: + // Client // - // * Description: Requesting the torrent file - // of a bucket is not permitted. + // * Code: RequestTorrentOfBucketError // - // * HTTP Status Code: 400 Bad Request + // * Description: Requesting the + // torrent file of a bucket is not permitted. // + // * HTTP Status Code: 400 Bad + // Request // // * SOAP Fault Code Prefix: Client // - // * Code: SignatureDoesNotMatch + // * Code: SignatureDoesNotMatch // - // - // * Description: The request signature we calculated does not match the signature + // * + // Description: The request signature we calculated does not match the signature // you provided. Check your AWS secret access key and signing method. For more // information, see REST Authentication // (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) and @@ -1423,94 +1395,91 @@ type Error struct { // (https://docs.aws.amazon.com/AmazonS3/latest/dev/SOAPAuthentication.html) for // details. // - // * HTTP Status Code: 403 Forbidden - // - // * SOAP Fault Code - // Prefix: Client + // * HTTP Status Code: 403 Forbidden // - // * Code: ServiceUnavailable + // * SOAP Fault Code Prefix: Client // - // * Description: - // Reduce your request rate. + // * + // Code: ServiceUnavailable // - // * HTTP Status Code: 503 Service Unavailable + // * Description: Reduce your request rate. // + // * HTTP + // Status Code: 503 Service Unavailable // // * SOAP Fault Code Prefix: Server // - // * Code: SlowDown + // * Code: + // SlowDown // - // * - // Description: Reduce your request rate. + // * Description: Reduce your request rate. // - // * HTTP Status Code: 503 Slow + // * HTTP Status Code: 503 Slow // Down // - // * SOAP Fault Code Prefix: Server + // * SOAP Fault Code Prefix: Server // - // * Code: - // TemporaryRedirect + // * Code: TemporaryRedirect // - // * Description: You are being redirected to the bucket - // while DNS updates. + // * + // Description: You are being redirected to the bucket while DNS updates. // - // * HTTP Status Code: 307 Moved Temporarily + // * HTTP + // Status Code: 307 Moved Temporarily // - // * - // SOAP Fault Code Prefix: Client + // * SOAP Fault Code Prefix: Client // - // * Code: TokenRefreshRequired + // * Code: + // TokenRefreshRequired // - // * - // Description: The provided token must be refreshed. + // * Description: The provided token must be refreshed. // - // * HTTP Status Code: - // 400 Bad Request + // * + // HTTP Status Code: 400 Bad Request // - // * SOAP Fault Code Prefix: Client + // * SOAP Fault Code Prefix: Client // - // * Code: + // * Code: // TooManyBuckets // - // * Description: You have attempted to create more buckets - // than allowed. + // * Description: You have attempted to create more buckets than + // allowed. 
// - // * HTTP Status Code: 400 Bad Request + // * HTTP Status Code: 400 Bad Request // - // * SOAP Fault - // Code Prefix: Client + // * SOAP Fault Code Prefix: + // Client // - // * Code: UnexpectedContent + // * Code: UnexpectedContent // - // * Description: - // This request does not support content. + // * Description: This request does not support + // content. // - // * HTTP Status Code: 400 Bad - // Request + // * HTTP Status Code: 400 Bad Request // - // * SOAP Fault Code Prefix: Client + // * SOAP Fault Code Prefix: + // Client // - // * Code: - // UnresolvableGrantByEmailAddress + // * Code: UnresolvableGrantByEmailAddress // - // * Description: The email address you - // provided does not match any account on record. + // * Description: The email + // address you provided does not match any account on record. // - // * HTTP Status Code: 400 - // Bad Request + // * HTTP Status Code: + // 400 Bad Request // - // * SOAP Fault Code Prefix: Client + // * SOAP Fault Code Prefix: Client // - // * Code: + // * Code: // UserKeyMustBeSpecified // - // * Description: The bucket POST must contain the + // * Description: The bucket POST must contain the // specified field name. If it is specified, check the order of the fields. // + // * HTTP + // Status Code: 400 Bad Request // - // * HTTP Status Code: 400 Bad Request - // - // * SOAP Fault Code Prefix: Client + // * SOAP Fault Code Prefix: Client Code *string // The error key. @@ -1598,27 +1567,27 @@ type Grantee struct { // Email address of the grantee. Using email addresses to specify a grantee is only // supported in the following AWS Regions: // - // * US East (N. Virginia) + // * US East (N. Virginia) // - // * US - // West (N. California) + // * US West (N. + // California) // - // * US West (Oregon) + // * US West (Oregon) // - // * Asia Pacific (Singapore) + // * Asia Pacific (Singapore) // + // * Asia Pacific + // (Sydney) // - // * Asia Pacific (Sydney) + // * Asia Pacific (Tokyo) // - // * Asia Pacific (Tokyo) + // * Europe (Ireland) // - // * Europe (Ireland) + // * South America (São + // Paulo) // - // - // * South America (São Paulo) - // - // For a list of all the Amazon S3 supported Regions - // and endpoints, see Regions and Endpoints + // For a list of all the Amazon S3 supported Regions and endpoints, see + // Regions and Endpoints // (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the AWS // General Reference. EmailAddress *string @@ -2119,19 +2088,18 @@ type Object struct { // digest of the object data. Whether or not it is depends on how the object was // created and how it is encrypted as described below: // - // * Objects created by - // the PUT Object, POST Object, or Copy operation, or through the AWS Management + // * Objects created by the + // PUT Object, POST Object, or Copy operation, or through the AWS Management // Console, and are encrypted by SSE-S3 or plaintext, have ETags that are an MD5 // digest of their object data. // - // * Objects created by the PUT Object, POST - // Object, or Copy operation, or through the AWS Management Console, and are - // encrypted by SSE-C or SSE-KMS, have ETags that are not an MD5 digest of their - // object data. + // * Objects created by the PUT Object, POST Object, + // or Copy operation, or through the AWS Management Console, and are encrypted by + // SSE-C or SSE-KMS, have ETags that are not an MD5 digest of their object data. 
// - // * If an object is created by either the Multipart Upload or - // Part Copy operation, the ETag is not an MD5 digest, regardless of the method of - // encryption. + // * + // If an object is created by either the Multipart Upload or Part Copy operation, + // the ETag is not an MD5 digest, regardless of the method of encryption. ETag *string // The name that you assign to an object. You use the object key to retrieve the @@ -2337,13 +2305,13 @@ type PublicAccessBlockConfiguration struct { // this bucket and objects in this bucket. Setting this element to TRUE causes the // following behavior: // - // * PUT Bucket acl and PUT Object acl calls fail if the + // * PUT Bucket acl and PUT Object acl calls fail if the // specified ACL is public. // - // * PUT Object calls fail if the request includes a + // * PUT Object calls fail if the request includes a // public ACL. // - // * PUT Bucket calls fail if the request includes a public + // * PUT Bucket calls fail if the request includes a public // ACL. // // Enabling this setting doesn't affect existing policies or ACLs. @@ -2512,14 +2480,14 @@ type ReplicationRule struct { // The priority associated with the rule. If you specify multiple rules in a // replication configuration, Amazon S3 prioritizes the rules to prevent conflicts // when filtering. If two or more rules identify the same object based on a - // specified filter, the rule with higher priority takes precedence. For example: - // + // specified filter, the rule with higher priority takes precedence. For + // example: // - // * Same object quality prefix-based filter criteria if prefixes you specified in - // multiple rules overlap + // * Same object quality prefix-based filter criteria if prefixes you + // specified in multiple rules overlap // - // * Same object qualify tag-based filter criteria - // specified in multiple rules + // * Same object qualify tag-based filter + // criteria specified in multiple rules // // For more information, see Replication // (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) in the Amazon @@ -2538,11 +2506,11 @@ type ReplicationRule struct { // objects to which the rule applies. This element is required only if you specify // more than one filter. For example: // -// * If you specify both a Prefix and a Tag +// * If you specify both a Prefix and a Tag // filter, wrap these filters in an And tag. // -// * If you specify a filter based -// on multiple tags, wrap the Tag elements in an And tag +// * If you specify a filter based on +// multiple tags, wrap the Tag elements in an And tag type ReplicationRuleAndOperator struct { // An object key name prefix that identifies the subset of objects to which the @@ -2561,11 +2529,11 @@ type ReplicationRuleFilter struct { // objects to which the rule applies. This element is required only if you specify // more than one filter. For example: // - // * If you specify both a Prefix and a Tag + // * If you specify both a Prefix and a Tag // filter, wrap these filters in an And tag. // - // * If you specify a filter based - // on multiple tags, wrap the Tag elements in an And tag. + // * If you specify a filter based on + // multiple tags, wrap the Tag elements in an And tag. And *ReplicationRuleAndOperator // An object key name prefix that identifies the subset of objects to which the @@ -2812,9 +2780,9 @@ type ServerSideEncryptionByDefault struct { // (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy). 
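The ReplicationRuleFilter wrapping rule documented above is easy to get wrong, so here is a minimal sketch of both forms using the s3 types touched in this patch. The prefix and tag values are placeholders, and the pointer-versus-value shapes of slice fields can vary between preview releases of the v2 SDK.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	// A rule that matches by prefix only can set Prefix directly on the filter.
	prefixOnly := types.ReplicationRuleFilter{
		Prefix: aws.String("logs/"), // placeholder prefix
	}

	// When both a Prefix and one or more Tags apply, the doc comment above says
	// to wrap them in an And element instead of setting them individually.
	prefixAndTag := types.ReplicationRuleFilter{
		And: &types.ReplicationRuleAndOperator{
			Prefix: aws.String("logs/"),
			Tags: []types.Tag{
				{Key: aws.String("team"), Value: aws.String("analytics")}, // placeholder tag
			},
		},
	}

	fmt.Printf("prefix-only filter: %+v\n", prefixOnly)
	fmt.Printf("combined filter:    %+v\n", prefixAndTag)
}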
// For example: // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab // - // * Key ARN: + // * Key ARN: // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab // // Amazon diff --git a/service/s3control/api_op_CreateAccessPoint.go b/service/s3control/api_op_CreateAccessPoint.go index 6fbd30cb5b5..c221f9cd0d0 100644 --- a/service/s3control/api_op_CreateAccessPoint.go +++ b/service/s3control/api_op_CreateAccessPoint.go @@ -20,38 +20,38 @@ import ( // Amazon Simple Storage Service Developer Guide. Using this action with Amazon S3 // on Outposts This action: // -// * Requires a virtual private cloud (VPC) -// configuration as S3 on Outposts only supports VPC style access points. +// * Requires a virtual private cloud (VPC) configuration +// as S3 on Outposts only supports VPC style access points. // -// * -// Does not support ACL on S3 on Outposts buckets. +// * Does not support ACL +// on S3 on Outposts buckets. // -// * Does not support Public -// Access on S3 on Outposts buckets. +// * Does not support Public Access on S3 on Outposts +// buckets. // -// * Does not support object lock for S3 on -// Outposts buckets. +// * Does not support object lock for S3 on Outposts buckets. // -// For more information, see Using Amazon S3 on Outposts in the -// Amazon Simple Storage Service Developer Guide . All Amazon S3 on Outposts REST -// API requests for this action require an additional parameter of outpost-id to be -// passed with the request and an S3 on Outposts endpoint hostname prefix instead -// of s3-control. For an example of the request syntax for Amazon S3 on Outposts -// that uses the S3 on Outposts endpoint hostname prefix and the outpost-id derived -// using the access point ARN, see the Example +// For more +// information, see Using Amazon S3 on Outposts in the Amazon Simple Storage +// Service Developer Guide . All Amazon S3 on Outposts REST API requests for this +// action require an additional parameter of outpost-id to be passed with the +// request and an S3 on Outposts endpoint hostname prefix instead of s3-control. +// For an example of the request syntax for Amazon S3 on Outposts that uses the S3 +// on Outposts endpoint hostname prefix and the outpost-id derived using the access +// point ARN, see the Example // (https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_CreateAccessPoint.html#API_control_CreateAccessPoint_Examples) // section below. The following actions are related to CreateAccessPoint: // -// * +// * // GetAccessPoint // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetAccessPoint.html) // -// -// * DeleteAccessPoint +// * +// DeleteAccessPoint // (https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_DeleteAccessPoint.html) // -// -// * ListAccessPoints +// * +// ListAccessPoints // (https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_ListAccessPoints.html) func (c *Client) CreateAccessPoint(ctx context.Context, params *CreateAccessPointInput, optFns ...func(*Options)) (*CreateAccessPointOutput, error) { if params == nil { diff --git a/service/s3control/api_op_CreateBucket.go b/service/s3control/api_op_CreateBucket.go index 703aaf12e84..236a3019d2c 100644 --- a/service/s3control/api_op_CreateBucket.go +++ b/service/s3control/api_op_CreateBucket.go @@ -24,40 +24,40 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html#bucketnamingrules). // S3 on Outposts buckets do not support // -// * ACLs. 
Instead, configure access -// point policies to manage access to buckets. +// * ACLs. Instead, configure access point +// policies to manage access to buckets. // -// * Public access. +// * Public access. // -// * Object -// Lock +// * Object Lock // -// * Bucket Location constraint +// * Bucket +// Location constraint // -// For an example of the request syntax for -// Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and -// outpost-id in your API request, see the Example +// For an example of the request syntax for Amazon S3 on +// Outposts that uses the S3 on Outposts endpoint hostname prefix and outpost-id in +// your API request, see the Example // (https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_CreateBucket.html#API_control_CreateBucket_Examples) // section below. The following actions are related to CreateBucket for Amazon S3 // on Outposts: // -// * PutObject +// * PutObject // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) // -// * +// * // GetBucket // (https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_GetBucket.html) // -// -// * DeleteBucket +// * +// DeleteBucket // (https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_DeleteBucket.html) // -// -// * CreateAccessPoint +// * +// CreateAccessPoint // (https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_CreateAccessPoint.html) // -// -// * PutAccessPointPolicy +// * +// PutAccessPointPolicy // (https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_PutAccessPointPolicy.html) func (c *Client) CreateBucket(ctx context.Context, params *CreateBucketInput, optFns ...func(*Options)) (*CreateBucketOutput, error) { if params == nil { diff --git a/service/s3control/api_op_CreateJob.go b/service/s3control/api_op_CreateJob.go index 86d5c25cfe2..9bf3b0c256d 100644 --- a/service/s3control/api_op_CreateJob.go +++ b/service/s3control/api_op_CreateJob.go @@ -21,19 +21,19 @@ import ( // Amazon Simple Storage Service Developer Guide. This operation creates a S3 Batch // Operations job. Related actions include: // -// * DescribeJob +// * DescribeJob // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DescribeJob.html) // -// -// * ListJobs +// * +// ListJobs // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_ListJobs.html) // -// -// * UpdateJobPriority +// * +// UpdateJobPriority // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_UpdateJobPriority.html) // -// -// * UpdateJobStatus +// * +// UpdateJobStatus // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_UpdateJobStatus.html) func (c *Client) CreateJob(ctx context.Context, params *CreateJobInput, optFns ...func(*Options)) (*CreateJobOutput, error) { if params == nil { diff --git a/service/s3control/api_op_DeleteAccessPoint.go b/service/s3control/api_op_DeleteAccessPoint.go index d88a452e135..ca848763263 100644 --- a/service/s3control/api_op_DeleteAccessPoint.go +++ b/service/s3control/api_op_DeleteAccessPoint.go @@ -22,16 +22,16 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_DeleteAccessPoint.html#API_control_DeleteAccessPoint_Examples) // section below. 
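As a rough illustration of the CreateBucket and CreateAccessPoint notes above for S3 on Outposts, the sketch below creates an Outposts bucket and then attaches a VPC-style access point to it. The account ID, Outpost ID, VPC ID, and names are placeholders, and the configuration-loading boilerplate follows the v2 SDK's usual pattern, which may differ slightly between preview releases.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3control"
	"github.com/aws/aws-sdk-go-v2/service/s3control/types"
)

func main() {
	// Standard v2 SDK config loading; the exact signature varies across preview releases.
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3control.NewFromConfig(cfg)

	// Create an S3 on Outposts bucket. The outpost-id parameter mentioned in the
	// doc comment is carried in the OutpostId field; the endpoint hostname prefix
	// handling is done by the service client.
	bucketOut, err := client.CreateBucket(context.TODO(), &s3control.CreateBucketInput{
		Bucket:    aws.String("example-outposts-bucket"), // placeholder bucket name
		OutpostId: aws.String("op-0123456789abcdef0"),    // placeholder Outpost ID
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("bucket ARN:", aws.ToString(bucketOut.BucketArn))

	// Access points on Outposts must carry a VPC configuration, per the
	// CreateAccessPoint notes above.
	_, err = client.CreateAccessPoint(context.TODO(), &s3control.CreateAccessPointInput{
		AccountId: aws.String("123456789012"), // placeholder account ID
		Name:      aws.String("example-access-point"),
		Bucket:    bucketOut.BucketArn,
		VpcConfiguration: &types.VpcConfiguration{
			VpcId: aws.String("vpc-0123456789abcdef0"), // placeholder VPC ID
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("access point created")
}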
The following actions are related to DeleteAccessPoint: // -// * +// * // CreateAccessPoint // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateAccessPoint.html) // -// -// * GetAccessPoint +// * +// GetAccessPoint // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetAccessPoint.html) // -// -// * ListAccessPoints +// * +// ListAccessPoints // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_ListAccessPoints.html) func (c *Client) DeleteAccessPoint(ctx context.Context, params *DeleteAccessPointInput, optFns ...func(*Options)) (*DeleteAccessPointOutput, error) { if params == nil { diff --git a/service/s3control/api_op_DeleteAccessPointPolicy.go b/service/s3control/api_op_DeleteAccessPointPolicy.go index b43aa9e3175..48ed8005d8f 100644 --- a/service/s3control/api_op_DeleteAccessPointPolicy.go +++ b/service/s3control/api_op_DeleteAccessPointPolicy.go @@ -22,12 +22,12 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_DeleteAccessPointPolicy.html#API_control_DeleteAccessPointPolicy_Examples) // section below. The following actions are related to DeleteAccessPointPolicy: // -// -// * PutAccessPointPolicy +// * +// PutAccessPointPolicy // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutAccessPointPolicy.html) // -// -// * GetAccessPointPolicy +// * +// GetAccessPointPolicy // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetAccessPointPolicy.html) func (c *Client) DeleteAccessPointPolicy(ctx context.Context, params *DeleteAccessPointPolicyInput, optFns ...func(*Options)) (*DeleteAccessPointPolicyOutput, error) { if params == nil { diff --git a/service/s3control/api_op_DeleteBucket.go b/service/s3control/api_op_DeleteBucket.go index 5f92a66ebe9..5acd5a356c2 100644 --- a/service/s3control/api_op_DeleteBucket.go +++ b/service/s3control/api_op_DeleteBucket.go @@ -30,15 +30,15 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_DeleteBucket.html#API_control_DeleteBucket_Examples) // section below. Related Resources // -// * CreateBucket +// * CreateBucket // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html) // -// -// * GetBucket +// * +// GetBucket // (https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_GetBucket.html) // -// -// * DeleteObject +// * +// DeleteObject // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) func (c *Client) DeleteBucket(ctx context.Context, params *DeleteBucketInput, optFns ...func(*Options)) (*DeleteBucketOutput, error) { if params == nil { diff --git a/service/s3control/api_op_DeleteBucketLifecycleConfiguration.go b/service/s3control/api_op_DeleteBucketLifecycleConfiguration.go index 552fc0fc060..850e2fa1a32 100644 --- a/service/s3control/api_op_DeleteBucketLifecycleConfiguration.go +++ b/service/s3control/api_op_DeleteBucketLifecycleConfiguration.go @@ -39,11 +39,11 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#intro-lifecycle-rules-actions). 
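A hedged sketch of the corresponding cleanup path using DeleteAccessPoint and DeleteBucket. For S3 on Outposts, the Name and Bucket fields carry ARNs rather than bare names; every identifier below is a placeholder.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3control"
)

func main() {
	// Standard v2 SDK config loading; signature varies across preview releases.
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3control.NewFromConfig(cfg)
	accountID := aws.String("123456789012") // placeholder account ID

	// Delete the access point first; for S3 on Outposts the Name field holds the
	// access point ARN rather than a bare name.
	if _, err := client.DeleteAccessPoint(context.TODO(), &s3control.DeleteAccessPointInput{
		AccountId: accountID,
		Name:      aws.String("arn:aws:s3-outposts:us-west-2:123456789012:outpost/op-0123456789abcdef0/accesspoint/example-access-point"), // placeholder ARN
	}); err != nil {
		log.Fatal(err)
	}

	// Then delete the (already emptied) bucket itself.
	if _, err := client.DeleteBucket(context.TODO(), &s3control.DeleteBucketInput{
		AccountId: accountID,
		Bucket:    aws.String("arn:aws:s3-outposts:us-west-2:123456789012:outpost/op-0123456789abcdef0/bucket/example-outposts-bucket"), // placeholder ARN
	}); err != nil {
		log.Fatal(err)
	}
}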
// Related actions include: // -// * PutBucketLifecycleConfiguration +// * PutBucketLifecycleConfiguration // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutBucketLifecycleConfiguration.html) // -// -// * GetBucketLifecycleConfiguration +// * +// GetBucketLifecycleConfiguration // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetBucketLifecycleConfiguration.html) func (c *Client) DeleteBucketLifecycleConfiguration(ctx context.Context, params *DeleteBucketLifecycleConfigurationInput, optFns ...func(*Options)) (*DeleteBucketLifecycleConfigurationOutput, error) { if params == nil { diff --git a/service/s3control/api_op_DeleteBucketPolicy.go b/service/s3control/api_op_DeleteBucketPolicy.go index b5a9b0e8be9..2ce9e81ebfc 100644 --- a/service/s3control/api_op_DeleteBucketPolicy.go +++ b/service/s3control/api_op_DeleteBucketPolicy.go @@ -41,12 +41,12 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_DeleteBucketPolicy.html#API_control_DeleteBucketPolicy_Examples) // section below. The following actions are related to DeleteBucketPolicy: // -// * +// * // GetBucketPolicy // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetBucketPolicy.html) // -// -// * PutBucketPolicy +// * +// PutBucketPolicy // (https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_PutBucketPolicy.html) func (c *Client) DeleteBucketPolicy(ctx context.Context, params *DeleteBucketPolicyInput, optFns ...func(*Options)) (*DeleteBucketPolicyOutput, error) { if params == nil { diff --git a/service/s3control/api_op_DeleteBucketTagging.go b/service/s3control/api_op_DeleteBucketTagging.go index 2536579f699..3b90015748a 100644 --- a/service/s3control/api_op_DeleteBucketTagging.go +++ b/service/s3control/api_op_DeleteBucketTagging.go @@ -30,12 +30,12 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_DeleteBucketTagging.html#API_control_DeleteBucketTagging_Examples) // section below. The following actions are related to DeleteBucketTagging: // -// * +// * // GetBucketTagging // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetBucketTagging.html) // -// -// * PutBucketTagging +// * +// PutBucketTagging // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutBucketTagging.html) func (c *Client) DeleteBucketTagging(ctx context.Context, params *DeleteBucketTaggingInput, optFns ...func(*Options)) (*DeleteBucketTaggingOutput, error) { if params == nil { diff --git a/service/s3control/api_op_DeleteJobTagging.go b/service/s3control/api_op_DeleteJobTagging.go index 2356c1847f3..0e8628c5fb9 100644 --- a/service/s3control/api_op_DeleteJobTagging.go +++ b/service/s3control/api_op_DeleteJobTagging.go @@ -18,18 +18,18 @@ import ( // action. For more information, see Controlling access and labeling jobs using // tags // (https://docs.aws.amazon.com/AmazonS3/latest/dev/batch-ops-managing-jobs.html#batch-ops-job-tags) -// in the Amazon Simple Storage Service Developer Guide. Related actions include: -// +// in the Amazon Simple Storage Service Developer Guide. 
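The policy- and tagging-deletion calls above use the same account-plus-ARN addressing. A minimal sketch, with placeholder identifiers:

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3control"
)

func main() {
	// Standard v2 SDK config loading; signature varies across preview releases.
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3control.NewFromConfig(cfg)

	accountID := aws.String("123456789012") // placeholder account ID
	bucketARN := aws.String("arn:aws:s3-outposts:us-west-2:123456789012:outpost/op-0123456789abcdef0/bucket/example-outposts-bucket") // placeholder ARN

	// Remove the bucket policy, if one is attached.
	if _, err := client.DeleteBucketPolicy(context.TODO(), &s3control.DeleteBucketPolicyInput{
		AccountId: accountID,
		Bucket:    bucketARN,
	}); err != nil {
		log.Fatal(err)
	}

	// Remove the bucket's tag set.
	if _, err := client.DeleteBucketTagging(context.TODO(), &s3control.DeleteBucketTaggingInput{
		AccountId: accountID,
		Bucket:    bucketARN,
	}); err != nil {
		log.Fatal(err)
	}
}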
Related actions +// include: // // * CreateJob // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateJob.html) // -// -// * GetJobTagging +// * +// GetJobTagging // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetJobTagging.html) // -// -// * PutJobTagging +// * +// PutJobTagging // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutJobTagging.html) func (c *Client) DeleteJobTagging(ctx context.Context, params *DeleteJobTaggingInput, optFns ...func(*Options)) (*DeleteJobTaggingOutput, error) { if params == nil { diff --git a/service/s3control/api_op_DeletePublicAccessBlock.go b/service/s3control/api_op_DeletePublicAccessBlock.go index 428d53fbd28..9e03b8731d8 100644 --- a/service/s3control/api_op_DeletePublicAccessBlock.go +++ b/service/s3control/api_op_DeletePublicAccessBlock.go @@ -18,11 +18,11 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html). // Related actions include: // -// * GetPublicAccessBlock +// * GetPublicAccessBlock // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetPublicAccessBlock.html) // -// -// * PutPublicAccessBlock +// * +// PutPublicAccessBlock // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutPublicAccessBlock.html) func (c *Client) DeletePublicAccessBlock(ctx context.Context, params *DeletePublicAccessBlockInput, optFns ...func(*Options)) (*DeletePublicAccessBlockOutput, error) { if params == nil { diff --git a/service/s3control/api_op_DescribeJob.go b/service/s3control/api_op_DescribeJob.go index 3014c12d579..091adaa6d2f 100644 --- a/service/s3control/api_op_DescribeJob.go +++ b/service/s3control/api_op_DescribeJob.go @@ -19,20 +19,20 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/batch-ops-basics.html) in the // Amazon Simple Storage Service Developer Guide. Related actions include: // -// * +// * // CreateJob // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateJob.html) // -// -// * ListJobs +// * +// ListJobs // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_ListJobs.html) // -// -// * UpdateJobPriority +// * +// UpdateJobPriority // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_UpdateJobPriority.html) // -// -// * UpdateJobStatus +// * +// UpdateJobStatus // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_UpdateJobStatus.html) func (c *Client) DescribeJob(ctx context.Context, params *DescribeJobInput, optFns ...func(*Options)) (*DescribeJobOutput, error) { if params == nil { diff --git a/service/s3control/api_op_GetAccessPoint.go b/service/s3control/api_op_GetAccessPoint.go index 7c95a78dfb8..57f4e470e42 100644 --- a/service/s3control/api_op_GetAccessPoint.go +++ b/service/s3control/api_op_GetAccessPoint.go @@ -24,16 +24,16 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_GetAccessPoint.html#API_control_GetAccessPoint_Examples) // section below. 
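For the Batch Operations job APIs above, a small sketch of checking a single job with DescribeJob. The account ID and job ID are placeholders; the job ID would normally come from an earlier CreateJob call.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3control"
)

func main() {
	// Standard v2 SDK config loading; signature varies across preview releases.
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3control.NewFromConfig(cfg)

	// Look up a single Batch Operations job by ID.
	out, err := client.DescribeJob(context.TODO(), &s3control.DescribeJobInput{
		AccountId: aws.String("123456789012"),                         // placeholder account ID
		JobId:     aws.String("00000000-0000-0000-0000-000000000000"), // placeholder job ID
	})
	if err != nil {
		log.Fatal(err)
	}
	if out.Job != nil {
		fmt.Printf("job %s is %s\n", aws.ToString(out.Job.JobId), out.Job.Status)
	}
}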
The following actions are related to GetAccessPoint: // -// * +// * // CreateAccessPoint // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateAccessPoint.html) // -// -// * DeleteAccessPoint +// * +// DeleteAccessPoint // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteAccessPoint.html) // -// -// * ListAccessPoints +// * +// ListAccessPoints // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_ListAccessPoints.html) func (c *Client) GetAccessPoint(ctx context.Context, params *GetAccessPointInput, optFns ...func(*Options)) (*GetAccessPointOutput, error) { if params == nil { diff --git a/service/s3control/api_op_GetAccessPointPolicy.go b/service/s3control/api_op_GetAccessPointPolicy.go index aafd8c72ee4..07893423710 100644 --- a/service/s3control/api_op_GetAccessPointPolicy.go +++ b/service/s3control/api_op_GetAccessPointPolicy.go @@ -16,12 +16,11 @@ import ( // Returns the access point policy associated with the specified access point. The // following actions are related to GetAccessPointPolicy: // -// * -// PutAccessPointPolicy +// * PutAccessPointPolicy // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutAccessPointPolicy.html) // -// -// * DeleteAccessPointPolicy +// * +// DeleteAccessPointPolicy // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteAccessPointPolicy.html) func (c *Client) GetAccessPointPolicy(ctx context.Context, params *GetAccessPointPolicyInput, optFns ...func(*Options)) (*GetAccessPointPolicyOutput, error) { if params == nil { diff --git a/service/s3control/api_op_GetBucket.go b/service/s3control/api_op_GetBucket.go index 11c89dbc89b..c7cd7a9f68e 100644 --- a/service/s3control/api_op_GetBucket.go +++ b/service/s3control/api_op_GetBucket.go @@ -19,15 +19,15 @@ import ( // in the Amazon Simple Storage Service Developer Guide. The following actions are // related to GetBucket for Amazon S3 on Outposts: // -// * PutObject +// * PutObject // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) // -// * +// * // CreateBucket // (https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_CreateBucket.html) // -// -// * DeleteBucket +// * +// DeleteBucket // (https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_DeleteBucket.html) func (c *Client) GetBucket(ctx context.Context, params *GetBucketInput, optFns ...func(*Options)) (*GetBucketOutput, error) { if params == nil { diff --git a/service/s3control/api_op_GetBucketLifecycleConfiguration.go b/service/s3control/api_op_GetBucketLifecycleConfiguration.go index f1d509b1b49..ddfbfd173dc 100644 --- a/service/s3control/api_op_GetBucketLifecycleConfiguration.go +++ b/service/s3control/api_op_GetBucketLifecycleConfiguration.go @@ -41,25 +41,24 @@ import ( // section below. GetBucketLifecycleConfiguration has the following special // error: // -// * Error code: NoSuchLifecycleConfiguration +// * Error code: NoSuchLifecycleConfiguration // -// * Description: -// The lifecycle configuration does not exist. +// * Description: The lifecycle +// configuration does not exist. 
// -// * HTTP Status Code: 404 Not -// Found +// * HTTP Status Code: 404 Not Found // -// * SOAP Fault Code Prefix: Client +// * SOAP Fault +// Code Prefix: Client // -// The following actions are -// related to GetBucketLifecycleConfiguration: +// The following actions are related to +// GetBucketLifecycleConfiguration: // -// * -// PutBucketLifecycleConfiguration +// * PutBucketLifecycleConfiguration // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutBucketLifecycleConfiguration.html) // -// -// * DeleteBucketLifecycleConfiguration +// * +// DeleteBucketLifecycleConfiguration // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteBucketLifecycleConfiguration.html) func (c *Client) GetBucketLifecycleConfiguration(ctx context.Context, params *GetBucketLifecycleConfigurationInput, optFns ...func(*Options)) (*GetBucketLifecycleConfigurationOutput, error) { if params == nil { diff --git a/service/s3control/api_op_GetBucketPolicy.go b/service/s3control/api_op_GetBucketPolicy.go index 51940560f5f..9d90e9e57df 100644 --- a/service/s3control/api_op_GetBucketPolicy.go +++ b/service/s3control/api_op_GetBucketPolicy.go @@ -39,15 +39,16 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_GetBucketPolicy.html#API_control_GetBucketPolicy_Examples) // section below. The following actions are related to GetBucketPolicy: // -// * -// GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// * +// GetObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) // -// -// * PutBucketPolicy +// * +// PutBucketPolicy // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutBucketPolicy.html) // -// -// * DeleteBucketPolicy +// * +// DeleteBucketPolicy // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteBucketPolicy.html) func (c *Client) GetBucketPolicy(ctx context.Context, params *GetBucketPolicyInput, optFns ...func(*Options)) (*GetBucketPolicyOutput, error) { if params == nil { diff --git a/service/s3control/api_op_GetBucketTagging.go b/service/s3control/api_op_GetBucketTagging.go index ab096d3778b..d7214e158fc 100644 --- a/service/s3control/api_op_GetBucketTagging.go +++ b/service/s3control/api_op_GetBucketTagging.go @@ -25,14 +25,14 @@ import ( // owner has this permission and can grant this permission to others. // GetBucketTagging has the following special error: // -// * Error code: +// * Error code: // NoSuchTagSetError // -// * Description: There is no tag set associated with -// the bucket. +// * Description: There is no tag set associated with the +// bucket. // -// All Amazon S3 on Outposts REST API requests for this action require -// an additional parameter of outpost-id to be passed with the request and an S3 on +// All Amazon S3 on Outposts REST API requests for this action require an +// additional parameter of outpost-id to be passed with the request and an S3 on // Outposts endpoint hostname prefix instead of s3-control. For an example of the // request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint // hostname prefix and the outpost-id derived using the access point ARN, see the @@ -40,12 +40,12 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_GetBucketTagging.html#API_control_GetBucketTagging_Examples) // section below. 
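The NoSuchLifecycleConfiguration special error described above surfaces through the SDK's generic API error interface. Below is a sketch of checking for it by error code; note that the smithy error package has lived at both github.com/awslabs/smithy-go and github.com/aws/smithy-go across releases, so the import path is an assumption, as are the account ID and bucket ARN.

package main

import (
	"context"
	"errors"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3control"
	"github.com/aws/smithy-go" // import path differs in earlier preview releases
)

func main() {
	// Standard v2 SDK config loading; signature varies across preview releases.
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3control.NewFromConfig(cfg)

	out, err := client.GetBucketLifecycleConfiguration(context.TODO(), &s3control.GetBucketLifecycleConfigurationInput{
		AccountId: aws.String("123456789012"), // placeholder account ID
		Bucket:    aws.String("arn:aws:s3-outposts:us-west-2:123456789012:outpost/op-0123456789abcdef0/bucket/example-outposts-bucket"), // placeholder ARN
	})
	if err != nil {
		// The special error documented above comes back as a generic API error
		// whose code matches the listed value.
		var apiErr smithy.APIError
		if errors.As(err, &apiErr) && apiErr.ErrorCode() == "NoSuchLifecycleConfiguration" {
			fmt.Println("no lifecycle configuration is attached to this bucket")
			return
		}
		log.Fatal(err)
	}
	fmt.Printf("found %d lifecycle rule(s)\n", len(out.Rules))
}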
The following actions are related to GetBucketTagging: // -// * +// * // PutBucketTagging // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutBucketTagging.html) // -// -// * DeleteBucketTagging +// * +// DeleteBucketTagging // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteBucketTagging.html) func (c *Client) GetBucketTagging(ctx context.Context, params *GetBucketTaggingInput, optFns ...func(*Options)) (*GetBucketTaggingOutput, error) { if params == nil { diff --git a/service/s3control/api_op_GetJobTagging.go b/service/s3control/api_op_GetJobTagging.go index 572bed6e073..9b19428e91a 100644 --- a/service/s3control/api_op_GetJobTagging.go +++ b/service/s3control/api_op_GetJobTagging.go @@ -18,18 +18,18 @@ import ( // have permission to perform the s3:GetJobTagging action. For more information, // see Controlling access and labeling jobs using tags // (https://docs.aws.amazon.com/AmazonS3/latest/dev/batch-ops-managing-jobs.html#batch-ops-job-tags) -// in the Amazon Simple Storage Service Developer Guide. Related actions include: -// +// in the Amazon Simple Storage Service Developer Guide. Related actions +// include: // // * CreateJob // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateJob.html) // -// -// * PutJobTagging +// * +// PutJobTagging // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutJobTagging.html) // -// -// * DeleteJobTagging +// * +// DeleteJobTagging // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteJobTagging.html) func (c *Client) GetJobTagging(ctx context.Context, params *GetJobTaggingInput, optFns ...func(*Options)) (*GetJobTaggingOutput, error) { if params == nil { diff --git a/service/s3control/api_op_GetPublicAccessBlock.go b/service/s3control/api_op_GetPublicAccessBlock.go index 9a940257eff..ff03072d2b3 100644 --- a/service/s3control/api_op_GetPublicAccessBlock.go +++ b/service/s3control/api_op_GetPublicAccessBlock.go @@ -19,11 +19,11 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html). // Related actions include: // -// * DeletePublicAccessBlock +// * DeletePublicAccessBlock // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeletePublicAccessBlock.html) // -// -// * PutPublicAccessBlock +// * +// PutPublicAccessBlock // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutPublicAccessBlock.html) func (c *Client) GetPublicAccessBlock(ctx context.Context, params *GetPublicAccessBlockInput, optFns ...func(*Options)) (*GetPublicAccessBlockOutput, error) { if params == nil { diff --git a/service/s3control/api_op_ListAccessPoints.go b/service/s3control/api_op_ListAccessPoints.go index 303735cb6fa..93a23c42381 100644 --- a/service/s3control/api_op_ListAccessPoints.go +++ b/service/s3control/api_op_ListAccessPoints.go @@ -27,16 +27,16 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_GetAccessPoint.html#API_control_GetAccessPoint_Examples) // section below. 
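A short sketch of reading the account-level settings with GetPublicAccessBlock, using the PublicAccessBlockConfiguration type shown later in this patch (where the flags are *bool). The account ID is a placeholder.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3control"
)

func main() {
	// Standard v2 SDK config loading; signature varies across preview releases.
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3control.NewFromConfig(cfg)

	// Read the account-level public access block settings.
	out, err := client.GetPublicAccessBlock(context.TODO(), &s3control.GetPublicAccessBlockInput{
		AccountId: aws.String("123456789012"), // placeholder account ID
	})
	if err != nil {
		log.Fatal(err)
	}

	block := out.PublicAccessBlockConfiguration
	if block == nil {
		fmt.Println("no public access block configuration is set")
		return
	}
	// In this version of the generated types the flags are *bool, so nil means "unset".
	fmt.Printf("BlockPublicAcls=%v BlockPublicPolicy=%v IgnorePublicAcls=%v RestrictPublicBuckets=%v\n",
		aws.ToBool(block.BlockPublicAcls),
		aws.ToBool(block.BlockPublicPolicy),
		aws.ToBool(block.IgnorePublicAcls),
		aws.ToBool(block.RestrictPublicBuckets))
}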
The following actions are related to ListAccessPoints: // -// * +// * // CreateAccessPoint // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateAccessPoint.html) // -// -// * DeleteAccessPoint +// * +// DeleteAccessPoint // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteAccessPoint.html) // -// -// * GetAccessPoint +// * +// GetAccessPoint // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetAccessPoint.html) func (c *Client) ListAccessPoints(ctx context.Context, params *ListAccessPointsInput, optFns ...func(*Options)) (*ListAccessPointsOutput, error) { if params == nil { diff --git a/service/s3control/api_op_ListJobs.go b/service/s3control/api_op_ListJobs.go index f98472ff474..26109e64038 100644 --- a/service/s3control/api_op_ListJobs.go +++ b/service/s3control/api_op_ListJobs.go @@ -20,20 +20,20 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/batch-ops-basics.html) in the // Amazon Simple Storage Service Developer Guide. Related actions include: // -// * +// * // CreateJob // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateJob.html) // -// -// * DescribeJob +// * +// DescribeJob // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DescribeJob.html) // -// -// * UpdateJobPriority +// * +// UpdateJobPriority // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_UpdateJobPriority.html) // -// -// * UpdateJobStatus +// * +// UpdateJobStatus // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_UpdateJobStatus.html) func (c *Client) ListJobs(ctx context.Context, params *ListJobsInput, optFns ...func(*Options)) (*ListJobsOutput, error) { if params == nil { diff --git a/service/s3control/api_op_PutAccessPointPolicy.go b/service/s3control/api_op_PutAccessPointPolicy.go index 59ac0dddef9..2272e19c34d 100644 --- a/service/s3control/api_op_PutAccessPointPolicy.go +++ b/service/s3control/api_op_PutAccessPointPolicy.go @@ -24,12 +24,12 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_PutAccessPointPolicy.html#API_control_PutAccessPointPolicy_Examples) // section below. The following actions are related to PutAccessPointPolicy: // -// * +// * // GetAccessPointPolicy // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetAccessPointPolicy.html) // -// -// * DeleteAccessPointPolicy +// * +// DeleteAccessPointPolicy // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteAccessPointPolicy.html) func (c *Client) PutAccessPointPolicy(ctx context.Context, params *PutAccessPointPolicyInput, optFns ...func(*Options)) (*PutAccessPointPolicyOutput, error) { if params == nil { diff --git a/service/s3control/api_op_PutBucketLifecycleConfiguration.go b/service/s3control/api_op_PutBucketLifecycleConfiguration.go index 1d42d7f9a73..b2a1b2856cc 100644 --- a/service/s3control/api_op_PutBucketLifecycleConfiguration.go +++ b/service/s3control/api_op_PutBucketLifecycleConfiguration.go @@ -33,11 +33,11 @@ import ( // section below. 
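For ListJobs above, a sketch that filters by job status and pages manually with NextToken rather than assuming a paginator helper exists in this release. The account ID is a placeholder.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3control"
	"github.com/aws/aws-sdk-go-v2/service/s3control/types"
)

func main() {
	// Standard v2 SDK config loading; signature varies across preview releases.
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3control.NewFromConfig(cfg)

	accountID := aws.String("123456789012") // placeholder account ID
	var nextToken *string

	// Page through active and complete Batch Operations jobs using NextToken.
	for {
		out, err := client.ListJobs(context.TODO(), &s3control.ListJobsInput{
			AccountId:   accountID,
			JobStatuses: []types.JobStatus{types.JobStatusActive, types.JobStatusComplete},
			NextToken:   nextToken,
		})
		if err != nil {
			log.Fatal(err)
		}
		for _, job := range out.Jobs {
			fmt.Printf("%s  %s\n", aws.ToString(job.JobId), job.Status)
		}
		if out.NextToken == nil {
			break
		}
		nextToken = out.NextToken
	}
}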
The following actions are related to // PutBucketLifecycleConfiguration: // -// * GetBucketLifecycleConfiguration +// * GetBucketLifecycleConfiguration // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetBucketLifecycleConfiguration.html) // -// -// * DeleteBucketLifecycleConfiguration +// * +// DeleteBucketLifecycleConfiguration // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteBucketLifecycleConfiguration.html) func (c *Client) PutBucketLifecycleConfiguration(ctx context.Context, params *PutBucketLifecycleConfigurationInput, optFns ...func(*Options)) (*PutBucketLifecycleConfigurationOutput, error) { if params == nil { diff --git a/service/s3control/api_op_PutBucketPolicy.go b/service/s3control/api_op_PutBucketPolicy.go index 9531e2cb5fb..428aebdee86 100644 --- a/service/s3control/api_op_PutBucketPolicy.go +++ b/service/s3control/api_op_PutBucketPolicy.go @@ -39,12 +39,12 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_PutBucketPolicy.html#API_control_PutBucketPolicy_Examples) // section below. The following actions are related to PutBucketPolicy: // -// * +// * // GetBucketPolicy // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetBucketPolicy.html) // -// -// * DeleteBucketPolicy +// * +// DeleteBucketPolicy // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteBucketPolicy.html) func (c *Client) PutBucketPolicy(ctx context.Context, params *PutBucketPolicyInput, optFns ...func(*Options)) (*PutBucketPolicyOutput, error) { if params == nil { diff --git a/service/s3control/api_op_PutBucketTagging.go b/service/s3control/api_op_PutBucketTagging.go index 75971e1a66b..fe1af1a733e 100644 --- a/service/s3control/api_op_PutBucketTagging.go +++ b/service/s3control/api_op_PutBucketTagging.go @@ -43,49 +43,48 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). // PutBucketTagging has the following special errors: // -// * Error code: +// * Error code: // InvalidTagError // -// * Description: The tag provided was not a valid tag. -// This error can occur if the tag did not pass input validation. For information -// about tag restrictions, see User-Defined Tag Restrictions +// * Description: The tag provided was not a valid tag. This error +// can occur if the tag did not pass input validation. For information about tag +// restrictions, see User-Defined Tag Restrictions // (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html) // and AWS-Generated Cost Allocation Tag Restrictions // (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/aws-tag-restrictions.html). // +// * +// Error code: MalformedXMLError // -// * Error code: MalformedXMLError +// * Description: The XML provided does not match +// the schema. // -// * Description: The XML provided does -// not match the schema. +// * Error code: OperationAbortedError // -// * Error code: OperationAbortedError +// * Description: A conflicting +// conditional operation is currently in progress against this resource. Try +// again. // -// * -// Description: A conflicting conditional operation is currently in progress -// against this resource. Try again. +// * Error code: InternalError // -// * Error code: InternalError +// * Description: The service was unable to +// apply the provided tag to the bucket. // -// * -// Description: The service was unable to apply the provided tag to the -// bucket. 
-// -// All Amazon S3 on Outposts REST API requests for this action require an -// additional parameter of outpost-id to be passed with the request and an S3 on -// Outposts endpoint hostname prefix instead of s3-control. For an example of the -// request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint -// hostname prefix and the outpost-id derived using the access point ARN, see the -// Example +// All Amazon S3 on Outposts REST API +// requests for this action require an additional parameter of outpost-id to be +// passed with the request and an S3 on Outposts endpoint hostname prefix instead +// of s3-control. For an example of the request syntax for Amazon S3 on Outposts +// that uses the S3 on Outposts endpoint hostname prefix and the outpost-id derived +// using the access point ARN, see the Example // (https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_PutBucketTagging.html#API_control_PutBucketTagging_Examples) // section below. The following actions are related to PutBucketTagging: // -// * +// * // GetBucketTagging // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetBucketTagging.html) // -// -// * DeleteBucketTagging +// * +// DeleteBucketTagging // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteBucketTagging.html) func (c *Client) PutBucketTagging(ctx context.Context, params *PutBucketTaggingInput, optFns ...func(*Options)) (*PutBucketTaggingOutput, error) { if params == nil { diff --git a/service/s3control/api_op_PutJobTagging.go b/service/s3control/api_op_PutJobTagging.go index 7af3faf6738..d26d28b0ad3 100644 --- a/service/s3control/api_op_PutJobTagging.go +++ b/service/s3control/api_op_PutJobTagging.go @@ -27,35 +27,35 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/batch-ops-managing-jobs.html#batch-ops-job-tags) // in the Amazon Simple Storage Service Developer Guide. // -// * If you send this +// * If you send this // request with an empty tag set, Amazon S3 deletes the existing tag set on the // Batch Operations job. If you use this method, you are charged for a Tier 1 // Request (PUT). For more information, see Amazon S3 pricing // (http://aws.amazon.com/s3/pricing/). // -// * For deleting existing tags for your +// * For deleting existing tags for your // Batch Operations job, a DeleteJobTagging // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteJobTagging.html) // request is preferred because it achieves the same result without incurring // charges. // -// * A few things to consider about using tags: +// * A few things to consider about using tags: // -// * Amazon S3 -// limits the maximum number of tags to 50 tags per job. +// * Amazon S3 limits the +// maximum number of tags to 50 tags per job. // -// * You can -// associate up to 50 tags with a job as long as they have unique tag keys. +// * You can associate up to 50 tags +// with a job as long as they have unique tag keys. // +// * A tag key can be up to 128 +// Unicode characters in length, and tag values can be up to 256 Unicode characters +// in length. // -// * A tag key can be up to 128 Unicode characters in length, and tag values can be -// up to 256 Unicode characters in length. +// * The key and values are case sensitive. // -// * The key and values are case -// sensitive. 
-// -// * For tagging-related restrictions related to characters and -// encodings, see User-Defined Tag Restrictions +// * For tagging-related +// restrictions related to characters and encodings, see User-Defined Tag +// Restrictions // (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html) // in the AWS Billing and Cost Management User Guide. // @@ -63,15 +63,15 @@ import ( // must have permission to perform the s3:PutJobTagging action. Related actions // include: // -// * CreatJob +// * CreatJob // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateJob.html) // -// -// * GetJobTagging +// * +// GetJobTagging // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetJobTagging.html) // -// -// * DeleteJobTagging +// * +// DeleteJobTagging // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteJobTagging.html) func (c *Client) PutJobTagging(ctx context.Context, params *PutJobTaggingInput, optFns ...func(*Options)) (*PutJobTaggingOutput, error) { if params == nil { diff --git a/service/s3control/api_op_PutPublicAccessBlock.go b/service/s3control/api_op_PutPublicAccessBlock.go index 9bac04e603a..92e7858ed30 100644 --- a/service/s3control/api_op_PutPublicAccessBlock.go +++ b/service/s3control/api_op_PutPublicAccessBlock.go @@ -19,11 +19,11 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html). // Related actions include: // -// * GetPublicAccessBlock +// * GetPublicAccessBlock // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetPublicAccessBlock.html) // -// -// * DeletePublicAccessBlock +// * +// DeletePublicAccessBlock // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeletePublicAccessBlock.html) func (c *Client) PutPublicAccessBlock(ctx context.Context, params *PutPublicAccessBlockInput, optFns ...func(*Options)) (*PutPublicAccessBlockOutput, error) { if params == nil { diff --git a/service/s3control/api_op_UpdateJobPriority.go b/service/s3control/api_op_UpdateJobPriority.go index ddb2760f671..95247818357 100644 --- a/service/s3control/api_op_UpdateJobPriority.go +++ b/service/s3control/api_op_UpdateJobPriority.go @@ -18,20 +18,20 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/batch-ops-basics.html) in the // Amazon Simple Storage Service Developer Guide. Related actions include: // -// * +// * // CreateJob // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateJob.html) // -// -// * ListJobs +// * +// ListJobs // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_ListJobs.html) // -// -// * DescribeJob +// * +// DescribeJob // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DescribeJob.html) // -// -// * UpdateJobStatus +// * +// UpdateJobStatus // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_UpdateJobStatus.html) func (c *Client) UpdateJobPriority(ctx context.Context, params *UpdateJobPriorityInput, optFns ...func(*Options)) (*UpdateJobPriorityOutput, error) { if params == nil { diff --git a/service/s3control/api_op_UpdateJobStatus.go b/service/s3control/api_op_UpdateJobStatus.go index 304753faf9c..3991fe6fbec 100644 --- a/service/s3control/api_op_UpdateJobStatus.go +++ b/service/s3control/api_op_UpdateJobStatus.go @@ -20,20 +20,20 @@ import ( // (https://docs.aws.amazon.com/AmazonS3/latest/dev/batch-ops-basics.html) in the // Amazon Simple Storage Service Developer Guide. 
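A sketch of PutJobTagging that respects the tagging considerations listed above: at most 50 tags per job, keys up to 128 Unicode characters, values up to 256, and case-sensitive, unique keys. The account ID, job ID, and tag values are placeholders.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3control"
	"github.com/aws/aws-sdk-go-v2/service/s3control/types"
)

func main() {
	// Standard v2 SDK config loading; signature varies across preview releases.
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3control.NewFromConfig(cfg)

	// Replace the job's tag set. Sending an empty Tags slice deletes the existing
	// tag set and is still billed as a Tier 1 (PUT) request, per the notes above.
	_, err = client.PutJobTagging(context.TODO(), &s3control.PutJobTaggingInput{
		AccountId: aws.String("123456789012"),                         // placeholder account ID
		JobId:     aws.String("00000000-0000-0000-0000-000000000000"), // placeholder job ID
		Tags: []types.S3Tag{
			{Key: aws.String("department"), Value: aws.String("finance")}, // placeholder tags
			{Key: aws.String("stage"), Value: aws.String("backfill")},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}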
Related actions include: // -// * +// * // CreateJob // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateJob.html) // -// -// * ListJobs +// * +// ListJobs // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_ListJobs.html) // -// -// * DescribeJob +// * +// DescribeJob // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DescribeJob.html) // -// -// * UpdateJobStatus +// * +// UpdateJobStatus // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_UpdateJobStatus.html) func (c *Client) UpdateJobStatus(ctx context.Context, params *UpdateJobStatusInput, optFns ...func(*Options)) (*UpdateJobStatusOutput, error) { if params == nil { diff --git a/service/s3control/types/enums.go b/service/s3control/types/enums.go index 24c002e8c96..2e895e096e9 100644 --- a/service/s3control/types/enums.go +++ b/service/s3control/types/enums.go @@ -104,8 +104,8 @@ type JobManifestFormat string // Enum values for JobManifestFormat const ( - JobManifestFormatS3batchoperations_csv_20180820 JobManifestFormat = "S3BatchOperations_CSV_20180820" - JobManifestFormatS3inventoryreport_csv_20161130 JobManifestFormat = "S3InventoryReport_CSV_20161130" + JobManifestFormatS3batchoperationsCsv20180820 JobManifestFormat = "S3BatchOperations_CSV_20180820" + JobManifestFormatS3inventoryreportCsv20161130 JobManifestFormat = "S3InventoryReport_CSV_20161130" ) // Values returns all known values for JobManifestFormat. Note that this can be @@ -122,7 +122,7 @@ type JobReportFormat string // Enum values for JobReportFormat const ( - JobReportFormatReport_csv_20180820 JobReportFormat = "Report_CSV_20180820" + JobReportFormatReportCsv20180820 JobReportFormat = "Report_CSV_20180820" ) // Values returns all known values for JobReportFormat. Note that this can be @@ -260,13 +260,13 @@ type S3CannedAccessControlList string // Enum values for S3CannedAccessControlList const ( - S3CannedAccessControlListPrivate S3CannedAccessControlList = "private" - S3CannedAccessControlListPublic_read S3CannedAccessControlList = "public-read" - S3CannedAccessControlListPublic_read_write S3CannedAccessControlList = "public-read-write" - S3CannedAccessControlListAws_exec_read S3CannedAccessControlList = "aws-exec-read" - S3CannedAccessControlListAuthenticated_read S3CannedAccessControlList = "authenticated-read" - S3CannedAccessControlListBucket_owner_read S3CannedAccessControlList = "bucket-owner-read" - S3CannedAccessControlListBucket_owner_full_control S3CannedAccessControlList = "bucket-owner-full-control" + S3CannedAccessControlListPrivate S3CannedAccessControlList = "private" + S3CannedAccessControlListPublicRead S3CannedAccessControlList = "public-read" + S3CannedAccessControlListPublicReadWrite S3CannedAccessControlList = "public-read-write" + S3CannedAccessControlListAwsExecRead S3CannedAccessControlList = "aws-exec-read" + S3CannedAccessControlListAuthenticatedRead S3CannedAccessControlList = "authenticated-read" + S3CannedAccessControlListBucketOwnerRead S3CannedAccessControlList = "bucket-owner-read" + S3CannedAccessControlListBucketOwnerFullControl S3CannedAccessControlList = "bucket-owner-full-control" ) // Values returns all known values for S3CannedAccessControlList. 
Note that this @@ -306,9 +306,9 @@ type S3GranteeTypeIdentifier string // Enum values for S3GranteeTypeIdentifier const ( - S3GranteeTypeIdentifierCanonical S3GranteeTypeIdentifier = "id" - S3GranteeTypeIdentifierEmail_address S3GranteeTypeIdentifier = "emailAddress" - S3GranteeTypeIdentifierGroup S3GranteeTypeIdentifier = "uri" + S3GranteeTypeIdentifierCanonical S3GranteeTypeIdentifier = "id" + S3GranteeTypeIdentifierEmailAddress S3GranteeTypeIdentifier = "emailAddress" + S3GranteeTypeIdentifierGroup S3GranteeTypeIdentifier = "uri" ) // Values returns all known values for S3GranteeTypeIdentifier. Note that this can @@ -398,11 +398,11 @@ type S3Permission string // Enum values for S3Permission const ( - S3PermissionFull_control S3Permission = "FULL_CONTROL" - S3PermissionRead S3Permission = "READ" - S3PermissionWrite S3Permission = "WRITE" - S3PermissionRead_acp S3Permission = "READ_ACP" - S3PermissionWrite_acp S3Permission = "WRITE_ACP" + S3PermissionFullControl S3Permission = "FULL_CONTROL" + S3PermissionRead S3Permission = "READ" + S3PermissionWrite S3Permission = "WRITE" + S3PermissionReadAcp S3Permission = "READ_ACP" + S3PermissionWriteAcp S3Permission = "WRITE_ACP" ) // Values returns all known values for S3Permission. Note that this can be expanded @@ -440,12 +440,12 @@ type S3StorageClass string // Enum values for S3StorageClass const ( - S3StorageClassStandard S3StorageClass = "STANDARD" - S3StorageClassStandard_ia S3StorageClass = "STANDARD_IA" - S3StorageClassOnezone_ia S3StorageClass = "ONEZONE_IA" - S3StorageClassGlacier S3StorageClass = "GLACIER" - S3StorageClassIntelligent_tiering S3StorageClass = "INTELLIGENT_TIERING" - S3StorageClassDeep_archive S3StorageClass = "DEEP_ARCHIVE" + S3StorageClassStandard S3StorageClass = "STANDARD" + S3StorageClassStandardIa S3StorageClass = "STANDARD_IA" + S3StorageClassOnezoneIa S3StorageClass = "ONEZONE_IA" + S3StorageClassGlacier S3StorageClass = "GLACIER" + S3StorageClassIntelligentTiering S3StorageClass = "INTELLIGENT_TIERING" + S3StorageClassDeepArchive S3StorageClass = "DEEP_ARCHIVE" ) // Values returns all known values for S3StorageClass. Note that this can be @@ -466,11 +466,11 @@ type TransitionStorageClass string // Enum values for TransitionStorageClass const ( - TransitionStorageClassGlacier TransitionStorageClass = "GLACIER" - TransitionStorageClassStandard_ia TransitionStorageClass = "STANDARD_IA" - TransitionStorageClassOnezone_ia TransitionStorageClass = "ONEZONE_IA" - TransitionStorageClassIntelligent_tiering TransitionStorageClass = "INTELLIGENT_TIERING" - TransitionStorageClassDeep_archive TransitionStorageClass = "DEEP_ARCHIVE" + TransitionStorageClassGlacier TransitionStorageClass = "GLACIER" + TransitionStorageClassStandardIa TransitionStorageClass = "STANDARD_IA" + TransitionStorageClassOnezoneIa TransitionStorageClass = "ONEZONE_IA" + TransitionStorageClassIntelligentTiering TransitionStorageClass = "INTELLIGENT_TIERING" + TransitionStorageClassDeepArchive TransitionStorageClass = "DEEP_ARCHIVE" ) // Values returns all known values for TransitionStorageClass. Note that this can diff --git a/service/s3control/types/types.go b/service/s3control/types/types.go index ebff9afb859..d5c59a6d8d8 100644 --- a/service/s3control/types/types.go +++ b/service/s3control/types/types.go @@ -444,17 +444,17 @@ type PublicAccessBlockConfiguration struct { // buckets in this account. Setting this element to TRUE causes the following // behavior: // - // * PUT Bucket acl and PUT Object acl calls fail if the specified - // ACL is public. 
+ // * PUT Bucket acl and PUT Object acl calls fail if the specified ACL + // is public. // - // * PUT Object calls fail if the request includes a public - // ACL. + // * PUT Object calls fail if the request includes a public ACL. // - // * PUT Bucket calls fail if the request includes a public - // ACL. + // * PUT + // Bucket calls fail if the request includes a public ACL. // - // Enabling this setting doesn't affect existing policies or ACLs. This is - // not supported for Amazon S3 on Outposts. + // Enabling this setting + // doesn't affect existing policies or ACLs. This is not supported for Amazon S3 on + // Outposts. BlockPublicAcls *bool // Specifies whether Amazon S3 should block public bucket policies for buckets in diff --git a/service/s3outposts/api_op_CreateEndpoint.go b/service/s3outposts/api_op_CreateEndpoint.go index c2db857833e..c476b6f1b32 100644 --- a/service/s3outposts/api_op_CreateEndpoint.go +++ b/service/s3outposts/api_op_CreateEndpoint.go @@ -16,11 +16,11 @@ import ( // cloud (VPC). This action creates an endpoint and associates it with the // specified Outpost. Related actions include: // -// * DeleteEndpoint +// * DeleteEndpoint // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_s3outposts_DeleteEndpoint.html) // -// -// * ListEndpoints +// * +// ListEndpoints // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_s3outposts_ListEndpoints.html) func (c *Client) CreateEndpoint(ctx context.Context, params *CreateEndpointInput, optFns ...func(*Options)) (*CreateEndpointOutput, error) { if params == nil { diff --git a/service/s3outposts/api_op_DeleteEndpoint.go b/service/s3outposts/api_op_DeleteEndpoint.go index 3d3ed62993b..cd7027a170a 100644 --- a/service/s3outposts/api_op_DeleteEndpoint.go +++ b/service/s3outposts/api_op_DeleteEndpoint.go @@ -15,12 +15,12 @@ import ( // Outposts buckets so that you can perform actions within your virtual private // cloud (VPC). This action deletes an endpoint. Related actions include: // -// * +// * // CreateEndpoint // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_s3outposts_CreateEndpoint.html) // -// -// * ListEndpoints +// * +// ListEndpoints // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_s3outposts_ListEndpoints.html) func (c *Client) DeleteEndpoint(ctx context.Context, params *DeleteEndpointInput, optFns ...func(*Options)) (*DeleteEndpointOutput, error) { if params == nil { diff --git a/service/s3outposts/api_op_ListEndpoints.go b/service/s3outposts/api_op_ListEndpoints.go index 85b95fb8bac..68d9f12fe8b 100644 --- a/service/s3outposts/api_op_ListEndpoints.go +++ b/service/s3outposts/api_op_ListEndpoints.go @@ -17,11 +17,11 @@ import ( // cloud (VPC). This action lists endpoints associated with the Outpost. 
Related // actions include: // -// * CreateEndpoint +// * CreateEndpoint // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_s3outposts_CreateEndpoint.html) // -// -// * DeleteEndpoint +// * +// DeleteEndpoint // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_s3outposts_DeleteEndpoint.html) func (c *Client) ListEndpoints(ctx context.Context, params *ListEndpointsInput, optFns ...func(*Options)) (*ListEndpointsOutput, error) { if params == nil { diff --git a/service/sagemaker/api_op_CreateAlgorithm.go b/service/sagemaker/api_op_CreateAlgorithm.go index dcbdd00a340..1c1d15e9ded 100644 --- a/service/sagemaker/api_op_CreateAlgorithm.go +++ b/service/sagemaker/api_op_CreateAlgorithm.go @@ -38,26 +38,26 @@ type CreateAlgorithmInput struct { // Specifies details about training jobs run by this algorithm, including the // following: // - // * The Amazon ECR path of the container and the version digest of - // the algorithm. + // * The Amazon ECR path of the container and the version digest of the + // algorithm. // - // * The hyperparameters that the algorithm supports. + // * The hyperparameters that the algorithm supports. // - // * - // The instance types that the algorithm supports for training. + // * The instance + // types that the algorithm supports for training. // - // * Whether the - // algorithm supports distributed training. + // * Whether the algorithm + // supports distributed training. // - // * The metrics that the algorithm - // emits to Amazon CloudWatch. + // * The metrics that the algorithm emits to Amazon + // CloudWatch. // - // * Which metrics that the algorithm emits can be - // used as the objective metric for hyperparameter tuning jobs. + // * Which metrics that the algorithm emits can be used as the + // objective metric for hyperparameter tuning jobs. // - // * The input - // channels that the algorithm supports for training data. For example, an - // algorithm might support train, validation, and test channels. + // * The input channels that the + // algorithm supports for training data. For example, an algorithm might support + // train, validation, and test channels. // // This member is required. TrainingSpecification *types.TrainingSpecification @@ -71,14 +71,14 @@ type CreateAlgorithmInput struct { // Specifies details about inference jobs that the algorithm runs, including the // following: // - // * The Amazon ECR paths of containers that contain the inference - // code and model artifacts. + // * The Amazon ECR paths of containers that contain the inference code + // and model artifacts. // - // * The instance types that the algorithm supports - // for transform jobs and real-time endpoints used for inference. + // * The instance types that the algorithm supports for + // transform jobs and real-time endpoints used for inference. // - // * The input - // and output content formats that the algorithm supports for inference. + // * The input and + // output content formats that the algorithm supports for inference. InferenceSpecification *types.InferenceSpecification // Specifies configurations for one or more training jobs and that Amazon SageMaker diff --git a/service/sagemaker/api_op_CreateCompilationJob.go b/service/sagemaker/api_op_CreateCompilationJob.go index 0084753f12c..10a3c57afa1 100644 --- a/service/sagemaker/api_op_CreateCompilationJob.go +++ b/service/sagemaker/api_op_CreateCompilationJob.go @@ -19,24 +19,23 @@ import ( // Greengrass. In that case, deploy them as an ML resource. 
In the request body, // you provide the following: // -// * A name for the compilation job +// * A name for the compilation job // -// * -// Information about the input model artifacts +// * Information +// about the input model artifacts // -// * The output location for the -// compiled model and the device (target) that the model runs on +// * The output location for the compiled model +// and the device (target) that the model runs on // -// * The Amazon -// Resource Name (ARN) of the IAM role that Amazon SageMaker assumes to perform the -// model compilation job. +// * The Amazon Resource Name (ARN) +// of the IAM role that Amazon SageMaker assumes to perform the model compilation +// job. // -// You can also provide a Tag to track the model -// compilation job's resource use and costs. The response body contains the -// CompilationJobArn for the compiled job. To stop a model compilation job, use -// StopCompilationJob. To get information about a particular model compilation job, -// use DescribeCompilationJob. To get information about multiple model compilation -// jobs, use ListCompilationJobs. +// You can also provide a Tag to track the model compilation job's resource +// use and costs. The response body contains the CompilationJobArn for the compiled +// job. To stop a model compilation job, use StopCompilationJob. To get information +// about a particular model compilation job, use DescribeCompilationJob. To get +// information about multiple model compilation jobs, use ListCompilationJobs. func (c *Client) CreateCompilationJob(ctx context.Context, params *CreateCompilationJobInput, optFns ...func(*Options)) (*CreateCompilationJobOutput, error) { if params == nil { params = &CreateCompilationJobInput{} @@ -77,21 +76,20 @@ type CreateCompilationJobInput struct { // perform tasks on your behalf. During model compilation, Amazon SageMaker needs // your permission to: // - // * Read input data from an S3 bucket + // * Read input data from an S3 bucket // - // * Write model + // * Write model // artifacts to an S3 bucket // - // * Write logs to Amazon CloudWatch Logs + // * Write logs to Amazon CloudWatch Logs // - // * - // Publish metrics to Amazon CloudWatch + // * Publish + // metrics to Amazon CloudWatch // - // You grant permissions for all of these - // tasks to an IAM role. To pass this role to Amazon SageMaker, the caller of this - // API must have the iam:PassRole permission. For more information, see Amazon - // SageMaker Roles. - // (https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html) + // You grant permissions for all of these tasks to an + // IAM role. To pass this role to Amazon SageMaker, the caller of this API must + // have the iam:PassRole permission. For more information, see Amazon SageMaker + // Roles. (https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html) // // This member is required. RoleArn *string @@ -109,8 +107,8 @@ type CreateCompilationJobOutput struct { // If the action is successful, the service sends back an HTTP 200 response. Amazon // SageMaker returns the following data in JSON format: // - // * CompilationJobArn: - // The Amazon Resource Name (ARN) of the compiled job. + // * CompilationJobArn: The + // Amazon Resource Name (ARN) of the compiled job. // // This member is required. 
CompilationJobArn *string diff --git a/service/sagemaker/api_op_CreateDomain.go b/service/sagemaker/api_op_CreateDomain.go index 4503c0976aa..96c2fe85752 100644 --- a/service/sagemaker/api_op_CreateDomain.go +++ b/service/sagemaker/api_op_CreateDomain.go @@ -25,41 +25,41 @@ import ( // access type that you choose when you onboard to Studio. The following options // are available: // -// * PublicInternetOnly - Non-EFS traffic goes through a VPC +// * PublicInternetOnly - Non-EFS traffic goes through a VPC // managed by Amazon SageMaker, which allows internet access. This is the default // value. // -// * VpcOnly - All Studio traffic is through the specified VPC and -// subnets. Internet access is disabled by default. To allow internet access, you -// must specify a NAT gateway. When internet access is disabled, you won't be able -// to train or host models unless your VPC has an interface endpoint (PrivateLink) -// or a NAT gateway and your security groups allow outbound connections. +// * VpcOnly - All Studio traffic is through the specified VPC and subnets. +// Internet access is disabled by default. To allow internet access, you must +// specify a NAT gateway. When internet access is disabled, you won't be able to +// train or host models unless your VPC has an interface endpoint (PrivateLink) or +// a NAT gateway and your security groups allow outbound connections. // // VpcOnly // network access type When you choose VpcOnly, you must specify the following: // +// * +// Security group inbound and outbound rules to allow NFS traffic over TCP on port +// 2049 between the domain and the EFS volume // -// * Security group inbound and outbound rules to allow NFS traffic over TCP on -// port 2049 between the domain and the EFS volume -// -// * Security group inbound -// and outbound rules to allow traffic between the JupyterServer app and the +// * Security group inbound and +// outbound rules to allow traffic between the JupyterServer app and the // KernelGateway apps // -// * Interface endpoints to access the SageMaker API and +// * Interface endpoints to access the SageMaker API and // SageMaker runtime // // For more information, see: // -// * Security groups for your -// VPC (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html) -// +// * Security groups for your VPC +// (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html) // -// * VPC with public and private subnets (NAT) +// * +// VPC with public and private subnets (NAT) // (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Scenario2.html) // -// * -// Connect to SageMaker through a VPC interface endpoint +// * Connect +// to SageMaker through a VPC interface endpoint // (https://docs.aws.amazon.com/sagemaker/latest/dg/interface-vpc-endpoint.html) func (c *Client) CreateDomain(ctx context.Context, params *CreateDomainInput, optFns ...func(*Options)) (*CreateDomainOutput, error) { if params == nil { @@ -107,11 +107,11 @@ type CreateDomainInput struct { // Specifies the VPC used for non-EFS traffic. The default value is // PublicInternetOnly. 
// - // * PublicInternetOnly - Non-EFS traffic is through a VPC + // * PublicInternetOnly - Non-EFS traffic is through a VPC // managed by Amazon SageMaker, which allows direct internet access // - // * VpcOnly - // - All Studio traffic is through the specified VPC and subnets + // * VpcOnly - + // All Studio traffic is through the specified VPC and subnets AppNetworkAccessType types.AppNetworkAccessType // The AWS Key Management Service (KMS) encryption key ID. Encryption with a diff --git a/service/sagemaker/api_op_CreateEndpointConfig.go b/service/sagemaker/api_op_CreateEndpointConfig.go index 287c222e3b1..a9cb13d7d8f 100644 --- a/service/sagemaker/api_op_CreateEndpointConfig.go +++ b/service/sagemaker/api_op_CreateEndpointConfig.go @@ -76,15 +76,15 @@ type CreateEndpointConfigInput struct { // instance that hosts the endpoint. The KmsKeyId can be any of the following // formats: // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab // - // * Key ARN: + // * Key ARN: // arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab // + // * + // Alias name: alias/ExampleAlias // - // * Alias name: alias/ExampleAlias - // - // * Alias name ARN: + // * Alias name ARN: // arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias // // The KMS key policy must diff --git a/service/sagemaker/api_op_CreateLabelingJob.go b/service/sagemaker/api_op_CreateLabelingJob.go index 05e8b3a5f36..432493db817 100644 --- a/service/sagemaker/api_op_CreateLabelingJob.go +++ b/service/sagemaker/api_op_CreateLabelingJob.go @@ -15,24 +15,24 @@ import ( // You can use the labeled data to train machine learning models. You can select // your workforce from one of three providers: // -// * A private workforce that you +// * A private workforce that you // create. It can include employees, contractors, and outside experts. Use a // private workforce when want the data to stay within your organization or when a // specific set of skills is required. // -// * One or more vendors that you select -// from the AWS Marketplace. Vendors provide expertise in specific areas. +// * One or more vendors that you select from +// the AWS Marketplace. Vendors provide expertise in specific areas. // -// * -// The Amazon Mechanical Turk workforce. This is the largest workforce, but it -// should only be used for public data or data that has been stripped of any -// personally identifiable information. +// * The Amazon +// Mechanical Turk workforce. This is the largest workforce, but it should only be +// used for public data or data that has been stripped of any personally +// identifiable information. // -// You can also use automated data labeling -// to reduce the number of data objects that need to be labeled by a human. -// Automated data labeling uses active learning to determine if a data object can -// be labeled by machine or if it needs to be sent to a human worker. For more -// information, see Using Automated Data Labeling +// You can also use automated data labeling to reduce +// the number of data objects that need to be labeled by a human. Automated data +// labeling uses active learning to determine if a data object can be labeled by +// machine or if it needs to be sent to a human worker. For more information, see +// Using Automated Data Labeling // (https://docs.aws.amazon.com/sagemaker/latest/dg/sms-automated-labeling.html). // The data objects to be labeled are contained in an Amazon S3 bucket. 
You create // a manifest file that describes the location of each object. For more diff --git a/service/sagemaker/api_op_CreateModelPackage.go b/service/sagemaker/api_op_CreateModelPackage.go index 53762661a80..e0d7ab293eb 100644 --- a/service/sagemaker/api_op_CreateModelPackage.go +++ b/service/sagemaker/api_op_CreateModelPackage.go @@ -47,15 +47,15 @@ type CreateModelPackageInput struct { // Specifies details about inference jobs that can be run with models based on this // model package, including the following: // - // * The Amazon ECR paths of - // containers that contain the inference code and model artifacts. + // * The Amazon ECR paths of containers + // that contain the inference code and model artifacts. // - // * The - // instance types that the model package supports for transform jobs and real-time - // endpoints used for inference. + // * The instance types that + // the model package supports for transform jobs and real-time endpoints used for + // inference. // - // * The input and output content formats that - // the model package supports for inference. + // * The input and output content formats that the model package + // supports for inference. InferenceSpecification *types.InferenceSpecification // A description of the model package. diff --git a/service/sagemaker/api_op_CreateNotebookInstance.go b/service/sagemaker/api_op_CreateNotebookInstance.go index f518e195732..7e9db87eec4 100644 --- a/service/sagemaker/api_op_CreateNotebookInstance.go +++ b/service/sagemaker/api_op_CreateNotebookInstance.go @@ -21,29 +21,29 @@ import ( // specific algorithm or with a machine learning framework. After receiving the // request, Amazon SageMaker does the following: // -// * Creates a network interface -// in the Amazon SageMaker VPC. +// * Creates a network interface in +// the Amazon SageMaker VPC. // -// * (Option) If you specified SubnetId, Amazon +// * (Option) If you specified SubnetId, Amazon // SageMaker creates a network interface in your own VPC, which is inferred from // the subnet ID that you provide in the input. When creating this network // interface, Amazon SageMaker attaches the security group that you specified in // the request to the network interface that it creates in your VPC. // -// * -// Launches an EC2 instance of the type specified in the request in the Amazon -// SageMaker VPC. If you specified SubnetId of your VPC, Amazon SageMaker specifies -// both network interfaces when launching this instance. This enables inbound -// traffic from your own VPC to the notebook instance, assuming that the security -// groups allow it. +// * Launches an +// EC2 instance of the type specified in the request in the Amazon SageMaker VPC. +// If you specified SubnetId of your VPC, Amazon SageMaker specifies both network +// interfaces when launching this instance. This enables inbound traffic from your +// own VPC to the notebook instance, assuming that the security groups allow +// it. // -// After creating the notebook instance, Amazon SageMaker returns -// its Amazon Resource Name (ARN). You can't change the name of a notebook instance -// after you create it. After Amazon SageMaker creates the notebook instance, you -// can connect to the Jupyter server and work in Jupyter notebooks. For example, -// you can write code to explore a dataset that you can use for model training, -// train a model, host models by creating Amazon SageMaker endpoints, and validate -// hosted models. 
For more information, see How It Works +// After creating the notebook instance, Amazon SageMaker returns its Amazon +// Resource Name (ARN). You can't change the name of a notebook instance after you +// create it. After Amazon SageMaker creates the notebook instance, you can connect +// to the Jupyter server and work in Jupyter notebooks. For example, you can write +// code to explore a dataset that you can use for model training, train a model, +// host models by creating Amazon SageMaker endpoints, and validate hosted models. +// For more information, see How It Works // (https://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works.html). func (c *Client) CreateNotebookInstance(ctx context.Context, params *CreateNotebookInstanceInput, optFns ...func(*Options)) (*CreateNotebookInstanceOutput, error) { if params == nil { diff --git a/service/sagemaker/api_op_CreateProcessingJob.go b/service/sagemaker/api_op_CreateProcessingJob.go index f58520db91c..8e73032ad0c 100644 --- a/service/sagemaker/api_op_CreateProcessingJob.go +++ b/service/sagemaker/api_op_CreateProcessingJob.go @@ -59,12 +59,12 @@ type CreateProcessingJobInput struct { // Associates a SageMaker job as a trial component with an experiment and trial. // Specified when you call the following APIs: // - // * CreateProcessingJob + // * CreateProcessingJob // - // * + // * // CreateTrainingJob // - // * CreateTransformJob + // * CreateTransformJob ExperimentConfig *types.ExperimentConfig // Networking options for a processing job. diff --git a/service/sagemaker/api_op_CreateTrainingJob.go b/service/sagemaker/api_op_CreateTrainingJob.go index 9c0a21317f7..d3611780f17 100644 --- a/service/sagemaker/api_op_CreateTrainingJob.go +++ b/service/sagemaker/api_op_CreateTrainingJob.go @@ -19,39 +19,39 @@ import ( // that you know how to use them for inferences. In the request body, you provide // the following: // -// * AlgorithmSpecification - Identifies the training algorithm -// to use. +// * AlgorithmSpecification - Identifies the training algorithm to +// use. // -// * HyperParameters - Specify these algorithm-specific parameters to -// enable the estimation of model parameters during training. Hyperparameters can -// be tuned to optimize this learning process. For a list of hyperparameters for -// each training algorithm provided by Amazon SageMaker, see Algorithms +// * HyperParameters - Specify these algorithm-specific parameters to enable +// the estimation of model parameters during training. Hyperparameters can be tuned +// to optimize this learning process. For a list of hyperparameters for each +// training algorithm provided by Amazon SageMaker, see Algorithms // (https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html). // -// * -// InputDataConfig - Describes the training dataset and the Amazon S3, EFS, or FSx -// location where it is stored. +// * InputDataConfig +// - Describes the training dataset and the Amazon S3, EFS, or FSx location where +// it is stored. // -// * OutputDataConfig - Identifies the Amazon S3 -// bucket where you want Amazon SageMaker to save the results of model training. +// * OutputDataConfig - Identifies the Amazon S3 bucket where you +// want Amazon SageMaker to save the results of model training. // +// * ResourceConfig - +// Identifies the resources, ML compute instances, and ML storage volumes to deploy +// for model training. In distributed training, you specify more than one +// instance. 
// -// * ResourceConfig - Identifies the resources, ML compute instances, and ML -// storage volumes to deploy for model training. In distributed training, you -// specify more than one instance. -// -// * EnableManagedSpotTraining - Optimize the -// cost of training machine learning models by up to 80% by using Amazon EC2 Spot -// instances. For more information, see Managed Spot Training +// * EnableManagedSpotTraining - Optimize the cost of training machine +// learning models by up to 80% by using Amazon EC2 Spot instances. For more +// information, see Managed Spot Training // (https://docs.aws.amazon.com/sagemaker/latest/dg/model-managed-spot-training.html). // -// -// * RoleARN - The Amazon Resource Number (ARN) that Amazon SageMaker assumes to +// * +// RoleARN - The Amazon Resource Number (ARN) that Amazon SageMaker assumes to // perform tasks on your behalf during model training. You must grant this role the // necessary permissions so that Amazon SageMaker can successfully complete model // training. // -// * StoppingCondition - To help cap training costs, use +// * StoppingCondition - To help cap training costs, use // MaxRuntimeInSeconds to set a time limit for training. Use MaxWaitTimeInSeconds // to specify how long you are willing to wait for a managed spot training job to // complete. @@ -173,12 +173,12 @@ type CreateTrainingJobInput struct { // Associates a SageMaker job as a trial component with an experiment and trial. // Specified when you call the following APIs: // - // * CreateProcessingJob + // * CreateProcessingJob // - // * + // * // CreateTrainingJob // - // * CreateTransformJob + // * CreateTransformJob ExperimentConfig *types.ExperimentConfig // Algorithm-specific parameters that influence the quality of the model. You set diff --git a/service/sagemaker/api_op_CreateTransformJob.go b/service/sagemaker/api_op_CreateTransformJob.go index 7766123f668..9610d146d87 100644 --- a/service/sagemaker/api_op_CreateTransformJob.go +++ b/service/sagemaker/api_op_CreateTransformJob.go @@ -17,26 +17,26 @@ import ( // that you have readily available. In the request body, you provide the // following: // -// * TransformJobName - Identifies the transform job. The name must -// be unique within an AWS Region in an AWS account. +// * TransformJobName - Identifies the transform job. The name must be +// unique within an AWS Region in an AWS account. // -// * ModelName - Identifies -// the model to use. ModelName must be the name of an existing Amazon SageMaker -// model in the same AWS Region and AWS account. For information on creating a -// model, see CreateModel. +// * ModelName - Identifies the +// model to use. ModelName must be the name of an existing Amazon SageMaker model +// in the same AWS Region and AWS account. For information on creating a model, see +// CreateModel. // -// * TransformInput - Describes the dataset to be -// transformed and the Amazon S3 location where it is stored. +// * TransformInput - Describes the dataset to be transformed and the +// Amazon S3 location where it is stored. // -// * -// TransformOutput - Identifies the Amazon S3 location where you want Amazon -// SageMaker to save the results from the transform job. +// * TransformOutput - Identifies the +// Amazon S3 location where you want Amazon SageMaker to save the results from the +// transform job. // -// * TransformResources -// - Identifies the ML compute instances for the transform job. +// * TransformResources - Identifies the ML compute instances for +// the transform job. 
// -// For more -// information about how batch transformation works, see Batch Transform +// For more information about how batch transformation works, +// see Batch Transform // (https://docs.aws.amazon.com/sagemaker/latest/dg/batch-transform.html). func (c *Client) CreateTransformJob(ctx context.Context, params *CreateTransformJobInput, optFns ...func(*Options)) (*CreateTransformJobOutput, error) { if params == nil { @@ -111,12 +111,12 @@ type CreateTransformJobInput struct { // Associates a SageMaker job as a trial component with an experiment and trial. // Specified when you call the following APIs: // - // * CreateProcessingJob + // * CreateProcessingJob // - // * + // * // CreateTrainingJob // - // * CreateTransformJob + // * CreateTransformJob ExperimentConfig *types.ExperimentConfig // The maximum number of parallel requests that can be sent to each instance in a diff --git a/service/sagemaker/api_op_CreateTrialComponent.go b/service/sagemaker/api_op_CreateTrialComponent.go index ecf0ac2ee63..dcb0f4f3585 100644 --- a/service/sagemaker/api_op_CreateTrialComponent.go +++ b/service/sagemaker/api_op_CreateTrialComponent.go @@ -70,12 +70,12 @@ type CreateTrialComponentInput struct { // The status of the component. States include: // - // * InProgress + // * InProgress // - // * - // Completed + // * Completed // - // * Failed + // * + // Failed Status *types.TrialComponentStatus // A list of tags to associate with the component. You can use Search API to search diff --git a/service/sagemaker/api_op_DescribeDomain.go b/service/sagemaker/api_op_DescribeDomain.go index c32b3c213d5..40bc5f971e7 100644 --- a/service/sagemaker/api_op_DescribeDomain.go +++ b/service/sagemaker/api_op_DescribeDomain.go @@ -41,11 +41,11 @@ type DescribeDomainOutput struct { // Specifies the VPC used for non-EFS traffic. The default value is // PublicInternetOnly. // - // * PublicInternetOnly - Non-EFS traffic is through a VPC + // * PublicInternetOnly - Non-EFS traffic is through a VPC // managed by Amazon SageMaker, which allows direct internet access // - // * VpcOnly - // - All Studio traffic is through the specified VPC and subnets + // * VpcOnly - + // All Studio traffic is through the specified VPC and subnets AppNetworkAccessType types.AppNetworkAccessType // The domain's authentication mode. diff --git a/service/sagemaker/api_op_DescribeEndpoint.go b/service/sagemaker/api_op_DescribeEndpoint.go index 9882c9b6022..70237ff61f8 100644 --- a/service/sagemaker/api_op_DescribeEndpoint.go +++ b/service/sagemaker/api_op_DescribeEndpoint.go @@ -60,38 +60,37 @@ type DescribeEndpointOutput struct { // The status of the endpoint. // - // * OutOfService: Endpoint is not available to - // take incoming requests. + // * OutOfService: Endpoint is not available to take + // incoming requests. // - // * Creating: CreateEndpoint is executing. + // * Creating: CreateEndpoint is executing. // - // * - // Updating: UpdateEndpoint or UpdateEndpointWeightsAndCapacities is executing. + // * Updating: + // UpdateEndpoint or UpdateEndpointWeightsAndCapacities is executing. // - // - // * SystemUpdating: Endpoint is undergoing maintenance and cannot be updated or + // * + // SystemUpdating: Endpoint is undergoing maintenance and cannot be updated or // deleted or re-scaled until it has completed. This maintenance operation does not // change any customer-specified values such as VPC config, KMS encryption, model, // instance type, or instance count. 
// - // * RollingBack: Endpoint fails to scale up - // or down or change its variant weight and is in the process of rolling back to - // its previous configuration. Once the rollback completes, endpoint returns to an + // * RollingBack: Endpoint fails to scale up or + // down or change its variant weight and is in the process of rolling back to its + // previous configuration. Once the rollback completes, endpoint returns to an // InService status. This transitional status only applies to an endpoint that has // autoscaling enabled and is undergoing variant weight or capacity changes as part // of an UpdateEndpointWeightsAndCapacities call or when the // UpdateEndpointWeightsAndCapacities operation is called explicitly. // - // * - // InService: Endpoint is available to process incoming requests. + // * InService: + // Endpoint is available to process incoming requests. // - // * Deleting: - // DeleteEndpoint is executing. + // * Deleting: DeleteEndpoint + // is executing. // - // * Failed: Endpoint could not be created, - // updated, or re-scaled. Use DescribeEndpointOutput$FailureReason for information - // about the failure. DeleteEndpoint is the only operation that can be performed on - // a failed endpoint. + // * Failed: Endpoint could not be created, updated, or re-scaled. + // Use DescribeEndpointOutput$FailureReason for information about the failure. + // DeleteEndpoint is the only operation that can be performed on a failed endpoint. // // This member is required. EndpointStatus types.EndpointStatus diff --git a/service/sagemaker/api_op_DescribeLabelingJob.go b/service/sagemaker/api_op_DescribeLabelingJob.go index 970a4398eb4..b08ec0b4b61 100644 --- a/service/sagemaker/api_op_DescribeLabelingJob.go +++ b/service/sagemaker/api_op_DescribeLabelingJob.go @@ -109,22 +109,22 @@ type DescribeLabelingJobOutput struct { // The S3 location of the JSON file that defines the categories used to label data // objects. Please note the following label-category limits: // - // * Semantic + // * Semantic // segmentation labeling jobs using automated labeling: 20 labels // - // * Box - // bounding labeling jobs (all): 10 labels + // * Box bounding + // labeling jobs (all): 10 labels // - // The file is a JSON structure in the - // following format: { + // The file is a JSON structure in the following + // format: { // "document-version": "2018-11-28" // // "labels": [ // + // { // - // { // - // "label": "label 1" + // "label": "label 1" // // }, // @@ -134,10 +134,10 @@ type DescribeLabelingJobOutput struct { // // }, // + // ... // - // ... // - // { + // { // // "label": "label n" // diff --git a/service/sagemaker/api_op_DescribeTrainingJob.go b/service/sagemaker/api_op_DescribeTrainingJob.go index 4ff95c09d56..a90c67a3e44 100644 --- a/service/sagemaker/api_op_DescribeTrainingJob.go +++ b/service/sagemaker/api_op_DescribeTrainingJob.go @@ -65,59 +65,58 @@ type DescribeTrainingJobOutput struct { // SecondaryStatusTransition. Amazon SageMaker provides primary statuses and // secondary statuses that apply to each of them: InProgress // - // * Starting - - // Starting the training job. + // * Starting - Starting + // the training job. // - // * Downloading - An optional stage for algorithms - // that support File training input mode. It indicates that data is being - // downloaded to the ML storage volumes. + // * Downloading - An optional stage for algorithms that support + // File training input mode. It indicates that data is being downloaded to the ML + // storage volumes. 
// - // * Training - Training is in - // progress. + // * Training - Training is in progress. // - // * Interrupted - The job stopped because the managed spot training - // instances were interrupted. + // * Interrupted - The job + // stopped because the managed spot training instances were interrupted. // - // * Uploading - Training is complete and the - // model artifacts are being uploaded to the S3 location. + // * + // Uploading - Training is complete and the model artifacts are being uploaded to + // the S3 location. // // Completed // - // * - // Completed - The training job has completed. + // * Completed - The training job has + // completed. // // Failed // - // * Failed - The training - // job has failed. The reason for the failure is returned in the FailureReason - // field of DescribeTrainingJobResponse. + // * Failed - The training job has failed. The reason for the + // failure is returned in the FailureReason field of + // DescribeTrainingJobResponse. // // Stopped // - // * MaxRuntimeExceeded - The - // job stopped because it exceeded the maximum allowed runtime. + // * MaxRuntimeExceeded - The job stopped + // because it exceeded the maximum allowed runtime. // - // * - // MaxWaitTimeExceeded - The job stopped because it exceeded the maximum allowed - // wait time. + // * MaxWaitTimeExceeded - The + // job stopped because it exceeded the maximum allowed wait time. // - // * Stopped - The training job has stopped. + // * Stopped - The + // training job has stopped. // // Stopping // - // * - // Stopping - Stopping the training job. + // * Stopping - Stopping the training + // job. // - // Valid values for SecondaryStatus are - // subject to change. We no longer support the following secondary statuses: + // Valid values for SecondaryStatus are subject to change. We no longer + // support the following secondary statuses: // - // * - // LaunchingMLInstances + // * LaunchingMLInstances // - // * PreparingTrainingStack + // * + // PreparingTrainingStack // - // * - // DownloadingTrainingImage + // * DownloadingTrainingImage // // This member is required. SecondaryStatus types.SecondaryStatus @@ -146,22 +145,22 @@ type DescribeTrainingJobOutput struct { // The status of the training job. Amazon SageMaker provides the following training // job statuses: // - // * InProgress - The training is in progress. + // * InProgress - The training is in progress. // - // * Completed - // - The training job has completed. + // * Completed - The + // training job has completed. // - // * Failed - The training job has failed. - // To see the reason for the failure, see the FailureReason field in the response - // to a DescribeTrainingJobResponse call. + // * Failed - The training job has failed. To see the + // reason for the failure, see the FailureReason field in the response to a + // DescribeTrainingJobResponse call. // - // * Stopping - The training job is - // stopping. + // * Stopping - The training job is stopping. // - // * Stopped - The training job has stopped. + // * + // Stopped - The training job has stopped. // - // For more detailed - // information, see SecondaryStatus. + // For more detailed information, see + // SecondaryStatus. // // This member is required. TrainingJobStatus types.TrainingJobStatus @@ -210,12 +209,12 @@ type DescribeTrainingJobOutput struct { // Associates a SageMaker job as a trial component with an experiment and trial. 
// Specified when you call the following APIs: // - // * CreateProcessingJob + // * CreateProcessingJob // - // * + // * // CreateTrainingJob // - // * CreateTransformJob + // * CreateTransformJob ExperimentConfig *types.ExperimentConfig // If the training job failed, the reason it failed. diff --git a/service/sagemaker/api_op_DescribeTransformJob.go b/service/sagemaker/api_op_DescribeTransformJob.go index bdb094f19b6..ce2833b09ab 100644 --- a/service/sagemaker/api_op_DescribeTransformJob.go +++ b/service/sagemaker/api_op_DescribeTransformJob.go @@ -102,12 +102,12 @@ type DescribeTransformJobOutput struct { // Associates a SageMaker job as a trial component with an experiment and trial. // Specified when you call the following APIs: // - // * CreateProcessingJob + // * CreateProcessingJob // - // * + // * // CreateTrainingJob // - // * CreateTransformJob + // * CreateTransformJob ExperimentConfig *types.ExperimentConfig // If the transform job failed, FailureReason describes why it failed. A transform diff --git a/service/sagemaker/api_op_DescribeTrialComponent.go b/service/sagemaker/api_op_DescribeTrialComponent.go index 971c6d84ccb..f04643d7cc2 100644 --- a/service/sagemaker/api_op_DescribeTrialComponent.go +++ b/service/sagemaker/api_op_DescribeTrialComponent.go @@ -77,12 +77,12 @@ type DescribeTrialComponentOutput struct { // The status of the component. States include: // - // * InProgress + // * InProgress // - // * - // Completed + // * Completed // - // * Failed + // * + // Failed Status *types.TrialComponentStatus // The Amazon Resource Name (ARN) of the trial component. diff --git a/service/sagemaker/api_op_ListCodeRepositories.go b/service/sagemaker/api_op_ListCodeRepositories.go index 6c67fa23432..37cacc12339 100644 --- a/service/sagemaker/api_op_ListCodeRepositories.go +++ b/service/sagemaker/api_op_ListCodeRepositories.go @@ -70,19 +70,18 @@ type ListCodeRepositoriesOutput struct { // Gets a list of summaries of the Git repositories. Each summary specifies the // following values for the repository: // - // * Name + // * Name // - // * Amazon Resource Name - // (ARN) + // * Amazon Resource Name (ARN) // - // * Creation time + // * + // Creation time // - // * Last modified time + // * Last modified time // - // * Configuration - // information, including the URL location of the repository and the ARN of the AWS - // Secrets Manager secret that contains the credentials used to access the - // repository. + // * Configuration information, including the + // URL location of the repository and the ARN of the AWS Secrets Manager secret + // that contains the credentials used to access the repository. // // This member is required. CodeRepositorySummaryList []*types.CodeRepositorySummary diff --git a/service/sagemaker/api_op_ListTrialComponents.go b/service/sagemaker/api_op_ListTrialComponents.go index 7184a48db97..f68dfba5a7c 100644 --- a/service/sagemaker/api_op_ListTrialComponents.go +++ b/service/sagemaker/api_op_ListTrialComponents.go @@ -17,11 +17,11 @@ import ( // that were created in a specific time range. 
You can also filter on one of the // following: // -// * ExperimentName +// * ExperimentName // -// * SourceArn +// * SourceArn // -// * TrialName +// * TrialName func (c *Client) ListTrialComponents(ctx context.Context, params *ListTrialComponentsInput, optFns ...func(*Options)) (*ListTrialComponentsOutput, error) { if params == nil { params = &ListTrialComponentsInput{} diff --git a/service/sagemaker/doc.go b/service/sagemaker/doc.go index be744423d3d..523fa99cbcc 100644 --- a/service/sagemaker/doc.go +++ b/service/sagemaker/doc.go @@ -6,10 +6,10 @@ // Provides APIs for creating and managing Amazon SageMaker resources. Other // Resources: // -// * Amazon SageMaker Developer Guide +// * Amazon SageMaker Developer Guide // (https://docs.aws.amazon.com/sagemaker/latest/dg/whatis.html#first-time-user) // -// -// * Amazon Augmented AI Runtime API Reference +// * +// Amazon Augmented AI Runtime API Reference // (https://docs.aws.amazon.com/augmented-ai/2019-11-07/APIReference/Welcome.html) package sagemaker diff --git a/service/sagemaker/types/enums.go b/service/sagemaker/types/enums.go index a4765da8067..2a4eb53bee8 100644 --- a/service/sagemaker/types/enums.go +++ b/service/sagemaker/types/enums.go @@ -6,8 +6,8 @@ type AlgorithmSortBy string // Enum values for AlgorithmSortBy const ( - AlgorithmSortByName AlgorithmSortBy = "Name" - AlgorithmSortByCreation_time AlgorithmSortBy = "CreationTime" + AlgorithmSortByName AlgorithmSortBy = "Name" + AlgorithmSortByCreationTime AlgorithmSortBy = "CreationTime" ) // Values returns all known values for AlgorithmSortBy. Note that this can be @@ -24,11 +24,11 @@ type AlgorithmStatus string // Enum values for AlgorithmStatus const ( - AlgorithmStatusPending AlgorithmStatus = "Pending" - AlgorithmStatusIn_progress AlgorithmStatus = "InProgress" - AlgorithmStatusCompleted AlgorithmStatus = "Completed" - AlgorithmStatusFailed AlgorithmStatus = "Failed" - AlgorithmStatusDeleting AlgorithmStatus = "Deleting" + AlgorithmStatusPending AlgorithmStatus = "Pending" + AlgorithmStatusInProgress AlgorithmStatus = "InProgress" + AlgorithmStatusCompleted AlgorithmStatus = "Completed" + AlgorithmStatusFailed AlgorithmStatus = "Failed" + AlgorithmStatusDeleting AlgorithmStatus = "Deleting" ) // Values returns all known values for AlgorithmStatus. 
Note that this can be @@ -48,38 +48,38 @@ type AppInstanceType string // Enum values for AppInstanceType const ( - AppInstanceTypeSystem AppInstanceType = "system" - AppInstanceTypeMl_t3_micro AppInstanceType = "ml.t3.micro" - AppInstanceTypeMl_t3_small AppInstanceType = "ml.t3.small" - AppInstanceTypeMl_t3_medium AppInstanceType = "ml.t3.medium" - AppInstanceTypeMl_t3_large AppInstanceType = "ml.t3.large" - AppInstanceTypeMl_t3_xlarge AppInstanceType = "ml.t3.xlarge" - AppInstanceTypeMl_t3_2xlarge AppInstanceType = "ml.t3.2xlarge" - AppInstanceTypeMl_m5_large AppInstanceType = "ml.m5.large" - AppInstanceTypeMl_m5_xlarge AppInstanceType = "ml.m5.xlarge" - AppInstanceTypeMl_m5_2xlarge AppInstanceType = "ml.m5.2xlarge" - AppInstanceTypeMl_m5_4xlarge AppInstanceType = "ml.m5.4xlarge" - AppInstanceTypeMl_m5_8xlarge AppInstanceType = "ml.m5.8xlarge" - AppInstanceTypeMl_m5_12xlarge AppInstanceType = "ml.m5.12xlarge" - AppInstanceTypeMl_m5_16xlarge AppInstanceType = "ml.m5.16xlarge" - AppInstanceTypeMl_m5_24xlarge AppInstanceType = "ml.m5.24xlarge" - AppInstanceTypeMl_c5_large AppInstanceType = "ml.c5.large" - AppInstanceTypeMl_c5_xlarge AppInstanceType = "ml.c5.xlarge" - AppInstanceTypeMl_c5_2xlarge AppInstanceType = "ml.c5.2xlarge" - AppInstanceTypeMl_c5_4xlarge AppInstanceType = "ml.c5.4xlarge" - AppInstanceTypeMl_c5_9xlarge AppInstanceType = "ml.c5.9xlarge" - AppInstanceTypeMl_c5_12xlarge AppInstanceType = "ml.c5.12xlarge" - AppInstanceTypeMl_c5_18xlarge AppInstanceType = "ml.c5.18xlarge" - AppInstanceTypeMl_c5_24xlarge AppInstanceType = "ml.c5.24xlarge" - AppInstanceTypeMl_p3_2xlarge AppInstanceType = "ml.p3.2xlarge" - AppInstanceTypeMl_p3_8xlarge AppInstanceType = "ml.p3.8xlarge" - AppInstanceTypeMl_p3_16xlarge AppInstanceType = "ml.p3.16xlarge" - AppInstanceTypeMl_g4dn_xlarge AppInstanceType = "ml.g4dn.xlarge" - AppInstanceTypeMl_g4dn_2xlarge AppInstanceType = "ml.g4dn.2xlarge" - AppInstanceTypeMl_g4dn_4xlarge AppInstanceType = "ml.g4dn.4xlarge" - AppInstanceTypeMl_g4dn_8xlarge AppInstanceType = "ml.g4dn.8xlarge" - AppInstanceTypeMl_g4dn_12xlarge AppInstanceType = "ml.g4dn.12xlarge" - AppInstanceTypeMl_g4dn_16xlarge AppInstanceType = "ml.g4dn.16xlarge" + AppInstanceTypeSystem AppInstanceType = "system" + AppInstanceTypeMlT3Micro AppInstanceType = "ml.t3.micro" + AppInstanceTypeMlT3Small AppInstanceType = "ml.t3.small" + AppInstanceTypeMlT3Medium AppInstanceType = "ml.t3.medium" + AppInstanceTypeMlT3Large AppInstanceType = "ml.t3.large" + AppInstanceTypeMlT3Xlarge AppInstanceType = "ml.t3.xlarge" + AppInstanceTypeMlT32xlarge AppInstanceType = "ml.t3.2xlarge" + AppInstanceTypeMlM5Large AppInstanceType = "ml.m5.large" + AppInstanceTypeMlM5Xlarge AppInstanceType = "ml.m5.xlarge" + AppInstanceTypeMlM52xlarge AppInstanceType = "ml.m5.2xlarge" + AppInstanceTypeMlM54xlarge AppInstanceType = "ml.m5.4xlarge" + AppInstanceTypeMlM58xlarge AppInstanceType = "ml.m5.8xlarge" + AppInstanceTypeMlM512xlarge AppInstanceType = "ml.m5.12xlarge" + AppInstanceTypeMlM516xlarge AppInstanceType = "ml.m5.16xlarge" + AppInstanceTypeMlM524xlarge AppInstanceType = "ml.m5.24xlarge" + AppInstanceTypeMlC5Large AppInstanceType = "ml.c5.large" + AppInstanceTypeMlC5Xlarge AppInstanceType = "ml.c5.xlarge" + AppInstanceTypeMlC52xlarge AppInstanceType = "ml.c5.2xlarge" + AppInstanceTypeMlC54xlarge AppInstanceType = "ml.c5.4xlarge" + AppInstanceTypeMlC59xlarge AppInstanceType = "ml.c5.9xlarge" + AppInstanceTypeMlC512xlarge AppInstanceType = "ml.c5.12xlarge" + AppInstanceTypeMlC518xlarge AppInstanceType = "ml.c5.18xlarge" + 
AppInstanceTypeMlC524xlarge AppInstanceType = "ml.c5.24xlarge" + AppInstanceTypeMlP32xlarge AppInstanceType = "ml.p3.2xlarge" + AppInstanceTypeMlP38xlarge AppInstanceType = "ml.p3.8xlarge" + AppInstanceTypeMlP316xlarge AppInstanceType = "ml.p3.16xlarge" + AppInstanceTypeMlG4dnXlarge AppInstanceType = "ml.g4dn.xlarge" + AppInstanceTypeMlG4dn2xlarge AppInstanceType = "ml.g4dn.2xlarge" + AppInstanceTypeMlG4dn4xlarge AppInstanceType = "ml.g4dn.4xlarge" + AppInstanceTypeMlG4dn8xlarge AppInstanceType = "ml.g4dn.8xlarge" + AppInstanceTypeMlG4dn12xlarge AppInstanceType = "ml.g4dn.12xlarge" + AppInstanceTypeMlG4dn16xlarge AppInstanceType = "ml.g4dn.16xlarge" ) // Values returns all known values for AppInstanceType. Note that this can be @@ -258,16 +258,16 @@ type AutoMLJobSecondaryStatus string // Enum values for AutoMLJobSecondaryStatus const ( - AutoMLJobSecondaryStatusStarting AutoMLJobSecondaryStatus = "Starting" - AutoMLJobSecondaryStatusAnalyzing_data AutoMLJobSecondaryStatus = "AnalyzingData" - AutoMLJobSecondaryStatusFeature_engineering AutoMLJobSecondaryStatus = "FeatureEngineering" - AutoMLJobSecondaryStatusModel_tuning AutoMLJobSecondaryStatus = "ModelTuning" - AutoMLJobSecondaryStatusMax_candidates_reached AutoMLJobSecondaryStatus = "MaxCandidatesReached" - AutoMLJobSecondaryStatusFailed AutoMLJobSecondaryStatus = "Failed" - AutoMLJobSecondaryStatusStopped AutoMLJobSecondaryStatus = "Stopped" - AutoMLJobSecondaryStatusMax_auto_ml_job_runtime_reached AutoMLJobSecondaryStatus = "MaxAutoMLJobRuntimeReached" - AutoMLJobSecondaryStatusStopping AutoMLJobSecondaryStatus = "Stopping" - AutoMLJobSecondaryStatusCandidate_definitions_generated AutoMLJobSecondaryStatus = "CandidateDefinitionsGenerated" + AutoMLJobSecondaryStatusStarting AutoMLJobSecondaryStatus = "Starting" + AutoMLJobSecondaryStatusAnalyzingData AutoMLJobSecondaryStatus = "AnalyzingData" + AutoMLJobSecondaryStatusFeatureEngineering AutoMLJobSecondaryStatus = "FeatureEngineering" + AutoMLJobSecondaryStatusModelTuning AutoMLJobSecondaryStatus = "ModelTuning" + AutoMLJobSecondaryStatusMaxCandidatesReached AutoMLJobSecondaryStatus = "MaxCandidatesReached" + AutoMLJobSecondaryStatusFailed AutoMLJobSecondaryStatus = "Failed" + AutoMLJobSecondaryStatusStopped AutoMLJobSecondaryStatus = "Stopped" + AutoMLJobSecondaryStatusMaxAutoMlJobRuntimeReached AutoMLJobSecondaryStatus = "MaxAutoMLJobRuntimeReached" + AutoMLJobSecondaryStatusStopping AutoMLJobSecondaryStatus = "Stopping" + AutoMLJobSecondaryStatusCandidateDefinitionsGenerated AutoMLJobSecondaryStatus = "CandidateDefinitionsGenerated" ) // Values returns all known values for AutoMLJobSecondaryStatus. Note that this can @@ -292,11 +292,11 @@ type AutoMLJobStatus string // Enum values for AutoMLJobStatus const ( - AutoMLJobStatusCompleted AutoMLJobStatus = "Completed" - AutoMLJobStatusIn_progress AutoMLJobStatus = "InProgress" - AutoMLJobStatusFailed AutoMLJobStatus = "Failed" - AutoMLJobStatusStopped AutoMLJobStatus = "Stopped" - AutoMLJobStatusStopping AutoMLJobStatus = "Stopping" + AutoMLJobStatusCompleted AutoMLJobStatus = "Completed" + AutoMLJobStatusInProgress AutoMLJobStatus = "InProgress" + AutoMLJobStatusFailed AutoMLJobStatus = "Failed" + AutoMLJobStatusStopped AutoMLJobStatus = "Stopped" + AutoMLJobStatusStopping AutoMLJobStatus = "Stopping" ) // Values returns all known values for AutoMLJobStatus. 
Note that this can be @@ -319,7 +319,7 @@ const ( AutoMLMetricEnumAccuracy AutoMLMetricEnum = "Accuracy" AutoMLMetricEnumMse AutoMLMetricEnum = "MSE" AutoMLMetricEnumF1 AutoMLMetricEnum = "F1" - AutoMLMetricEnumF1_macro AutoMLMetricEnum = "F1macro" + AutoMLMetricEnumF1Macro AutoMLMetricEnum = "F1macro" AutoMLMetricEnumAuc AutoMLMetricEnum = "AUC" ) @@ -340,8 +340,8 @@ type AutoMLS3DataType string // Enum values for AutoMLS3DataType const ( - AutoMLS3DataTypeManifest_file AutoMLS3DataType = "ManifestFile" - AutoMLS3DataTypeS3_prefix AutoMLS3DataType = "S3Prefix" + AutoMLS3DataTypeManifestFile AutoMLS3DataType = "ManifestFile" + AutoMLS3DataTypeS3Prefix AutoMLS3DataType = "S3Prefix" ) // Values returns all known values for AutoMLS3DataType. Note that this can be @@ -358,9 +358,9 @@ type AutoMLSortBy string // Enum values for AutoMLSortBy const ( - AutoMLSortByName AutoMLSortBy = "Name" - AutoMLSortByCreation_time AutoMLSortBy = "CreationTime" - AutoMLSortByStatus AutoMLSortBy = "Status" + AutoMLSortByName AutoMLSortBy = "Name" + AutoMLSortByCreationTime AutoMLSortBy = "CreationTime" + AutoMLSortByStatus AutoMLSortBy = "Status" ) // Values returns all known values for AutoMLSortBy. Note that this can be expanded @@ -396,8 +396,8 @@ type AwsManagedHumanLoopRequestSource string // Enum values for AwsManagedHumanLoopRequestSource const ( - AwsManagedHumanLoopRequestSourceRekognition_detect_moderation_labels_image_v3 AwsManagedHumanLoopRequestSource = "AWS/Rekognition/DetectModerationLabels/Image/V3" - AwsManagedHumanLoopRequestSourceTextract_analyze_document_forms_v1 AwsManagedHumanLoopRequestSource = "AWS/Textract/AnalyzeDocument/Forms/V1" + AwsManagedHumanLoopRequestSourceRekognitionDetectModerationLabelsImageV3 AwsManagedHumanLoopRequestSource = "AWS/Rekognition/DetectModerationLabels/Image/V3" + AwsManagedHumanLoopRequestSourceTextractAnalyzeDocumentFormsV1 AwsManagedHumanLoopRequestSource = "AWS/Textract/AnalyzeDocument/Forms/V1" ) // Values returns all known values for AwsManagedHumanLoopRequestSource. Note that @@ -415,8 +415,8 @@ type BatchStrategy string // Enum values for BatchStrategy const ( - BatchStrategyMulti_record BatchStrategy = "MultiRecord" - BatchStrategySingle_record BatchStrategy = "SingleRecord" + BatchStrategyMultiRecord BatchStrategy = "MultiRecord" + BatchStrategySingleRecord BatchStrategy = "SingleRecord" ) // Values returns all known values for BatchStrategy. Note that this can be @@ -471,11 +471,11 @@ type CandidateStatus string // Enum values for CandidateStatus const ( - CandidateStatusCompleted CandidateStatus = "Completed" - CandidateStatusIn_progress CandidateStatus = "InProgress" - CandidateStatusFailed CandidateStatus = "Failed" - CandidateStatusStopped CandidateStatus = "Stopped" - CandidateStatusStopping CandidateStatus = "Stopping" + CandidateStatusCompleted CandidateStatus = "Completed" + CandidateStatusInProgress CandidateStatus = "InProgress" + CandidateStatusFailed CandidateStatus = "Failed" + CandidateStatusStopped CandidateStatus = "Stopped" + CandidateStatusStopping CandidateStatus = "Stopping" ) // Values returns all known values for CandidateStatus. 
Note that this can be @@ -551,9 +551,9 @@ type CodeRepositorySortBy string // Enum values for CodeRepositorySortBy const ( - CodeRepositorySortByName CodeRepositorySortBy = "Name" - CodeRepositorySortByCreation_time CodeRepositorySortBy = "CreationTime" - CodeRepositorySortByLast_modified_time CodeRepositorySortBy = "LastModifiedTime" + CodeRepositorySortByName CodeRepositorySortBy = "Name" + CodeRepositorySortByCreationTime CodeRepositorySortBy = "CreationTime" + CodeRepositorySortByLastModifiedTime CodeRepositorySortBy = "LastModifiedTime" ) // Values returns all known values for CodeRepositorySortBy. Note that this can be @@ -633,8 +633,8 @@ type ContainerMode string // Enum values for ContainerMode const ( - ContainerModeSingle_model ContainerMode = "SingleModel" - ContainerModeMulti_model ContainerMode = "MultiModel" + ContainerModeSingleModel ContainerMode = "SingleModel" + ContainerModeMultiModel ContainerMode = "MultiModel" ) // Values returns all known values for ContainerMode. Note that this can be @@ -651,8 +651,8 @@ type ContentClassifier string // Enum values for ContentClassifier const ( - ContentClassifierFree_of_personally_identifiable_information ContentClassifier = "FreeOfPersonallyIdentifiableInformation" - ContentClassifierFree_of_adult_content ContentClassifier = "FreeOfAdultContent" + ContentClassifierFreeOfPersonallyIdentifiableInformation ContentClassifier = "FreeOfPersonallyIdentifiableInformation" + ContentClassifierFreeOfAdultContent ContentClassifier = "FreeOfAdultContent" ) // Values returns all known values for ContentClassifier. Note that this can be @@ -669,10 +669,10 @@ type DetailedAlgorithmStatus string // Enum values for DetailedAlgorithmStatus const ( - DetailedAlgorithmStatusNot_started DetailedAlgorithmStatus = "NotStarted" - DetailedAlgorithmStatusIn_progress DetailedAlgorithmStatus = "InProgress" - DetailedAlgorithmStatusCompleted DetailedAlgorithmStatus = "Completed" - DetailedAlgorithmStatusFailed DetailedAlgorithmStatus = "Failed" + DetailedAlgorithmStatusNotStarted DetailedAlgorithmStatus = "NotStarted" + DetailedAlgorithmStatusInProgress DetailedAlgorithmStatus = "InProgress" + DetailedAlgorithmStatusCompleted DetailedAlgorithmStatus = "Completed" + DetailedAlgorithmStatusFailed DetailedAlgorithmStatus = "Failed" ) // Values returns all known values for DetailedAlgorithmStatus. Note that this can @@ -691,10 +691,10 @@ type DetailedModelPackageStatus string // Enum values for DetailedModelPackageStatus const ( - DetailedModelPackageStatusNot_started DetailedModelPackageStatus = "NotStarted" - DetailedModelPackageStatusIn_progress DetailedModelPackageStatus = "InProgress" - DetailedModelPackageStatusCompleted DetailedModelPackageStatus = "Completed" - DetailedModelPackageStatusFailed DetailedModelPackageStatus = "Failed" + DetailedModelPackageStatusNotStarted DetailedModelPackageStatus = "NotStarted" + DetailedModelPackageStatusInProgress DetailedModelPackageStatus = "InProgress" + DetailedModelPackageStatusCompleted DetailedModelPackageStatus = "Completed" + DetailedModelPackageStatusFailed DetailedModelPackageStatus = "Failed" ) // Values returns all known values for DetailedModelPackageStatus. 
Note that this @@ -791,14 +791,14 @@ type EndpointStatus string // Enum values for EndpointStatus const ( - EndpointStatusOut_of_service EndpointStatus = "OutOfService" - EndpointStatusCreating EndpointStatus = "Creating" - EndpointStatusUpdating EndpointStatus = "Updating" - EndpointStatusSystem_updating EndpointStatus = "SystemUpdating" - EndpointStatusRolling_back EndpointStatus = "RollingBack" - EndpointStatusIn_service EndpointStatus = "InService" - EndpointStatusDeleting EndpointStatus = "Deleting" - EndpointStatusFailed EndpointStatus = "Failed" + EndpointStatusOutOfService EndpointStatus = "OutOfService" + EndpointStatusCreating EndpointStatus = "Creating" + EndpointStatusUpdating EndpointStatus = "Updating" + EndpointStatusSystemUpdating EndpointStatus = "SystemUpdating" + EndpointStatusRollingBack EndpointStatus = "RollingBack" + EndpointStatusInService EndpointStatus = "InService" + EndpointStatusDeleting EndpointStatus = "Deleting" + EndpointStatusFailed EndpointStatus = "Failed" ) // Values returns all known values for EndpointStatus. Note that this can be @@ -821,13 +821,13 @@ type ExecutionStatus string // Enum values for ExecutionStatus const ( - ExecutionStatusPending ExecutionStatus = "Pending" - ExecutionStatusCompleted ExecutionStatus = "Completed" - ExecutionStatusCompleted_with_violations ExecutionStatus = "CompletedWithViolations" - ExecutionStatusIn_progress ExecutionStatus = "InProgress" - ExecutionStatusFailed ExecutionStatus = "Failed" - ExecutionStatusStopping ExecutionStatus = "Stopping" - ExecutionStatusStopped ExecutionStatus = "Stopped" + ExecutionStatusPending ExecutionStatus = "Pending" + ExecutionStatusCompleted ExecutionStatus = "Completed" + ExecutionStatusCompletedWithViolations ExecutionStatus = "CompletedWithViolations" + ExecutionStatusInProgress ExecutionStatus = "InProgress" + ExecutionStatusFailed ExecutionStatus = "Failed" + ExecutionStatusStopping ExecutionStatus = "Stopping" + ExecutionStatusStopped ExecutionStatus = "Stopped" ) // Values returns all known values for ExecutionStatus. Note that this can be @@ -953,10 +953,10 @@ type HyperParameterScalingType string // Enum values for HyperParameterScalingType const ( - HyperParameterScalingTypeAuto HyperParameterScalingType = "Auto" - HyperParameterScalingTypeLinear HyperParameterScalingType = "Linear" - HyperParameterScalingTypeLogarithmic HyperParameterScalingType = "Logarithmic" - HyperParameterScalingTypeReverse_logarithmic HyperParameterScalingType = "ReverseLogarithmic" + HyperParameterScalingTypeAuto HyperParameterScalingType = "Auto" + HyperParameterScalingTypeLinear HyperParameterScalingType = "Linear" + HyperParameterScalingTypeLogarithmic HyperParameterScalingType = "Logarithmic" + HyperParameterScalingTypeReverseLogarithmic HyperParameterScalingType = "ReverseLogarithmic" ) // Values returns all known values for HyperParameterScalingType. 
Note that this @@ -1015,11 +1015,11 @@ type HyperParameterTuningJobStatus string // Enum values for HyperParameterTuningJobStatus const ( - HyperParameterTuningJobStatusCompleted HyperParameterTuningJobStatus = "Completed" - HyperParameterTuningJobStatusIn_progress HyperParameterTuningJobStatus = "InProgress" - HyperParameterTuningJobStatusFailed HyperParameterTuningJobStatus = "Failed" - HyperParameterTuningJobStatusStopped HyperParameterTuningJobStatus = "Stopped" - HyperParameterTuningJobStatusStopping HyperParameterTuningJobStatus = "Stopping" + HyperParameterTuningJobStatusCompleted HyperParameterTuningJobStatus = "Completed" + HyperParameterTuningJobStatusInProgress HyperParameterTuningJobStatus = "InProgress" + HyperParameterTuningJobStatusFailed HyperParameterTuningJobStatus = "Failed" + HyperParameterTuningJobStatusStopped HyperParameterTuningJobStatus = "Stopped" + HyperParameterTuningJobStatusStopping HyperParameterTuningJobStatus = "Stopping" ) // Values returns all known values for HyperParameterTuningJobStatus. Note that @@ -1059,8 +1059,8 @@ type HyperParameterTuningJobWarmStartType string // Enum values for HyperParameterTuningJobWarmStartType const ( - HyperParameterTuningJobWarmStartTypeIdentical_data_and_algorithm HyperParameterTuningJobWarmStartType = "IdenticalDataAndAlgorithm" - HyperParameterTuningJobWarmStartTypeTransfer_learning HyperParameterTuningJobWarmStartType = "TransferLearning" + HyperParameterTuningJobWarmStartTypeIdenticalDataAndAlgorithm HyperParameterTuningJobWarmStartType = "IdenticalDataAndAlgorithm" + HyperParameterTuningJobWarmStartTypeTransferLearning HyperParameterTuningJobWarmStartType = "TransferLearning" ) // Values returns all known values for HyperParameterTuningJobWarmStartType. Note @@ -1078,44 +1078,44 @@ type InstanceType string // Enum values for InstanceType const ( - InstanceTypeMl_t2_medium InstanceType = "ml.t2.medium" - InstanceTypeMl_t2_large InstanceType = "ml.t2.large" - InstanceTypeMl_t2_xlarge InstanceType = "ml.t2.xlarge" - InstanceTypeMl_t2_2xlarge InstanceType = "ml.t2.2xlarge" - InstanceTypeMl_t3_medium InstanceType = "ml.t3.medium" - InstanceTypeMl_t3_large InstanceType = "ml.t3.large" - InstanceTypeMl_t3_xlarge InstanceType = "ml.t3.xlarge" - InstanceTypeMl_t3_2xlarge InstanceType = "ml.t3.2xlarge" - InstanceTypeMl_m4_xlarge InstanceType = "ml.m4.xlarge" - InstanceTypeMl_m4_2xlarge InstanceType = "ml.m4.2xlarge" - InstanceTypeMl_m4_4xlarge InstanceType = "ml.m4.4xlarge" - InstanceTypeMl_m4_10xlarge InstanceType = "ml.m4.10xlarge" - InstanceTypeMl_m4_16xlarge InstanceType = "ml.m4.16xlarge" - InstanceTypeMl_m5_xlarge InstanceType = "ml.m5.xlarge" - InstanceTypeMl_m5_2xlarge InstanceType = "ml.m5.2xlarge" - InstanceTypeMl_m5_4xlarge InstanceType = "ml.m5.4xlarge" - InstanceTypeMl_m5_12xlarge InstanceType = "ml.m5.12xlarge" - InstanceTypeMl_m5_24xlarge InstanceType = "ml.m5.24xlarge" - InstanceTypeMl_c4_xlarge InstanceType = "ml.c4.xlarge" - InstanceTypeMl_c4_2xlarge InstanceType = "ml.c4.2xlarge" - InstanceTypeMl_c4_4xlarge InstanceType = "ml.c4.4xlarge" - InstanceTypeMl_c4_8xlarge InstanceType = "ml.c4.8xlarge" - InstanceTypeMl_c5_xlarge InstanceType = "ml.c5.xlarge" - InstanceTypeMl_c5_2xlarge InstanceType = "ml.c5.2xlarge" - InstanceTypeMl_c5_4xlarge InstanceType = "ml.c5.4xlarge" - InstanceTypeMl_c5_9xlarge InstanceType = "ml.c5.9xlarge" - InstanceTypeMl_c5_18xlarge InstanceType = "ml.c5.18xlarge" - InstanceTypeMl_c5d_xlarge InstanceType = "ml.c5d.xlarge" - InstanceTypeMl_c5d_2xlarge InstanceType = "ml.c5d.2xlarge" - 
InstanceTypeMl_c5d_4xlarge InstanceType = "ml.c5d.4xlarge" - InstanceTypeMl_c5d_9xlarge InstanceType = "ml.c5d.9xlarge" - InstanceTypeMl_c5d_18xlarge InstanceType = "ml.c5d.18xlarge" - InstanceTypeMl_p2_xlarge InstanceType = "ml.p2.xlarge" - InstanceTypeMl_p2_8xlarge InstanceType = "ml.p2.8xlarge" - InstanceTypeMl_p2_16xlarge InstanceType = "ml.p2.16xlarge" - InstanceTypeMl_p3_2xlarge InstanceType = "ml.p3.2xlarge" - InstanceTypeMl_p3_8xlarge InstanceType = "ml.p3.8xlarge" - InstanceTypeMl_p3_16xlarge InstanceType = "ml.p3.16xlarge" + InstanceTypeMlT2Medium InstanceType = "ml.t2.medium" + InstanceTypeMlT2Large InstanceType = "ml.t2.large" + InstanceTypeMlT2Xlarge InstanceType = "ml.t2.xlarge" + InstanceTypeMlT22xlarge InstanceType = "ml.t2.2xlarge" + InstanceTypeMlT3Medium InstanceType = "ml.t3.medium" + InstanceTypeMlT3Large InstanceType = "ml.t3.large" + InstanceTypeMlT3Xlarge InstanceType = "ml.t3.xlarge" + InstanceTypeMlT32xlarge InstanceType = "ml.t3.2xlarge" + InstanceTypeMlM4Xlarge InstanceType = "ml.m4.xlarge" + InstanceTypeMlM42xlarge InstanceType = "ml.m4.2xlarge" + InstanceTypeMlM44xlarge InstanceType = "ml.m4.4xlarge" + InstanceTypeMlM410xlarge InstanceType = "ml.m4.10xlarge" + InstanceTypeMlM416xlarge InstanceType = "ml.m4.16xlarge" + InstanceTypeMlM5Xlarge InstanceType = "ml.m5.xlarge" + InstanceTypeMlM52xlarge InstanceType = "ml.m5.2xlarge" + InstanceTypeMlM54xlarge InstanceType = "ml.m5.4xlarge" + InstanceTypeMlM512xlarge InstanceType = "ml.m5.12xlarge" + InstanceTypeMlM524xlarge InstanceType = "ml.m5.24xlarge" + InstanceTypeMlC4Xlarge InstanceType = "ml.c4.xlarge" + InstanceTypeMlC42xlarge InstanceType = "ml.c4.2xlarge" + InstanceTypeMlC44xlarge InstanceType = "ml.c4.4xlarge" + InstanceTypeMlC48xlarge InstanceType = "ml.c4.8xlarge" + InstanceTypeMlC5Xlarge InstanceType = "ml.c5.xlarge" + InstanceTypeMlC52xlarge InstanceType = "ml.c5.2xlarge" + InstanceTypeMlC54xlarge InstanceType = "ml.c5.4xlarge" + InstanceTypeMlC59xlarge InstanceType = "ml.c5.9xlarge" + InstanceTypeMlC518xlarge InstanceType = "ml.c5.18xlarge" + InstanceTypeMlC5dXlarge InstanceType = "ml.c5d.xlarge" + InstanceTypeMlC5d2xlarge InstanceType = "ml.c5d.2xlarge" + InstanceTypeMlC5d4xlarge InstanceType = "ml.c5d.4xlarge" + InstanceTypeMlC5d9xlarge InstanceType = "ml.c5d.9xlarge" + InstanceTypeMlC5d18xlarge InstanceType = "ml.c5d.18xlarge" + InstanceTypeMlP2Xlarge InstanceType = "ml.p2.xlarge" + InstanceTypeMlP28xlarge InstanceType = "ml.p2.8xlarge" + InstanceTypeMlP216xlarge InstanceType = "ml.p2.16xlarge" + InstanceTypeMlP32xlarge InstanceType = "ml.p3.2xlarge" + InstanceTypeMlP38xlarge InstanceType = "ml.p3.8xlarge" + InstanceTypeMlP316xlarge InstanceType = "ml.p3.16xlarge" ) // Values returns all known values for InstanceType. 
Note that this can be expanded @@ -1187,7 +1187,7 @@ type LabelingJobStatus string // Enum values for LabelingJobStatus const ( LabelingJobStatusInitializing LabelingJobStatus = "Initializing" - LabelingJobStatusIn_progress LabelingJobStatus = "InProgress" + LabelingJobStatusInProgress LabelingJobStatus = "InProgress" LabelingJobStatusCompleted LabelingJobStatus = "Completed" LabelingJobStatusFailed LabelingJobStatus = "Failed" LabelingJobStatusStopping LabelingJobStatus = "Stopping" @@ -1212,9 +1212,9 @@ type ListCompilationJobsSortBy string // Enum values for ListCompilationJobsSortBy const ( - ListCompilationJobsSortByName ListCompilationJobsSortBy = "Name" - ListCompilationJobsSortByCreation_time ListCompilationJobsSortBy = "CreationTime" - ListCompilationJobsSortByStatus ListCompilationJobsSortBy = "Status" + ListCompilationJobsSortByName ListCompilationJobsSortBy = "Name" + ListCompilationJobsSortByCreationTime ListCompilationJobsSortBy = "CreationTime" + ListCompilationJobsSortByStatus ListCompilationJobsSortBy = "Status" ) // Values returns all known values for ListCompilationJobsSortBy. Note that this @@ -1232,7 +1232,7 @@ type ListLabelingJobsForWorkteamSortByOptions string // Enum values for ListLabelingJobsForWorkteamSortByOptions const ( - ListLabelingJobsForWorkteamSortByOptionsCreation_time ListLabelingJobsForWorkteamSortByOptions = "CreationTime" + ListLabelingJobsForWorkteamSortByOptionsCreationTime ListLabelingJobsForWorkteamSortByOptions = "CreationTime" ) // Values returns all known values for ListLabelingJobsForWorkteamSortByOptions. @@ -1285,8 +1285,8 @@ type ModelPackageSortBy string // Enum values for ModelPackageSortBy const ( - ModelPackageSortByName ModelPackageSortBy = "Name" - ModelPackageSortByCreation_time ModelPackageSortBy = "CreationTime" + ModelPackageSortByName ModelPackageSortBy = "Name" + ModelPackageSortByCreationTime ModelPackageSortBy = "CreationTime" ) // Values returns all known values for ModelPackageSortBy. Note that this can be @@ -1303,11 +1303,11 @@ type ModelPackageStatus string // Enum values for ModelPackageStatus const ( - ModelPackageStatusPending ModelPackageStatus = "Pending" - ModelPackageStatusIn_progress ModelPackageStatus = "InProgress" - ModelPackageStatusCompleted ModelPackageStatus = "Completed" - ModelPackageStatusFailed ModelPackageStatus = "Failed" - ModelPackageStatusDeleting ModelPackageStatus = "Deleting" + ModelPackageStatusPending ModelPackageStatus = "Pending" + ModelPackageStatusInProgress ModelPackageStatus = "InProgress" + ModelPackageStatusCompleted ModelPackageStatus = "Completed" + ModelPackageStatusFailed ModelPackageStatus = "Failed" + ModelPackageStatusDeleting ModelPackageStatus = "Deleting" ) // Values returns all known values for ModelPackageStatus. Note that this can be @@ -1345,9 +1345,9 @@ type MonitoringExecutionSortKey string // Enum values for MonitoringExecutionSortKey const ( - MonitoringExecutionSortKeyCreation_time MonitoringExecutionSortKey = "CreationTime" - MonitoringExecutionSortKeyScheduled_time MonitoringExecutionSortKey = "ScheduledTime" - MonitoringExecutionSortKeyStatus MonitoringExecutionSortKey = "Status" + MonitoringExecutionSortKeyCreationTime MonitoringExecutionSortKey = "CreationTime" + MonitoringExecutionSortKeyScheduledTime MonitoringExecutionSortKey = "ScheduledTime" + MonitoringExecutionSortKeyStatus MonitoringExecutionSortKey = "Status" ) // Values returns all known values for MonitoringExecutionSortKey. 
Note that this @@ -1365,9 +1365,9 @@ type MonitoringScheduleSortKey string // Enum values for MonitoringScheduleSortKey const ( - MonitoringScheduleSortKeyName MonitoringScheduleSortKey = "Name" - MonitoringScheduleSortKeyCreation_time MonitoringScheduleSortKey = "CreationTime" - MonitoringScheduleSortKeyStatus MonitoringScheduleSortKey = "Status" + MonitoringScheduleSortKeyName MonitoringScheduleSortKey = "Name" + MonitoringScheduleSortKeyCreationTime MonitoringScheduleSortKey = "CreationTime" + MonitoringScheduleSortKeyStatus MonitoringScheduleSortKey = "Status" ) // Values returns all known values for MonitoringScheduleSortKey. Note that this @@ -1385,12 +1385,12 @@ type NotebookInstanceAcceleratorType string // Enum values for NotebookInstanceAcceleratorType const ( - NotebookInstanceAcceleratorTypeMl_eia1_medium NotebookInstanceAcceleratorType = "ml.eia1.medium" - NotebookInstanceAcceleratorTypeMl_eia1_large NotebookInstanceAcceleratorType = "ml.eia1.large" - NotebookInstanceAcceleratorTypeMl_eia1_xlarge NotebookInstanceAcceleratorType = "ml.eia1.xlarge" - NotebookInstanceAcceleratorTypeMl_eia2_medium NotebookInstanceAcceleratorType = "ml.eia2.medium" - NotebookInstanceAcceleratorTypeMl_eia2_large NotebookInstanceAcceleratorType = "ml.eia2.large" - NotebookInstanceAcceleratorTypeMl_eia2_xlarge NotebookInstanceAcceleratorType = "ml.eia2.xlarge" + NotebookInstanceAcceleratorTypeMlEia1Medium NotebookInstanceAcceleratorType = "ml.eia1.medium" + NotebookInstanceAcceleratorTypeMlEia1Large NotebookInstanceAcceleratorType = "ml.eia1.large" + NotebookInstanceAcceleratorTypeMlEia1Xlarge NotebookInstanceAcceleratorType = "ml.eia1.xlarge" + NotebookInstanceAcceleratorTypeMlEia2Medium NotebookInstanceAcceleratorType = "ml.eia2.medium" + NotebookInstanceAcceleratorTypeMlEia2Large NotebookInstanceAcceleratorType = "ml.eia2.large" + NotebookInstanceAcceleratorTypeMlEia2Xlarge NotebookInstanceAcceleratorType = "ml.eia2.xlarge" ) // Values returns all known values for NotebookInstanceAcceleratorType. Note that @@ -1412,9 +1412,9 @@ type NotebookInstanceLifecycleConfigSortKey string // Enum values for NotebookInstanceLifecycleConfigSortKey const ( - NotebookInstanceLifecycleConfigSortKeyName NotebookInstanceLifecycleConfigSortKey = "Name" - NotebookInstanceLifecycleConfigSortKeyCreation_time NotebookInstanceLifecycleConfigSortKey = "CreationTime" - NotebookInstanceLifecycleConfigSortKeyLast_modified_time NotebookInstanceLifecycleConfigSortKey = "LastModifiedTime" + NotebookInstanceLifecycleConfigSortKeyName NotebookInstanceLifecycleConfigSortKey = "Name" + NotebookInstanceLifecycleConfigSortKeyCreationTime NotebookInstanceLifecycleConfigSortKey = "CreationTime" + NotebookInstanceLifecycleConfigSortKeyLastModifiedTime NotebookInstanceLifecycleConfigSortKey = "LastModifiedTime" ) // Values returns all known values for NotebookInstanceLifecycleConfigSortKey. Note @@ -1452,9 +1452,9 @@ type NotebookInstanceSortKey string // Enum values for NotebookInstanceSortKey const ( - NotebookInstanceSortKeyName NotebookInstanceSortKey = "Name" - NotebookInstanceSortKeyCreation_time NotebookInstanceSortKey = "CreationTime" - NotebookInstanceSortKeyStatus NotebookInstanceSortKey = "Status" + NotebookInstanceSortKeyName NotebookInstanceSortKey = "Name" + NotebookInstanceSortKeyCreationTime NotebookInstanceSortKey = "CreationTime" + NotebookInstanceSortKeyStatus NotebookInstanceSortKey = "Status" ) // Values returns all known values for NotebookInstanceSortKey. 
Note that this can @@ -1556,16 +1556,16 @@ type Operator string // Enum values for Operator const ( - OperatorEquals Operator = "Equals" - OperatorNot_equals Operator = "NotEquals" - OperatorGreater_than Operator = "GreaterThan" - OperatorGreater_than_or_equal_to Operator = "GreaterThanOrEqualTo" - OperatorLess_than Operator = "LessThan" - OperatorLess_than_or_equal_to Operator = "LessThanOrEqualTo" - OperatorContains Operator = "Contains" - OperatorExists Operator = "Exists" - OperatorNot_exists Operator = "NotExists" - OperatorIn Operator = "In" + OperatorEquals Operator = "Equals" + OperatorNotEquals Operator = "NotEquals" + OperatorGreaterThan Operator = "GreaterThan" + OperatorGreaterThanOrEqualTo Operator = "GreaterThanOrEqualTo" + OperatorLessThan Operator = "LessThan" + OperatorLessThanOrEqualTo Operator = "LessThanOrEqualTo" + OperatorContains Operator = "Contains" + OperatorExists Operator = "Exists" + OperatorNotExists Operator = "NotExists" + OperatorIn Operator = "In" ) // Values returns all known values for Operator. Note that this can be expanded in @@ -1611,7 +1611,7 @@ const ( ParameterTypeInteger ParameterType = "Integer" ParameterTypeContinuous ParameterType = "Continuous" ParameterTypeCategorical ParameterType = "Categorical" - ParameterTypeFree_text ParameterType = "FreeText" + ParameterTypeFreeText ParameterType = "FreeText" ) // Values returns all known values for ParameterType. Note that this can be @@ -1630,9 +1630,9 @@ type ProblemType string // Enum values for ProblemType const ( - ProblemTypeBinary_classification ProblemType = "BinaryClassification" - ProblemTypeMulticlass_classification ProblemType = "MulticlassClassification" - ProblemTypeRegression ProblemType = "Regression" + ProblemTypeBinaryClassification ProblemType = "BinaryClassification" + ProblemTypeMulticlassClassification ProblemType = "MulticlassClassification" + ProblemTypeRegression ProblemType = "Regression" ) // Values returns all known values for ProblemType. 
Note that this can be expanded @@ -1650,44 +1650,44 @@ type ProcessingInstanceType string // Enum values for ProcessingInstanceType const ( - ProcessingInstanceTypeMl_t3_medium ProcessingInstanceType = "ml.t3.medium" - ProcessingInstanceTypeMl_t3_large ProcessingInstanceType = "ml.t3.large" - ProcessingInstanceTypeMl_t3_xlarge ProcessingInstanceType = "ml.t3.xlarge" - ProcessingInstanceTypeMl_t3_2xlarge ProcessingInstanceType = "ml.t3.2xlarge" - ProcessingInstanceTypeMl_m4_xlarge ProcessingInstanceType = "ml.m4.xlarge" - ProcessingInstanceTypeMl_m4_2xlarge ProcessingInstanceType = "ml.m4.2xlarge" - ProcessingInstanceTypeMl_m4_4xlarge ProcessingInstanceType = "ml.m4.4xlarge" - ProcessingInstanceTypeMl_m4_10xlarge ProcessingInstanceType = "ml.m4.10xlarge" - ProcessingInstanceTypeMl_m4_16xlarge ProcessingInstanceType = "ml.m4.16xlarge" - ProcessingInstanceTypeMl_c4_xlarge ProcessingInstanceType = "ml.c4.xlarge" - ProcessingInstanceTypeMl_c4_2xlarge ProcessingInstanceType = "ml.c4.2xlarge" - ProcessingInstanceTypeMl_c4_4xlarge ProcessingInstanceType = "ml.c4.4xlarge" - ProcessingInstanceTypeMl_c4_8xlarge ProcessingInstanceType = "ml.c4.8xlarge" - ProcessingInstanceTypeMl_p2_xlarge ProcessingInstanceType = "ml.p2.xlarge" - ProcessingInstanceTypeMl_p2_8xlarge ProcessingInstanceType = "ml.p2.8xlarge" - ProcessingInstanceTypeMl_p2_16xlarge ProcessingInstanceType = "ml.p2.16xlarge" - ProcessingInstanceTypeMl_p3_2xlarge ProcessingInstanceType = "ml.p3.2xlarge" - ProcessingInstanceTypeMl_p3_8xlarge ProcessingInstanceType = "ml.p3.8xlarge" - ProcessingInstanceTypeMl_p3_16xlarge ProcessingInstanceType = "ml.p3.16xlarge" - ProcessingInstanceTypeMl_c5_xlarge ProcessingInstanceType = "ml.c5.xlarge" - ProcessingInstanceTypeMl_c5_2xlarge ProcessingInstanceType = "ml.c5.2xlarge" - ProcessingInstanceTypeMl_c5_4xlarge ProcessingInstanceType = "ml.c5.4xlarge" - ProcessingInstanceTypeMl_c5_9xlarge ProcessingInstanceType = "ml.c5.9xlarge" - ProcessingInstanceTypeMl_c5_18xlarge ProcessingInstanceType = "ml.c5.18xlarge" - ProcessingInstanceTypeMl_m5_large ProcessingInstanceType = "ml.m5.large" - ProcessingInstanceTypeMl_m5_xlarge ProcessingInstanceType = "ml.m5.xlarge" - ProcessingInstanceTypeMl_m5_2xlarge ProcessingInstanceType = "ml.m5.2xlarge" - ProcessingInstanceTypeMl_m5_4xlarge ProcessingInstanceType = "ml.m5.4xlarge" - ProcessingInstanceTypeMl_m5_12xlarge ProcessingInstanceType = "ml.m5.12xlarge" - ProcessingInstanceTypeMl_m5_24xlarge ProcessingInstanceType = "ml.m5.24xlarge" - ProcessingInstanceTypeMl_r5_large ProcessingInstanceType = "ml.r5.large" - ProcessingInstanceTypeMl_r5_xlarge ProcessingInstanceType = "ml.r5.xlarge" - ProcessingInstanceTypeMl_r5_2xlarge ProcessingInstanceType = "ml.r5.2xlarge" - ProcessingInstanceTypeMl_r5_4xlarge ProcessingInstanceType = "ml.r5.4xlarge" - ProcessingInstanceTypeMl_r5_8xlarge ProcessingInstanceType = "ml.r5.8xlarge" - ProcessingInstanceTypeMl_r5_12xlarge ProcessingInstanceType = "ml.r5.12xlarge" - ProcessingInstanceTypeMl_r5_16xlarge ProcessingInstanceType = "ml.r5.16xlarge" - ProcessingInstanceTypeMl_r5_24xlarge ProcessingInstanceType = "ml.r5.24xlarge" + ProcessingInstanceTypeMlT3Medium ProcessingInstanceType = "ml.t3.medium" + ProcessingInstanceTypeMlT3Large ProcessingInstanceType = "ml.t3.large" + ProcessingInstanceTypeMlT3Xlarge ProcessingInstanceType = "ml.t3.xlarge" + ProcessingInstanceTypeMlT32xlarge ProcessingInstanceType = "ml.t3.2xlarge" + ProcessingInstanceTypeMlM4Xlarge ProcessingInstanceType = "ml.m4.xlarge" + ProcessingInstanceTypeMlM42xlarge 
ProcessingInstanceType = "ml.m4.2xlarge" + ProcessingInstanceTypeMlM44xlarge ProcessingInstanceType = "ml.m4.4xlarge" + ProcessingInstanceTypeMlM410xlarge ProcessingInstanceType = "ml.m4.10xlarge" + ProcessingInstanceTypeMlM416xlarge ProcessingInstanceType = "ml.m4.16xlarge" + ProcessingInstanceTypeMlC4Xlarge ProcessingInstanceType = "ml.c4.xlarge" + ProcessingInstanceTypeMlC42xlarge ProcessingInstanceType = "ml.c4.2xlarge" + ProcessingInstanceTypeMlC44xlarge ProcessingInstanceType = "ml.c4.4xlarge" + ProcessingInstanceTypeMlC48xlarge ProcessingInstanceType = "ml.c4.8xlarge" + ProcessingInstanceTypeMlP2Xlarge ProcessingInstanceType = "ml.p2.xlarge" + ProcessingInstanceTypeMlP28xlarge ProcessingInstanceType = "ml.p2.8xlarge" + ProcessingInstanceTypeMlP216xlarge ProcessingInstanceType = "ml.p2.16xlarge" + ProcessingInstanceTypeMlP32xlarge ProcessingInstanceType = "ml.p3.2xlarge" + ProcessingInstanceTypeMlP38xlarge ProcessingInstanceType = "ml.p3.8xlarge" + ProcessingInstanceTypeMlP316xlarge ProcessingInstanceType = "ml.p3.16xlarge" + ProcessingInstanceTypeMlC5Xlarge ProcessingInstanceType = "ml.c5.xlarge" + ProcessingInstanceTypeMlC52xlarge ProcessingInstanceType = "ml.c5.2xlarge" + ProcessingInstanceTypeMlC54xlarge ProcessingInstanceType = "ml.c5.4xlarge" + ProcessingInstanceTypeMlC59xlarge ProcessingInstanceType = "ml.c5.9xlarge" + ProcessingInstanceTypeMlC518xlarge ProcessingInstanceType = "ml.c5.18xlarge" + ProcessingInstanceTypeMlM5Large ProcessingInstanceType = "ml.m5.large" + ProcessingInstanceTypeMlM5Xlarge ProcessingInstanceType = "ml.m5.xlarge" + ProcessingInstanceTypeMlM52xlarge ProcessingInstanceType = "ml.m5.2xlarge" + ProcessingInstanceTypeMlM54xlarge ProcessingInstanceType = "ml.m5.4xlarge" + ProcessingInstanceTypeMlM512xlarge ProcessingInstanceType = "ml.m5.12xlarge" + ProcessingInstanceTypeMlM524xlarge ProcessingInstanceType = "ml.m5.24xlarge" + ProcessingInstanceTypeMlR5Large ProcessingInstanceType = "ml.r5.large" + ProcessingInstanceTypeMlR5Xlarge ProcessingInstanceType = "ml.r5.xlarge" + ProcessingInstanceTypeMlR52xlarge ProcessingInstanceType = "ml.r5.2xlarge" + ProcessingInstanceTypeMlR54xlarge ProcessingInstanceType = "ml.r5.4xlarge" + ProcessingInstanceTypeMlR58xlarge ProcessingInstanceType = "ml.r5.8xlarge" + ProcessingInstanceTypeMlR512xlarge ProcessingInstanceType = "ml.r5.12xlarge" + ProcessingInstanceTypeMlR516xlarge ProcessingInstanceType = "ml.r5.16xlarge" + ProcessingInstanceTypeMlR524xlarge ProcessingInstanceType = "ml.r5.24xlarge" ) // Values returns all known values for ProcessingInstanceType. Note that this can @@ -1740,11 +1740,11 @@ type ProcessingJobStatus string // Enum values for ProcessingJobStatus const ( - ProcessingJobStatusIn_progress ProcessingJobStatus = "InProgress" - ProcessingJobStatusCompleted ProcessingJobStatus = "Completed" - ProcessingJobStatusFailed ProcessingJobStatus = "Failed" - ProcessingJobStatusStopping ProcessingJobStatus = "Stopping" - ProcessingJobStatusStopped ProcessingJobStatus = "Stopped" + ProcessingJobStatusInProgress ProcessingJobStatus = "InProgress" + ProcessingJobStatusCompleted ProcessingJobStatus = "Completed" + ProcessingJobStatusFailed ProcessingJobStatus = "Failed" + ProcessingJobStatusStopping ProcessingJobStatus = "Stopping" + ProcessingJobStatusStopped ProcessingJobStatus = "Stopped" ) // Values returns all known values for ProcessingJobStatus. 
Note that this can be @@ -1801,8 +1801,8 @@ type ProcessingS3DataType string // Enum values for ProcessingS3DataType const ( - ProcessingS3DataTypeManifest_file ProcessingS3DataType = "ManifestFile" - ProcessingS3DataTypeS3_prefix ProcessingS3DataType = "S3Prefix" + ProcessingS3DataTypeManifestFile ProcessingS3DataType = "ManifestFile" + ProcessingS3DataTypeS3Prefix ProcessingS3DataType = "S3Prefix" ) // Values returns all known values for ProcessingS3DataType. Note that this can be @@ -1838,7 +1838,7 @@ type ProcessingS3UploadMode string // Enum values for ProcessingS3UploadMode const ( ProcessingS3UploadModeContinuous ProcessingS3UploadMode = "Continuous" - ProcessingS3UploadModeEnd_of_job ProcessingS3UploadMode = "EndOfJob" + ProcessingS3UploadModeEndOfJob ProcessingS3UploadMode = "EndOfJob" ) // Values returns all known values for ProcessingS3UploadMode. Note that this can @@ -1855,12 +1855,12 @@ type ProductionVariantAcceleratorType string // Enum values for ProductionVariantAcceleratorType const ( - ProductionVariantAcceleratorTypeMl_eia1_medium ProductionVariantAcceleratorType = "ml.eia1.medium" - ProductionVariantAcceleratorTypeMl_eia1_large ProductionVariantAcceleratorType = "ml.eia1.large" - ProductionVariantAcceleratorTypeMl_eia1_xlarge ProductionVariantAcceleratorType = "ml.eia1.xlarge" - ProductionVariantAcceleratorTypeMl_eia2_medium ProductionVariantAcceleratorType = "ml.eia2.medium" - ProductionVariantAcceleratorTypeMl_eia2_large ProductionVariantAcceleratorType = "ml.eia2.large" - ProductionVariantAcceleratorTypeMl_eia2_xlarge ProductionVariantAcceleratorType = "ml.eia2.xlarge" + ProductionVariantAcceleratorTypeMlEia1Medium ProductionVariantAcceleratorType = "ml.eia1.medium" + ProductionVariantAcceleratorTypeMlEia1Large ProductionVariantAcceleratorType = "ml.eia1.large" + ProductionVariantAcceleratorTypeMlEia1Xlarge ProductionVariantAcceleratorType = "ml.eia1.xlarge" + ProductionVariantAcceleratorTypeMlEia2Medium ProductionVariantAcceleratorType = "ml.eia2.medium" + ProductionVariantAcceleratorTypeMlEia2Large ProductionVariantAcceleratorType = "ml.eia2.large" + ProductionVariantAcceleratorTypeMlEia2Xlarge ProductionVariantAcceleratorType = "ml.eia2.xlarge" ) // Values returns all known values for ProductionVariantAcceleratorType. 
Note that @@ -1882,72 +1882,72 @@ type ProductionVariantInstanceType string // Enum values for ProductionVariantInstanceType const ( - ProductionVariantInstanceTypeMl_t2_medium ProductionVariantInstanceType = "ml.t2.medium" - ProductionVariantInstanceTypeMl_t2_large ProductionVariantInstanceType = "ml.t2.large" - ProductionVariantInstanceTypeMl_t2_xlarge ProductionVariantInstanceType = "ml.t2.xlarge" - ProductionVariantInstanceTypeMl_t2_2xlarge ProductionVariantInstanceType = "ml.t2.2xlarge" - ProductionVariantInstanceTypeMl_m4_xlarge ProductionVariantInstanceType = "ml.m4.xlarge" - ProductionVariantInstanceTypeMl_m4_2xlarge ProductionVariantInstanceType = "ml.m4.2xlarge" - ProductionVariantInstanceTypeMl_m4_4xlarge ProductionVariantInstanceType = "ml.m4.4xlarge" - ProductionVariantInstanceTypeMl_m4_10xlarge ProductionVariantInstanceType = "ml.m4.10xlarge" - ProductionVariantInstanceTypeMl_m4_16xlarge ProductionVariantInstanceType = "ml.m4.16xlarge" - ProductionVariantInstanceTypeMl_m5_large ProductionVariantInstanceType = "ml.m5.large" - ProductionVariantInstanceTypeMl_m5_xlarge ProductionVariantInstanceType = "ml.m5.xlarge" - ProductionVariantInstanceTypeMl_m5_2xlarge ProductionVariantInstanceType = "ml.m5.2xlarge" - ProductionVariantInstanceTypeMl_m5_4xlarge ProductionVariantInstanceType = "ml.m5.4xlarge" - ProductionVariantInstanceTypeMl_m5_12xlarge ProductionVariantInstanceType = "ml.m5.12xlarge" - ProductionVariantInstanceTypeMl_m5_24xlarge ProductionVariantInstanceType = "ml.m5.24xlarge" - ProductionVariantInstanceTypeMl_m5d_large ProductionVariantInstanceType = "ml.m5d.large" - ProductionVariantInstanceTypeMl_m5d_xlarge ProductionVariantInstanceType = "ml.m5d.xlarge" - ProductionVariantInstanceTypeMl_m5d_2xlarge ProductionVariantInstanceType = "ml.m5d.2xlarge" - ProductionVariantInstanceTypeMl_m5d_4xlarge ProductionVariantInstanceType = "ml.m5d.4xlarge" - ProductionVariantInstanceTypeMl_m5d_12xlarge ProductionVariantInstanceType = "ml.m5d.12xlarge" - ProductionVariantInstanceTypeMl_m5d_24xlarge ProductionVariantInstanceType = "ml.m5d.24xlarge" - ProductionVariantInstanceTypeMl_c4_large ProductionVariantInstanceType = "ml.c4.large" - ProductionVariantInstanceTypeMl_c4_xlarge ProductionVariantInstanceType = "ml.c4.xlarge" - ProductionVariantInstanceTypeMl_c4_2xlarge ProductionVariantInstanceType = "ml.c4.2xlarge" - ProductionVariantInstanceTypeMl_c4_4xlarge ProductionVariantInstanceType = "ml.c4.4xlarge" - ProductionVariantInstanceTypeMl_c4_8xlarge ProductionVariantInstanceType = "ml.c4.8xlarge" - ProductionVariantInstanceTypeMl_p2_xlarge ProductionVariantInstanceType = "ml.p2.xlarge" - ProductionVariantInstanceTypeMl_p2_8xlarge ProductionVariantInstanceType = "ml.p2.8xlarge" - ProductionVariantInstanceTypeMl_p2_16xlarge ProductionVariantInstanceType = "ml.p2.16xlarge" - ProductionVariantInstanceTypeMl_p3_2xlarge ProductionVariantInstanceType = "ml.p3.2xlarge" - ProductionVariantInstanceTypeMl_p3_8xlarge ProductionVariantInstanceType = "ml.p3.8xlarge" - ProductionVariantInstanceTypeMl_p3_16xlarge ProductionVariantInstanceType = "ml.p3.16xlarge" - ProductionVariantInstanceTypeMl_c5_large ProductionVariantInstanceType = "ml.c5.large" - ProductionVariantInstanceTypeMl_c5_xlarge ProductionVariantInstanceType = "ml.c5.xlarge" - ProductionVariantInstanceTypeMl_c5_2xlarge ProductionVariantInstanceType = "ml.c5.2xlarge" - ProductionVariantInstanceTypeMl_c5_4xlarge ProductionVariantInstanceType = "ml.c5.4xlarge" - ProductionVariantInstanceTypeMl_c5_9xlarge ProductionVariantInstanceType = 
"ml.c5.9xlarge" - ProductionVariantInstanceTypeMl_c5_18xlarge ProductionVariantInstanceType = "ml.c5.18xlarge" - ProductionVariantInstanceTypeMl_c5d_large ProductionVariantInstanceType = "ml.c5d.large" - ProductionVariantInstanceTypeMl_c5d_xlarge ProductionVariantInstanceType = "ml.c5d.xlarge" - ProductionVariantInstanceTypeMl_c5d_2xlarge ProductionVariantInstanceType = "ml.c5d.2xlarge" - ProductionVariantInstanceTypeMl_c5d_4xlarge ProductionVariantInstanceType = "ml.c5d.4xlarge" - ProductionVariantInstanceTypeMl_c5d_9xlarge ProductionVariantInstanceType = "ml.c5d.9xlarge" - ProductionVariantInstanceTypeMl_c5d_18xlarge ProductionVariantInstanceType = "ml.c5d.18xlarge" - ProductionVariantInstanceTypeMl_g4dn_xlarge ProductionVariantInstanceType = "ml.g4dn.xlarge" - ProductionVariantInstanceTypeMl_g4dn_2xlarge ProductionVariantInstanceType = "ml.g4dn.2xlarge" - ProductionVariantInstanceTypeMl_g4dn_4xlarge ProductionVariantInstanceType = "ml.g4dn.4xlarge" - ProductionVariantInstanceTypeMl_g4dn_8xlarge ProductionVariantInstanceType = "ml.g4dn.8xlarge" - ProductionVariantInstanceTypeMl_g4dn_12xlarge ProductionVariantInstanceType = "ml.g4dn.12xlarge" - ProductionVariantInstanceTypeMl_g4dn_16xlarge ProductionVariantInstanceType = "ml.g4dn.16xlarge" - ProductionVariantInstanceTypeMl_r5_large ProductionVariantInstanceType = "ml.r5.large" - ProductionVariantInstanceTypeMl_r5_xlarge ProductionVariantInstanceType = "ml.r5.xlarge" - ProductionVariantInstanceTypeMl_r5_2xlarge ProductionVariantInstanceType = "ml.r5.2xlarge" - ProductionVariantInstanceTypeMl_r5_4xlarge ProductionVariantInstanceType = "ml.r5.4xlarge" - ProductionVariantInstanceTypeMl_r5_12xlarge ProductionVariantInstanceType = "ml.r5.12xlarge" - ProductionVariantInstanceTypeMl_r5_24xlarge ProductionVariantInstanceType = "ml.r5.24xlarge" - ProductionVariantInstanceTypeMl_r5d_large ProductionVariantInstanceType = "ml.r5d.large" - ProductionVariantInstanceTypeMl_r5d_xlarge ProductionVariantInstanceType = "ml.r5d.xlarge" - ProductionVariantInstanceTypeMl_r5d_2xlarge ProductionVariantInstanceType = "ml.r5d.2xlarge" - ProductionVariantInstanceTypeMl_r5d_4xlarge ProductionVariantInstanceType = "ml.r5d.4xlarge" - ProductionVariantInstanceTypeMl_r5d_12xlarge ProductionVariantInstanceType = "ml.r5d.12xlarge" - ProductionVariantInstanceTypeMl_r5d_24xlarge ProductionVariantInstanceType = "ml.r5d.24xlarge" - ProductionVariantInstanceTypeMl_inf1_xlarge ProductionVariantInstanceType = "ml.inf1.xlarge" - ProductionVariantInstanceTypeMl_inf1_2xlarge ProductionVariantInstanceType = "ml.inf1.2xlarge" - ProductionVariantInstanceTypeMl_inf1_6xlarge ProductionVariantInstanceType = "ml.inf1.6xlarge" - ProductionVariantInstanceTypeMl_inf1_24xlarge ProductionVariantInstanceType = "ml.inf1.24xlarge" + ProductionVariantInstanceTypeMlT2Medium ProductionVariantInstanceType = "ml.t2.medium" + ProductionVariantInstanceTypeMlT2Large ProductionVariantInstanceType = "ml.t2.large" + ProductionVariantInstanceTypeMlT2Xlarge ProductionVariantInstanceType = "ml.t2.xlarge" + ProductionVariantInstanceTypeMlT22xlarge ProductionVariantInstanceType = "ml.t2.2xlarge" + ProductionVariantInstanceTypeMlM4Xlarge ProductionVariantInstanceType = "ml.m4.xlarge" + ProductionVariantInstanceTypeMlM42xlarge ProductionVariantInstanceType = "ml.m4.2xlarge" + ProductionVariantInstanceTypeMlM44xlarge ProductionVariantInstanceType = "ml.m4.4xlarge" + ProductionVariantInstanceTypeMlM410xlarge ProductionVariantInstanceType = "ml.m4.10xlarge" + ProductionVariantInstanceTypeMlM416xlarge 
ProductionVariantInstanceType = "ml.m4.16xlarge" + ProductionVariantInstanceTypeMlM5Large ProductionVariantInstanceType = "ml.m5.large" + ProductionVariantInstanceTypeMlM5Xlarge ProductionVariantInstanceType = "ml.m5.xlarge" + ProductionVariantInstanceTypeMlM52xlarge ProductionVariantInstanceType = "ml.m5.2xlarge" + ProductionVariantInstanceTypeMlM54xlarge ProductionVariantInstanceType = "ml.m5.4xlarge" + ProductionVariantInstanceTypeMlM512xlarge ProductionVariantInstanceType = "ml.m5.12xlarge" + ProductionVariantInstanceTypeMlM524xlarge ProductionVariantInstanceType = "ml.m5.24xlarge" + ProductionVariantInstanceTypeMlM5dLarge ProductionVariantInstanceType = "ml.m5d.large" + ProductionVariantInstanceTypeMlM5dXlarge ProductionVariantInstanceType = "ml.m5d.xlarge" + ProductionVariantInstanceTypeMlM5d2xlarge ProductionVariantInstanceType = "ml.m5d.2xlarge" + ProductionVariantInstanceTypeMlM5d4xlarge ProductionVariantInstanceType = "ml.m5d.4xlarge" + ProductionVariantInstanceTypeMlM5d12xlarge ProductionVariantInstanceType = "ml.m5d.12xlarge" + ProductionVariantInstanceTypeMlM5d24xlarge ProductionVariantInstanceType = "ml.m5d.24xlarge" + ProductionVariantInstanceTypeMlC4Large ProductionVariantInstanceType = "ml.c4.large" + ProductionVariantInstanceTypeMlC4Xlarge ProductionVariantInstanceType = "ml.c4.xlarge" + ProductionVariantInstanceTypeMlC42xlarge ProductionVariantInstanceType = "ml.c4.2xlarge" + ProductionVariantInstanceTypeMlC44xlarge ProductionVariantInstanceType = "ml.c4.4xlarge" + ProductionVariantInstanceTypeMlC48xlarge ProductionVariantInstanceType = "ml.c4.8xlarge" + ProductionVariantInstanceTypeMlP2Xlarge ProductionVariantInstanceType = "ml.p2.xlarge" + ProductionVariantInstanceTypeMlP28xlarge ProductionVariantInstanceType = "ml.p2.8xlarge" + ProductionVariantInstanceTypeMlP216xlarge ProductionVariantInstanceType = "ml.p2.16xlarge" + ProductionVariantInstanceTypeMlP32xlarge ProductionVariantInstanceType = "ml.p3.2xlarge" + ProductionVariantInstanceTypeMlP38xlarge ProductionVariantInstanceType = "ml.p3.8xlarge" + ProductionVariantInstanceTypeMlP316xlarge ProductionVariantInstanceType = "ml.p3.16xlarge" + ProductionVariantInstanceTypeMlC5Large ProductionVariantInstanceType = "ml.c5.large" + ProductionVariantInstanceTypeMlC5Xlarge ProductionVariantInstanceType = "ml.c5.xlarge" + ProductionVariantInstanceTypeMlC52xlarge ProductionVariantInstanceType = "ml.c5.2xlarge" + ProductionVariantInstanceTypeMlC54xlarge ProductionVariantInstanceType = "ml.c5.4xlarge" + ProductionVariantInstanceTypeMlC59xlarge ProductionVariantInstanceType = "ml.c5.9xlarge" + ProductionVariantInstanceTypeMlC518xlarge ProductionVariantInstanceType = "ml.c5.18xlarge" + ProductionVariantInstanceTypeMlC5dLarge ProductionVariantInstanceType = "ml.c5d.large" + ProductionVariantInstanceTypeMlC5dXlarge ProductionVariantInstanceType = "ml.c5d.xlarge" + ProductionVariantInstanceTypeMlC5d2xlarge ProductionVariantInstanceType = "ml.c5d.2xlarge" + ProductionVariantInstanceTypeMlC5d4xlarge ProductionVariantInstanceType = "ml.c5d.4xlarge" + ProductionVariantInstanceTypeMlC5d9xlarge ProductionVariantInstanceType = "ml.c5d.9xlarge" + ProductionVariantInstanceTypeMlC5d18xlarge ProductionVariantInstanceType = "ml.c5d.18xlarge" + ProductionVariantInstanceTypeMlG4dnXlarge ProductionVariantInstanceType = "ml.g4dn.xlarge" + ProductionVariantInstanceTypeMlG4dn2xlarge ProductionVariantInstanceType = "ml.g4dn.2xlarge" + ProductionVariantInstanceTypeMlG4dn4xlarge ProductionVariantInstanceType = "ml.g4dn.4xlarge" + 
ProductionVariantInstanceTypeMlG4dn8xlarge ProductionVariantInstanceType = "ml.g4dn.8xlarge" + ProductionVariantInstanceTypeMlG4dn12xlarge ProductionVariantInstanceType = "ml.g4dn.12xlarge" + ProductionVariantInstanceTypeMlG4dn16xlarge ProductionVariantInstanceType = "ml.g4dn.16xlarge" + ProductionVariantInstanceTypeMlR5Large ProductionVariantInstanceType = "ml.r5.large" + ProductionVariantInstanceTypeMlR5Xlarge ProductionVariantInstanceType = "ml.r5.xlarge" + ProductionVariantInstanceTypeMlR52xlarge ProductionVariantInstanceType = "ml.r5.2xlarge" + ProductionVariantInstanceTypeMlR54xlarge ProductionVariantInstanceType = "ml.r5.4xlarge" + ProductionVariantInstanceTypeMlR512xlarge ProductionVariantInstanceType = "ml.r5.12xlarge" + ProductionVariantInstanceTypeMlR524xlarge ProductionVariantInstanceType = "ml.r5.24xlarge" + ProductionVariantInstanceTypeMlR5dLarge ProductionVariantInstanceType = "ml.r5d.large" + ProductionVariantInstanceTypeMlR5dXlarge ProductionVariantInstanceType = "ml.r5d.xlarge" + ProductionVariantInstanceTypeMlR5d2xlarge ProductionVariantInstanceType = "ml.r5d.2xlarge" + ProductionVariantInstanceTypeMlR5d4xlarge ProductionVariantInstanceType = "ml.r5d.4xlarge" + ProductionVariantInstanceTypeMlR5d12xlarge ProductionVariantInstanceType = "ml.r5d.12xlarge" + ProductionVariantInstanceTypeMlR5d24xlarge ProductionVariantInstanceType = "ml.r5d.24xlarge" + ProductionVariantInstanceTypeMlInf1Xlarge ProductionVariantInstanceType = "ml.inf1.xlarge" + ProductionVariantInstanceTypeMlInf12xlarge ProductionVariantInstanceType = "ml.inf1.2xlarge" + ProductionVariantInstanceTypeMlInf16xlarge ProductionVariantInstanceType = "ml.inf1.6xlarge" + ProductionVariantInstanceTypeMlInf124xlarge ProductionVariantInstanceType = "ml.inf1.24xlarge" ) // Values returns all known values for ProductionVariantInstanceType. Note that @@ -2065,10 +2065,10 @@ type ResourceType string // Enum values for ResourceType const ( - ResourceTypeTraining_job ResourceType = "TrainingJob" - ResourceTypeExperiment ResourceType = "Experiment" - ResourceTypeExperiment_trial ResourceType = "ExperimentTrial" - ResourceTypeExperiment_trial_component ResourceType = "ExperimentTrialComponent" + ResourceTypeTrainingJob ResourceType = "TrainingJob" + ResourceTypeExperiment ResourceType = "Experiment" + ResourceTypeExperimentTrial ResourceType = "ExperimentTrial" + ResourceTypeExperimentTrialComponent ResourceType = "ExperimentTrialComponent" ) // Values returns all known values for ResourceType. Note that this can be expanded @@ -2123,12 +2123,12 @@ type RuleEvaluationStatus string // Enum values for RuleEvaluationStatus const ( - RuleEvaluationStatusIn_progress RuleEvaluationStatus = "InProgress" - RuleEvaluationStatusNo_issues_found RuleEvaluationStatus = "NoIssuesFound" - RuleEvaluationStatusIssues_found RuleEvaluationStatus = "IssuesFound" - RuleEvaluationStatusError RuleEvaluationStatus = "Error" - RuleEvaluationStatusStopping RuleEvaluationStatus = "Stopping" - RuleEvaluationStatusStopped RuleEvaluationStatus = "Stopped" + RuleEvaluationStatusInProgress RuleEvaluationStatus = "InProgress" + RuleEvaluationStatusNoIssuesFound RuleEvaluationStatus = "NoIssuesFound" + RuleEvaluationStatusIssuesFound RuleEvaluationStatus = "IssuesFound" + RuleEvaluationStatusError RuleEvaluationStatus = "Error" + RuleEvaluationStatusStopping RuleEvaluationStatus = "Stopping" + RuleEvaluationStatusStopped RuleEvaluationStatus = "Stopped" ) // Values returns all known values for RuleEvaluationStatus. 
Note that this can be @@ -2149,8 +2149,8 @@ type S3DataDistribution string // Enum values for S3DataDistribution const ( - S3DataDistributionFully_replicated S3DataDistribution = "FullyReplicated" - S3DataDistributionSharded_by_s3_key S3DataDistribution = "ShardedByS3Key" + S3DataDistributionFullyReplicated S3DataDistribution = "FullyReplicated" + S3DataDistributionShardedByS3Key S3DataDistribution = "ShardedByS3Key" ) // Values returns all known values for S3DataDistribution. Note that this can be @@ -2167,9 +2167,9 @@ type S3DataType string // Enum values for S3DataType const ( - S3DataTypeManifest_file S3DataType = "ManifestFile" - S3DataTypeS3_prefix S3DataType = "S3Prefix" - S3DataTypeAugmented_manifest_file S3DataType = "AugmentedManifestFile" + S3DataTypeManifestFile S3DataType = "ManifestFile" + S3DataTypeS3Prefix S3DataType = "S3Prefix" + S3DataTypeAugmentedManifestFile S3DataType = "AugmentedManifestFile" ) // Values returns all known values for S3DataType. Note that this can be expanded @@ -2227,20 +2227,20 @@ type SecondaryStatus string // Enum values for SecondaryStatus const ( - SecondaryStatusStarting SecondaryStatus = "Starting" - SecondaryStatusLaunching_ml_instances SecondaryStatus = "LaunchingMLInstances" - SecondaryStatusPreparing_training_stack SecondaryStatus = "PreparingTrainingStack" - SecondaryStatusDownloading SecondaryStatus = "Downloading" - SecondaryStatusDownloading_training_image SecondaryStatus = "DownloadingTrainingImage" - SecondaryStatusTraining SecondaryStatus = "Training" - SecondaryStatusUploading SecondaryStatus = "Uploading" - SecondaryStatusStopping SecondaryStatus = "Stopping" - SecondaryStatusStopped SecondaryStatus = "Stopped" - SecondaryStatusMax_runtime_exceeded SecondaryStatus = "MaxRuntimeExceeded" - SecondaryStatusCompleted SecondaryStatus = "Completed" - SecondaryStatusFailed SecondaryStatus = "Failed" - SecondaryStatusInterrupted SecondaryStatus = "Interrupted" - SecondaryStatusMax_wait_time_exceeded SecondaryStatus = "MaxWaitTimeExceeded" + SecondaryStatusStarting SecondaryStatus = "Starting" + SecondaryStatusLaunchingMlInstances SecondaryStatus = "LaunchingMLInstances" + SecondaryStatusPreparingTrainingStack SecondaryStatus = "PreparingTrainingStack" + SecondaryStatusDownloading SecondaryStatus = "Downloading" + SecondaryStatusDownloadingTrainingImage SecondaryStatus = "DownloadingTrainingImage" + SecondaryStatusTraining SecondaryStatus = "Training" + SecondaryStatusUploading SecondaryStatus = "Uploading" + SecondaryStatusStopping SecondaryStatus = "Stopping" + SecondaryStatusStopped SecondaryStatus = "Stopped" + SecondaryStatusMaxRuntimeExceeded SecondaryStatus = "MaxRuntimeExceeded" + SecondaryStatusCompleted SecondaryStatus = "Completed" + SecondaryStatusFailed SecondaryStatus = "Failed" + SecondaryStatusInterrupted SecondaryStatus = "Interrupted" + SecondaryStatusMaxWaitTimeExceeded SecondaryStatus = "MaxWaitTimeExceeded" ) // Values returns all known values for SecondaryStatus. Note that this can be @@ -2269,9 +2269,9 @@ type SortBy string // Enum values for SortBy const ( - SortByName SortBy = "Name" - SortByCreation_time SortBy = "CreationTime" - SortByStatus SortBy = "Status" + SortByName SortBy = "Name" + SortByCreationTime SortBy = "CreationTime" + SortByStatus SortBy = "Status" ) // Values returns all known values for SortBy. 
Note that this can be expanded in @@ -2289,8 +2289,8 @@ type SortExperimentsBy string // Enum values for SortExperimentsBy const ( - SortExperimentsByName SortExperimentsBy = "Name" - SortExperimentsByCreation_time SortExperimentsBy = "CreationTime" + SortExperimentsByName SortExperimentsBy = "Name" + SortExperimentsByCreationTime SortExperimentsBy = "CreationTime" ) // Values returns all known values for SortExperimentsBy. Note that this can be @@ -2325,8 +2325,8 @@ type SortTrialComponentsBy string // Enum values for SortTrialComponentsBy const ( - SortTrialComponentsByName SortTrialComponentsBy = "Name" - SortTrialComponentsByCreation_time SortTrialComponentsBy = "CreationTime" + SortTrialComponentsByName SortTrialComponentsBy = "Name" + SortTrialComponentsByCreationTime SortTrialComponentsBy = "CreationTime" ) // Values returns all known values for SortTrialComponentsBy. Note that this can be @@ -2343,8 +2343,8 @@ type SortTrialsBy string // Enum values for SortTrialsBy const ( - SortTrialsByName SortTrialsBy = "Name" - SortTrialsByCreation_time SortTrialsBy = "CreationTime" + SortTrialsByName SortTrialsBy = "Name" + SortTrialsByCreationTime SortTrialsBy = "CreationTime" ) // Values returns all known values for SortTrialsBy. Note that this can be expanded @@ -2383,33 +2383,33 @@ type TargetDevice string // Enum values for TargetDevice const ( - TargetDeviceLambda TargetDevice = "lambda" - TargetDeviceMl_m4 TargetDevice = "ml_m4" - TargetDeviceMl_m5 TargetDevice = "ml_m5" - TargetDeviceMl_c4 TargetDevice = "ml_c4" - TargetDeviceMl_c5 TargetDevice = "ml_c5" - TargetDeviceMl_p2 TargetDevice = "ml_p2" - TargetDeviceMl_p3 TargetDevice = "ml_p3" - TargetDeviceMl_g4dn TargetDevice = "ml_g4dn" - TargetDeviceMl_inf1 TargetDevice = "ml_inf1" - TargetDeviceJetson_tx1 TargetDevice = "jetson_tx1" - TargetDeviceJetson_tx2 TargetDevice = "jetson_tx2" - TargetDeviceJetson_nano TargetDevice = "jetson_nano" - TargetDeviceJetson_xavier TargetDevice = "jetson_xavier" - TargetDeviceRasp3b TargetDevice = "rasp3b" - TargetDeviceImx8qm TargetDevice = "imx8qm" - TargetDeviceDeeplens TargetDevice = "deeplens" - TargetDeviceRk3399 TargetDevice = "rk3399" - TargetDeviceRk3288 TargetDevice = "rk3288" - TargetDeviceAisage TargetDevice = "aisage" - TargetDeviceSbe_c TargetDevice = "sbe_c" - TargetDeviceQcs605 TargetDevice = "qcs605" - TargetDeviceQcs603 TargetDevice = "qcs603" - TargetDeviceSitara_am57x TargetDevice = "sitara_am57x" - TargetDeviceAmba_cv22 TargetDevice = "amba_cv22" - TargetDeviceX86_win32 TargetDevice = "x86_win32" - TargetDeviceX86_win64 TargetDevice = "x86_win64" - TargetDeviceCoreml TargetDevice = "coreml" + TargetDeviceLambda TargetDevice = "lambda" + TargetDeviceMlM4 TargetDevice = "ml_m4" + TargetDeviceMlM5 TargetDevice = "ml_m5" + TargetDeviceMlC4 TargetDevice = "ml_c4" + TargetDeviceMlC5 TargetDevice = "ml_c5" + TargetDeviceMlP2 TargetDevice = "ml_p2" + TargetDeviceMlP3 TargetDevice = "ml_p3" + TargetDeviceMlG4dn TargetDevice = "ml_g4dn" + TargetDeviceMlInf1 TargetDevice = "ml_inf1" + TargetDeviceJetsonTx1 TargetDevice = "jetson_tx1" + TargetDeviceJetsonTx2 TargetDevice = "jetson_tx2" + TargetDeviceJetsonNano TargetDevice = "jetson_nano" + TargetDeviceJetsonXavier TargetDevice = "jetson_xavier" + TargetDeviceRasp3b TargetDevice = "rasp3b" + TargetDeviceImx8qm TargetDevice = "imx8qm" + TargetDeviceDeeplens TargetDevice = "deeplens" + TargetDeviceRk3399 TargetDevice = "rk3399" + TargetDeviceRk3288 TargetDevice = "rk3288" + TargetDeviceAisage TargetDevice = "aisage" + TargetDeviceSbeC 
TargetDevice = "sbe_c" + TargetDeviceQcs605 TargetDevice = "qcs605" + TargetDeviceQcs603 TargetDevice = "qcs603" + TargetDeviceSitaraAm57x TargetDevice = "sitara_am57x" + TargetDeviceAmbaCv22 TargetDevice = "amba_cv22" + TargetDeviceX86Win32 TargetDevice = "x86_win32" + TargetDeviceX86Win64 TargetDevice = "x86_win64" + TargetDeviceCoreml TargetDevice = "coreml" ) // Values returns all known values for TargetDevice. Note that this can be expanded @@ -2451,9 +2451,9 @@ type TargetPlatformAccelerator string // Enum values for TargetPlatformAccelerator const ( - TargetPlatformAcceleratorIntel_graphics TargetPlatformAccelerator = "INTEL_GRAPHICS" - TargetPlatformAcceleratorMali TargetPlatformAccelerator = "MALI" - TargetPlatformAcceleratorNvidia TargetPlatformAccelerator = "NVIDIA" + TargetPlatformAcceleratorIntelGraphics TargetPlatformAccelerator = "INTEL_GRAPHICS" + TargetPlatformAcceleratorMali TargetPlatformAccelerator = "MALI" + TargetPlatformAcceleratorNvidia TargetPlatformAccelerator = "NVIDIA" ) // Values returns all known values for TargetPlatformAccelerator. Note that this @@ -2471,11 +2471,11 @@ type TargetPlatformArch string // Enum values for TargetPlatformArch const ( - TargetPlatformArchX86_64 TargetPlatformArch = "X86_64" - TargetPlatformArchX86 TargetPlatformArch = "X86" - TargetPlatformArchArm64 TargetPlatformArch = "ARM64" - TargetPlatformArchArm_eabi TargetPlatformArch = "ARM_EABI" - TargetPlatformArchArm_eabihf TargetPlatformArch = "ARM_EABIHF" + TargetPlatformArchX8664 TargetPlatformArch = "X86_64" + TargetPlatformArchX86 TargetPlatformArch = "X86" + TargetPlatformArchArm64 TargetPlatformArch = "ARM64" + TargetPlatformArchArmEabi TargetPlatformArch = "ARM_EABI" + TargetPlatformArchArmEabihf TargetPlatformArch = "ARM_EABIHF" ) // Values returns all known values for TargetPlatformArch. 
Note that this can be @@ -2531,44 +2531,44 @@ type TrainingInstanceType string // Enum values for TrainingInstanceType const ( - TrainingInstanceTypeMl_m4_xlarge TrainingInstanceType = "ml.m4.xlarge" - TrainingInstanceTypeMl_m4_2xlarge TrainingInstanceType = "ml.m4.2xlarge" - TrainingInstanceTypeMl_m4_4xlarge TrainingInstanceType = "ml.m4.4xlarge" - TrainingInstanceTypeMl_m4_10xlarge TrainingInstanceType = "ml.m4.10xlarge" - TrainingInstanceTypeMl_m4_16xlarge TrainingInstanceType = "ml.m4.16xlarge" - TrainingInstanceTypeMl_g4dn_xlarge TrainingInstanceType = "ml.g4dn.xlarge" - TrainingInstanceTypeMl_g4dn_2xlarge TrainingInstanceType = "ml.g4dn.2xlarge" - TrainingInstanceTypeMl_g4dn_4xlarge TrainingInstanceType = "ml.g4dn.4xlarge" - TrainingInstanceTypeMl_g4dn_8xlarge TrainingInstanceType = "ml.g4dn.8xlarge" - TrainingInstanceTypeMl_g4dn_12xlarge TrainingInstanceType = "ml.g4dn.12xlarge" - TrainingInstanceTypeMl_g4dn_16xlarge TrainingInstanceType = "ml.g4dn.16xlarge" - TrainingInstanceTypeMl_m5_large TrainingInstanceType = "ml.m5.large" - TrainingInstanceTypeMl_m5_xlarge TrainingInstanceType = "ml.m5.xlarge" - TrainingInstanceTypeMl_m5_2xlarge TrainingInstanceType = "ml.m5.2xlarge" - TrainingInstanceTypeMl_m5_4xlarge TrainingInstanceType = "ml.m5.4xlarge" - TrainingInstanceTypeMl_m5_12xlarge TrainingInstanceType = "ml.m5.12xlarge" - TrainingInstanceTypeMl_m5_24xlarge TrainingInstanceType = "ml.m5.24xlarge" - TrainingInstanceTypeMl_c4_xlarge TrainingInstanceType = "ml.c4.xlarge" - TrainingInstanceTypeMl_c4_2xlarge TrainingInstanceType = "ml.c4.2xlarge" - TrainingInstanceTypeMl_c4_4xlarge TrainingInstanceType = "ml.c4.4xlarge" - TrainingInstanceTypeMl_c4_8xlarge TrainingInstanceType = "ml.c4.8xlarge" - TrainingInstanceTypeMl_p2_xlarge TrainingInstanceType = "ml.p2.xlarge" - TrainingInstanceTypeMl_p2_8xlarge TrainingInstanceType = "ml.p2.8xlarge" - TrainingInstanceTypeMl_p2_16xlarge TrainingInstanceType = "ml.p2.16xlarge" - TrainingInstanceTypeMl_p3_2xlarge TrainingInstanceType = "ml.p3.2xlarge" - TrainingInstanceTypeMl_p3_8xlarge TrainingInstanceType = "ml.p3.8xlarge" - TrainingInstanceTypeMl_p3_16xlarge TrainingInstanceType = "ml.p3.16xlarge" - TrainingInstanceTypeMl_p3dn_24xlarge TrainingInstanceType = "ml.p3dn.24xlarge" - TrainingInstanceTypeMl_c5_xlarge TrainingInstanceType = "ml.c5.xlarge" - TrainingInstanceTypeMl_c5_2xlarge TrainingInstanceType = "ml.c5.2xlarge" - TrainingInstanceTypeMl_c5_4xlarge TrainingInstanceType = "ml.c5.4xlarge" - TrainingInstanceTypeMl_c5_9xlarge TrainingInstanceType = "ml.c5.9xlarge" - TrainingInstanceTypeMl_c5_18xlarge TrainingInstanceType = "ml.c5.18xlarge" - TrainingInstanceTypeMl_c5n_xlarge TrainingInstanceType = "ml.c5n.xlarge" - TrainingInstanceTypeMl_c5n_2xlarge TrainingInstanceType = "ml.c5n.2xlarge" - TrainingInstanceTypeMl_c5n_4xlarge TrainingInstanceType = "ml.c5n.4xlarge" - TrainingInstanceTypeMl_c5n_9xlarge TrainingInstanceType = "ml.c5n.9xlarge" - TrainingInstanceTypeMl_c5n_18xlarge TrainingInstanceType = "ml.c5n.18xlarge" + TrainingInstanceTypeMlM4Xlarge TrainingInstanceType = "ml.m4.xlarge" + TrainingInstanceTypeMlM42xlarge TrainingInstanceType = "ml.m4.2xlarge" + TrainingInstanceTypeMlM44xlarge TrainingInstanceType = "ml.m4.4xlarge" + TrainingInstanceTypeMlM410xlarge TrainingInstanceType = "ml.m4.10xlarge" + TrainingInstanceTypeMlM416xlarge TrainingInstanceType = "ml.m4.16xlarge" + TrainingInstanceTypeMlG4dnXlarge TrainingInstanceType = "ml.g4dn.xlarge" + TrainingInstanceTypeMlG4dn2xlarge TrainingInstanceType = "ml.g4dn.2xlarge" + 
TrainingInstanceTypeMlG4dn4xlarge TrainingInstanceType = "ml.g4dn.4xlarge" + TrainingInstanceTypeMlG4dn8xlarge TrainingInstanceType = "ml.g4dn.8xlarge" + TrainingInstanceTypeMlG4dn12xlarge TrainingInstanceType = "ml.g4dn.12xlarge" + TrainingInstanceTypeMlG4dn16xlarge TrainingInstanceType = "ml.g4dn.16xlarge" + TrainingInstanceTypeMlM5Large TrainingInstanceType = "ml.m5.large" + TrainingInstanceTypeMlM5Xlarge TrainingInstanceType = "ml.m5.xlarge" + TrainingInstanceTypeMlM52xlarge TrainingInstanceType = "ml.m5.2xlarge" + TrainingInstanceTypeMlM54xlarge TrainingInstanceType = "ml.m5.4xlarge" + TrainingInstanceTypeMlM512xlarge TrainingInstanceType = "ml.m5.12xlarge" + TrainingInstanceTypeMlM524xlarge TrainingInstanceType = "ml.m5.24xlarge" + TrainingInstanceTypeMlC4Xlarge TrainingInstanceType = "ml.c4.xlarge" + TrainingInstanceTypeMlC42xlarge TrainingInstanceType = "ml.c4.2xlarge" + TrainingInstanceTypeMlC44xlarge TrainingInstanceType = "ml.c4.4xlarge" + TrainingInstanceTypeMlC48xlarge TrainingInstanceType = "ml.c4.8xlarge" + TrainingInstanceTypeMlP2Xlarge TrainingInstanceType = "ml.p2.xlarge" + TrainingInstanceTypeMlP28xlarge TrainingInstanceType = "ml.p2.8xlarge" + TrainingInstanceTypeMlP216xlarge TrainingInstanceType = "ml.p2.16xlarge" + TrainingInstanceTypeMlP32xlarge TrainingInstanceType = "ml.p3.2xlarge" + TrainingInstanceTypeMlP38xlarge TrainingInstanceType = "ml.p3.8xlarge" + TrainingInstanceTypeMlP316xlarge TrainingInstanceType = "ml.p3.16xlarge" + TrainingInstanceTypeMlP3dn24xlarge TrainingInstanceType = "ml.p3dn.24xlarge" + TrainingInstanceTypeMlC5Xlarge TrainingInstanceType = "ml.c5.xlarge" + TrainingInstanceTypeMlC52xlarge TrainingInstanceType = "ml.c5.2xlarge" + TrainingInstanceTypeMlC54xlarge TrainingInstanceType = "ml.c5.4xlarge" + TrainingInstanceTypeMlC59xlarge TrainingInstanceType = "ml.c5.9xlarge" + TrainingInstanceTypeMlC518xlarge TrainingInstanceType = "ml.c5.18xlarge" + TrainingInstanceTypeMlC5nXlarge TrainingInstanceType = "ml.c5n.xlarge" + TrainingInstanceTypeMlC5n2xlarge TrainingInstanceType = "ml.c5n.2xlarge" + TrainingInstanceTypeMlC5n4xlarge TrainingInstanceType = "ml.c5n.4xlarge" + TrainingInstanceTypeMlC5n9xlarge TrainingInstanceType = "ml.c5n.9xlarge" + TrainingInstanceTypeMlC5n18xlarge TrainingInstanceType = "ml.c5n.18xlarge" ) // Values returns all known values for TrainingInstanceType. Note that this can be @@ -2661,11 +2661,11 @@ type TrainingJobStatus string // Enum values for TrainingJobStatus const ( - TrainingJobStatusIn_progress TrainingJobStatus = "InProgress" - TrainingJobStatusCompleted TrainingJobStatus = "Completed" - TrainingJobStatusFailed TrainingJobStatus = "Failed" - TrainingJobStatusStopping TrainingJobStatus = "Stopping" - TrainingJobStatusStopped TrainingJobStatus = "Stopped" + TrainingJobStatusInProgress TrainingJobStatus = "InProgress" + TrainingJobStatusCompleted TrainingJobStatus = "Completed" + TrainingJobStatusFailed TrainingJobStatus = "Failed" + TrainingJobStatusStopping TrainingJobStatus = "Stopping" + TrainingJobStatusStopped TrainingJobStatus = "Stopped" ) // Values returns all known values for TrainingJobStatus. 
Note that this can be @@ -2685,32 +2685,32 @@ type TransformInstanceType string // Enum values for TransformInstanceType const ( - TransformInstanceTypeMl_m4_xlarge TransformInstanceType = "ml.m4.xlarge" - TransformInstanceTypeMl_m4_2xlarge TransformInstanceType = "ml.m4.2xlarge" - TransformInstanceTypeMl_m4_4xlarge TransformInstanceType = "ml.m4.4xlarge" - TransformInstanceTypeMl_m4_10xlarge TransformInstanceType = "ml.m4.10xlarge" - TransformInstanceTypeMl_m4_16xlarge TransformInstanceType = "ml.m4.16xlarge" - TransformInstanceTypeMl_c4_xlarge TransformInstanceType = "ml.c4.xlarge" - TransformInstanceTypeMl_c4_2xlarge TransformInstanceType = "ml.c4.2xlarge" - TransformInstanceTypeMl_c4_4xlarge TransformInstanceType = "ml.c4.4xlarge" - TransformInstanceTypeMl_c4_8xlarge TransformInstanceType = "ml.c4.8xlarge" - TransformInstanceTypeMl_p2_xlarge TransformInstanceType = "ml.p2.xlarge" - TransformInstanceTypeMl_p2_8xlarge TransformInstanceType = "ml.p2.8xlarge" - TransformInstanceTypeMl_p2_16xlarge TransformInstanceType = "ml.p2.16xlarge" - TransformInstanceTypeMl_p3_2xlarge TransformInstanceType = "ml.p3.2xlarge" - TransformInstanceTypeMl_p3_8xlarge TransformInstanceType = "ml.p3.8xlarge" - TransformInstanceTypeMl_p3_16xlarge TransformInstanceType = "ml.p3.16xlarge" - TransformInstanceTypeMl_c5_xlarge TransformInstanceType = "ml.c5.xlarge" - TransformInstanceTypeMl_c5_2xlarge TransformInstanceType = "ml.c5.2xlarge" - TransformInstanceTypeMl_c5_4xlarge TransformInstanceType = "ml.c5.4xlarge" - TransformInstanceTypeMl_c5_9xlarge TransformInstanceType = "ml.c5.9xlarge" - TransformInstanceTypeMl_c5_18xlarge TransformInstanceType = "ml.c5.18xlarge" - TransformInstanceTypeMl_m5_large TransformInstanceType = "ml.m5.large" - TransformInstanceTypeMl_m5_xlarge TransformInstanceType = "ml.m5.xlarge" - TransformInstanceTypeMl_m5_2xlarge TransformInstanceType = "ml.m5.2xlarge" - TransformInstanceTypeMl_m5_4xlarge TransformInstanceType = "ml.m5.4xlarge" - TransformInstanceTypeMl_m5_12xlarge TransformInstanceType = "ml.m5.12xlarge" - TransformInstanceTypeMl_m5_24xlarge TransformInstanceType = "ml.m5.24xlarge" + TransformInstanceTypeMlM4Xlarge TransformInstanceType = "ml.m4.xlarge" + TransformInstanceTypeMlM42xlarge TransformInstanceType = "ml.m4.2xlarge" + TransformInstanceTypeMlM44xlarge TransformInstanceType = "ml.m4.4xlarge" + TransformInstanceTypeMlM410xlarge TransformInstanceType = "ml.m4.10xlarge" + TransformInstanceTypeMlM416xlarge TransformInstanceType = "ml.m4.16xlarge" + TransformInstanceTypeMlC4Xlarge TransformInstanceType = "ml.c4.xlarge" + TransformInstanceTypeMlC42xlarge TransformInstanceType = "ml.c4.2xlarge" + TransformInstanceTypeMlC44xlarge TransformInstanceType = "ml.c4.4xlarge" + TransformInstanceTypeMlC48xlarge TransformInstanceType = "ml.c4.8xlarge" + TransformInstanceTypeMlP2Xlarge TransformInstanceType = "ml.p2.xlarge" + TransformInstanceTypeMlP28xlarge TransformInstanceType = "ml.p2.8xlarge" + TransformInstanceTypeMlP216xlarge TransformInstanceType = "ml.p2.16xlarge" + TransformInstanceTypeMlP32xlarge TransformInstanceType = "ml.p3.2xlarge" + TransformInstanceTypeMlP38xlarge TransformInstanceType = "ml.p3.8xlarge" + TransformInstanceTypeMlP316xlarge TransformInstanceType = "ml.p3.16xlarge" + TransformInstanceTypeMlC5Xlarge TransformInstanceType = "ml.c5.xlarge" + TransformInstanceTypeMlC52xlarge TransformInstanceType = "ml.c5.2xlarge" + TransformInstanceTypeMlC54xlarge TransformInstanceType = "ml.c5.4xlarge" + TransformInstanceTypeMlC59xlarge TransformInstanceType = 
"ml.c5.9xlarge" + TransformInstanceTypeMlC518xlarge TransformInstanceType = "ml.c5.18xlarge" + TransformInstanceTypeMlM5Large TransformInstanceType = "ml.m5.large" + TransformInstanceTypeMlM5Xlarge TransformInstanceType = "ml.m5.xlarge" + TransformInstanceTypeMlM52xlarge TransformInstanceType = "ml.m5.2xlarge" + TransformInstanceTypeMlM54xlarge TransformInstanceType = "ml.m5.4xlarge" + TransformInstanceTypeMlM512xlarge TransformInstanceType = "ml.m5.12xlarge" + TransformInstanceTypeMlM524xlarge TransformInstanceType = "ml.m5.24xlarge" ) // Values returns all known values for TransformInstanceType. Note that this can be @@ -2751,11 +2751,11 @@ type TransformJobStatus string // Enum values for TransformJobStatus const ( - TransformJobStatusIn_progress TransformJobStatus = "InProgress" - TransformJobStatusCompleted TransformJobStatus = "Completed" - TransformJobStatusFailed TransformJobStatus = "Failed" - TransformJobStatusStopping TransformJobStatus = "Stopping" - TransformJobStatusStopped TransformJobStatus = "Stopped" + TransformJobStatusInProgress TransformJobStatus = "InProgress" + TransformJobStatusCompleted TransformJobStatus = "Completed" + TransformJobStatusFailed TransformJobStatus = "Failed" + TransformJobStatusStopping TransformJobStatus = "Stopping" + TransformJobStatusStopped TransformJobStatus = "Stopped" ) // Values returns all known values for TransformJobStatus. Note that this can be @@ -2775,11 +2775,11 @@ type TrialComponentPrimaryStatus string // Enum values for TrialComponentPrimaryStatus const ( - TrialComponentPrimaryStatusIn_progress TrialComponentPrimaryStatus = "InProgress" - TrialComponentPrimaryStatusCompleted TrialComponentPrimaryStatus = "Completed" - TrialComponentPrimaryStatusFailed TrialComponentPrimaryStatus = "Failed" - TrialComponentPrimaryStatusStopping TrialComponentPrimaryStatus = "Stopping" - TrialComponentPrimaryStatusStopped TrialComponentPrimaryStatus = "Stopped" + TrialComponentPrimaryStatusInProgress TrialComponentPrimaryStatus = "InProgress" + TrialComponentPrimaryStatusCompleted TrialComponentPrimaryStatus = "Completed" + TrialComponentPrimaryStatusFailed TrialComponentPrimaryStatus = "Failed" + TrialComponentPrimaryStatusStopping TrialComponentPrimaryStatus = "Stopping" + TrialComponentPrimaryStatusStopped TrialComponentPrimaryStatus = "Stopped" ) // Values returns all known values for TrialComponentPrimaryStatus. 
Note that this diff --git a/service/sagemaker/types/types.go b/service/sagemaker/types/types.go index cd4f9bd8a5d..83f9eca47e7 100644 --- a/service/sagemaker/types/types.go +++ b/service/sagemaker/types/types.go @@ -44,22 +44,21 @@ type AlgorithmSpecification struct { // default is false and time-series metrics aren't generated except in the // following cases: // - // * You use one of the Amazon SageMaker built-in - // algorithms + // * You use one of the Amazon SageMaker built-in algorithms // - // * You use one of the following Prebuilt Amazon SageMaker Docker - // Images + // * + // You use one of the following Prebuilt Amazon SageMaker Docker Images // (https://docs.aws.amazon.com/sagemaker/latest/dg/pre-built-containers-frameworks-deep-learning.html): // + // * + // Tensorflow (version >= 1.15) // - // * Tensorflow (version >= 1.15) - // - // * MXNet (version >= 1.6) + // * MXNet (version >= 1.6) // - // * - // PyTorch (version >= 1.3) + // * PyTorch (version >= + // 1.3) // - // * You specify at least one MetricDefinition + // * You specify at least one MetricDefinition EnableSageMakerMetricsTimeSeries *bool // A list of metric definition objects. Each object specifies the metric name and @@ -188,7 +187,7 @@ type AnnotationConsolidationConfig struct { // Bounding box - Finds the most similar boxes from different workers based on the // Jaccard index of the boxes. // - // * + // * // arn:aws:lambda:us-east-1:432418664414:function:ACS-BoundingBoxarn:aws:lambda:us-east-2:266458841044:function:ACS-BoundingBoxarn:aws:lambda:us-west-2:081040173940:function:ACS-BoundingBoxarn:aws:lambda:eu-west-1:568282634449:function:ACS-BoundingBoxarn:aws:lambda:ap-northeast-1:477331159723:function:ACS-BoundingBoxarn:aws:lambda:ap-southeast-2:454466003867:function:ACS-BoundingBoxarn:aws:lambda:ap-south-1:565803892007:function:ACS-BoundingBoxarn:aws:lambda:eu-central-1:203001061592:function:ACS-BoundingBoxarn:aws:lambda:ap-northeast-2:845288260483:function:ACS-BoundingBoxarn:aws:lambda:eu-west-2:487402164563:function:ACS-BoundingBoxarn:aws:lambda:ap-southeast-1:377565633583:function:ACS-BoundingBoxarn:aws:lambda:ca-central-1:918755190332:function:ACS-BoundingBox // // Image @@ -196,7 +195,7 @@ type AnnotationConsolidationConfig struct { // estimate the true class of an image based on annotations from individual // workers. // - // * + // * // arn:aws:lambda:us-east-1:432418664414:function:ACS-ImageMultiClassarn:aws:lambda:us-east-2:266458841044:function:ACS-ImageMultiClassarn:aws:lambda:us-west-2:081040173940:function:ACS-ImageMultiClassarn:aws:lambda:eu-west-1:568282634449:function:ACS-ImageMultiClassarn:aws:lambda:ap-northeast-1:477331159723:function:ACS-ImageMultiClassarn:aws:lambda:ap-southeast-2:454466003867:function:ACS-ImageMultiClassarn:aws:lambda:ap-south-1:565803892007:function:ACS-ImageMultiClassarn:aws:lambda:eu-central-1:203001061592:function:ACS-ImageMultiClassarn:aws:lambda:ap-northeast-2:845288260483:function:ACS-ImageMultiClassarn:aws:lambda:eu-west-2:487402164563:function:ACS-ImageMultiClassarn:aws:lambda:ap-southeast-1:377565633583:function:ACS-ImageMultiClassarn:aws:lambda:ca-central-1:918755190332:function:ACS-ImageMultiClass // // Multi-label @@ -204,21 +203,20 @@ type AnnotationConsolidationConfig struct { // to estimate the true classes of an image based on annotations from individual // workers. 
// - // * + // * // arn:aws:lambda:us-east-1:432418664414:function:ACS-ImageMultiClassMultiLabelarn:aws:lambda:us-east-2:266458841044:function:ACS-ImageMultiClassMultiLabelarn:aws:lambda:us-west-2:081040173940:function:ACS-ImageMultiClassMultiLabelarn:aws:lambda:eu-west-1:568282634449:function:ACS-ImageMultiClassMultiLabelarn:aws:lambda:ap-northeast-1:477331159723:function:ACS-ImageMultiClassMultiLabelarn:aws:lambda:ap-southeast-2:454466003867:function:ACS-ImageMultiClassMultiLabelarn:aws:lambda:ap-south-1:565803892007:function:ACS-ImageMultiClassMultiLabelarn:aws:lambda:eu-central-1:203001061592:function:ACS-ImageMultiClassMultiLabelarn:aws:lambda:ap-northeast-2:845288260483:function:ACS-ImageMultiClassMultiLabelarn:aws:lambda:eu-west-2:487402164563:function:ACS-ImageMultiClassMultiLabelarn:aws:lambda:ap-southeast-1:377565633583:function:ACS-ImageMultiClassMultiLabelarn:aws:lambda:ca-central-1:918755190332:function:ACS-ImageMultiClassMultiLabel // // Semantic // segmentation - Treats each pixel in an image as a multi-class classification and // treats pixel annotations from workers as "votes" for the correct label. // - // * + // * // arn:aws:lambda:us-east-1:432418664414:function:ACS-SemanticSegmentationarn:aws:lambda:us-east-2:266458841044:function:ACS-SemanticSegmentationarn:aws:lambda:us-west-2:081040173940:function:ACS-SemanticSegmentationarn:aws:lambda:eu-west-1:568282634449:function:ACS-SemanticSegmentationarn:aws:lambda:ap-northeast-1:477331159723:function:ACS-SemanticSegmentationarn:aws:lambda:ap-southeast-2:454466003867:function:ACS-SemanticSegmentationarn:aws:lambda:ap-south-1:565803892007:function:ACS-SemanticSegmentationarn:aws:lambda:eu-central-1:203001061592:function:ACS-SemanticSegmentationarn:aws:lambda:ap-northeast-2:845288260483:function:ACS-SemanticSegmentationarn:aws:lambda:eu-west-2:487402164563:function:ACS-SemanticSegmentationarn:aws:lambda:ap-southeast-1:377565633583:function:ACS-SemanticSegmentationarn:aws:lambda:ca-central-1:918755190332:function:ACS-SemanticSegmentation // // Text // classification - Uses a variant of the Expectation Maximization approach to // estimate the true class of text based on annotations from individual workers. // - // // * // arn:aws:lambda:us-east-1:432418664414:function:ACS-TextMultiClassarn:aws:lambda:us-east-2:266458841044:function:ACS-TextMultiClassarn:aws:lambda:us-west-2:081040173940:function:ACS-TextMultiClassarn:aws:lambda:eu-west-1:568282634449:function:ACS-TextMultiClassarn:aws:lambda:ap-northeast-1:477331159723:function:ACS-TextMultiClassarn:aws:lambda:ap-southeast-2:454466003867:function:ACS-TextMultiClassarn:aws:lambda:ap-south-1:565803892007:function:ACS-TextMultiClassarn:aws:lambda:eu-central-1:203001061592:function:ACS-TextMultiClassarn:aws:lambda:ap-northeast-2:845288260483:function:ACS-TextMultiClassarn:aws:lambda:eu-west-2:487402164563:function:ACS-TextMultiClassarn:aws:lambda:ap-southeast-1:377565633583:function:ACS-TextMultiClassarn:aws:lambda:ca-central-1:918755190332:function:ACS-TextMultiClass // @@ -227,21 +225,21 @@ type AnnotationConsolidationConfig struct { // estimate the true classes of text based on annotations from individual // workers. 
// - // * + // * // arn:aws:lambda:us-east-1:432418664414:function:ACS-TextMultiClassMultiLabelarn:aws:lambda:us-east-2:266458841044:function:ACS-TextMultiClassMultiLabelarn:aws:lambda:us-west-2:081040173940:function:ACS-TextMultiClassMultiLabelarn:aws:lambda:eu-west-1:568282634449:function:ACS-TextMultiClassMultiLabelarn:aws:lambda:ap-northeast-1:477331159723:function:ACS-TextMultiClassMultiLabelarn:aws:lambda:ap-southeast-2:454466003867:function:ACS-TextMultiClassMultiLabelarn:aws:lambda:ap-south-1:565803892007:function:ACS-TextMultiClassMultiLabelarn:aws:lambda:eu-central-1:203001061592:function:ACS-TextMultiClassMultiLabelarn:aws:lambda:ap-northeast-2:845288260483:function:ACS-TextMultiClassMultiLabelarn:aws:lambda:eu-west-2:487402164563:function:ACS-TextMultiClassMultiLabelarn:aws:lambda:ap-southeast-1:377565633583:function:ACS-TextMultiClassMultiLabelarn:aws:lambda:ca-central-1:918755190332:function:ACS-TextMultiClassMultiLabel // // Named // entity recognition - Groups similar selections and calculates aggregate // boundaries, resolving to most-assigned label. // - // * + // * // arn:aws:lambda:us-east-1:432418664414:function:ACS-NamedEntityRecognitionarn:aws:lambda:us-east-2:266458841044:function:ACS-NamedEntityRecognitionarn:aws:lambda:us-west-2:081040173940:function:ACS-NamedEntityRecognitionarn:aws:lambda:eu-west-1:568282634449:function:ACS-NamedEntityRecognitionarn:aws:lambda:ap-northeast-1:477331159723:function:ACS-NamedEntityRecognitionarn:aws:lambda:ap-southeast-2:454466003867:function:ACS-NamedEntityRecognitionarn:aws:lambda:ap-south-1:565803892007:function:ACS-NamedEntityRecognitionarn:aws:lambda:eu-central-1:203001061592:function:ACS-NamedEntityRecognitionarn:aws:lambda:ap-northeast-2:845288260483:function:ACS-NamedEntityRecognitionarn:aws:lambda:eu-west-2:487402164563:function:ACS-NamedEntityRecognitionarn:aws:lambda:ap-southeast-1:377565633583:function:ACS-NamedEntityRecognitionarn:aws:lambda:ca-central-1:918755190332:function:ACS-NamedEntityRecognition // // Named // entity recognition - Groups similar selections and calculates aggregate // boundaries, resolving to most-assigned label. // - // * + // * // arn:aws:lambda:us-east-1:432418664414:function:ACS-NamedEntityRecognitionarn:aws:lambda:us-east-2:266458841044:function:ACS-NamedEntityRecognitionarn:aws:lambda:us-west-2:081040173940:function:ACS-NamedEntityRecognitionarn:aws:lambda:eu-west-1:568282634449:function:ACS-NamedEntityRecognitionarn:aws:lambda:ap-northeast-1:477331159723:function:ACS-NamedEntityRecognitionarn:aws:lambda:ap-southeast-2:454466003867:function:ACS-NamedEntityRecognitionarn:aws:lambda:ap-south-1:565803892007:function:ACS-NamedEntityRecognitionarn:aws:lambda:eu-central-1:203001061592:function:ACS-NamedEntityRecognitionarn:aws:lambda:ap-northeast-2:845288260483:function:ACS-NamedEntityRecognitionarn:aws:lambda:eu-west-2:487402164563:function:ACS-NamedEntityRecognitionarn:aws:lambda:ap-southeast-1:377565633583:function:ACS-NamedEntityRecognitionarn:aws:lambda:ca-central-1:918755190332:function:ACS-NamedEntityRecognition // // Video @@ -249,7 +247,7 @@ type AnnotationConsolidationConfig struct { // using predefined labels that you specify. Workers are shown videos and are asked // to choose one label for each video. 
// - // * + // * // arn:aws:lambda:us-east-1:432418664414:function:ACS-VideoMultiClassarn:aws:lambda:us-east-2:266458841044:function:ACS-VideoMultiClassarn:aws:lambda:us-west-2:081040173940:function:ACS-VideoMultiClassarn:aws:lambda:eu-west-1:568282634449:function:ACS-VideoMultiClassarn:aws:lambda:ap-northeast-1:477331159723:function:ACS-VideoMultiClassarn:aws:lambda:ap-southeast-2:454466003867:function:ACS-VideoMultiClassarn:aws:lambda:ap-south-1:565803892007:function:ACS-VideoMultiClassarn:aws:lambda:eu-central-1:203001061592:function:ACS-VideoMultiClassarn:aws:lambda:ap-northeast-2:845288260483:function:ACS-VideoMultiClassarn:aws:lambda:eu-west-2:487402164563:function:ACS-VideoMultiClassarn:aws:lambda:ap-southeast-1:377565633583:function:ACS-VideoMultiClassarn:aws:lambda:ca-central-1:918755190332:function:ACS-VideoMultiClass // // Video @@ -259,7 +257,7 @@ type AnnotationConsolidationConfig struct { // and localize various objects in a series of video frames, such as cars, bikes, // and pedestrians. // - // * + // * // arn:aws:lambda:us-east-1:432418664414:function:ACS-VideoObjectDetectionarn:aws:lambda:us-east-2:266458841044:function:ACS-VideoObjectDetectionarn:aws:lambda:us-west-2:081040173940:function:ACS-VideoObjectDetectionarn:aws:lambda:eu-west-1:568282634449:function:ACS-VideoObjectDetectionarn:aws:lambda:ap-northeast-1:477331159723:function:ACS-VideoObjectDetectionarn:aws:lambda:ap-southeast-2:454466003867:function:ACS-VideoObjectDetectionarn:aws:lambda:ap-south-1:565803892007:function:ACS-VideoObjectDetectionarn:aws:lambda:eu-central-1:203001061592:function:ACS-VideoObjectDetectionarn:aws:lambda:ap-northeast-2:845288260483:function:ACS-VideoObjectDetectionarn:aws:lambda:eu-west-2:487402164563:function:ACS-VideoObjectDetectionarn:aws:lambda:ap-southeast-1:377565633583:function:ACS-VideoObjectDetectionarn:aws:lambda:ca-central-1:918755190332:function:ACS-VideoObjectDetection // // Video @@ -268,7 +266,7 @@ type AnnotationConsolidationConfig struct { // bounding boxes. For example, you can use this task to ask workers to track the // movement of objects, such as cars, bikes, and pedestrians. // - // * + // * // arn:aws:lambda:us-east-1:432418664414:function:ACS-VideoObjectTrackingarn:aws:lambda:us-east-2:266458841044:function:ACS-VideoObjectTrackingarn:aws:lambda:us-west-2:081040173940:function:ACS-VideoObjectTrackingarn:aws:lambda:eu-west-1:568282634449:function:ACS-VideoObjectTrackingarn:aws:lambda:ap-northeast-1:477331159723:function:ACS-VideoObjectTrackingarn:aws:lambda:ap-southeast-2:454466003867:function:ACS-VideoObjectTrackingarn:aws:lambda:ap-south-1:565803892007:function:ACS-VideoObjectTrackingarn:aws:lambda:eu-central-1:203001061592:function:ACS-VideoObjectTrackingarn:aws:lambda:ap-northeast-2:845288260483:function:ACS-VideoObjectTrackingarn:aws:lambda:eu-west-2:487402164563:function:ACS-VideoObjectTrackingarn:aws:lambda:ap-southeast-1:377565633583:function:ACS-VideoObjectTrackingarn:aws:lambda:ca-central-1:918755190332:function:ACS-VideoObjectTracking // // 3D @@ -277,7 +275,7 @@ type AnnotationConsolidationConfig struct { // example, you can use this task type to ask workers to identify different types // of objects in a point cloud, such as cars, bikes, and pedestrians. 
// - // * + // * // arn:aws:lambda:us-east-1:432418664414:function:ACS-3DPointCloudObjectDetectionarn:aws:lambda:us-east-2:266458841044:function:ACS-3DPointCloudObjectDetectionarn:aws:lambda:us-west-2:081040173940:function:ACS-3DPointCloudObjectDetectionarn:aws:lambda:eu-west-1:568282634449:function:ACS-3DPointCloudObjectDetectionarn:aws:lambda:ap-northeast-1:477331159723:function:ACS-3DPointCloudObjectDetectionarn:aws:lambda:ap-southeast-2:454466003867:function:ACS-3DPointCloudObjectDetectionarn:aws:lambda:ap-south-1:565803892007:function:ACS-3DPointCloudObjectDetectionarn:aws:lambda:eu-central-1:203001061592:function:ACS-3DPointCloudObjectDetectionarn:aws:lambda:ap-northeast-2:845288260483:function:ACS-3DPointCloudObjectDetectionarn:aws:lambda:eu-west-2:487402164563:function:ACS-3DPointCloudObjectDetectionarn:aws:lambda:ap-southeast-1:377565633583:function:ACS-3DPointCloudObjectDetectionarn:aws:lambda:ca-central-1:918755190332:function:ACS-3DPointCloudObjectDetection // // 3D @@ -286,7 +284,7 @@ type AnnotationConsolidationConfig struct { // For example, you can use this task type to ask workers to track the movement of // vehicles across multiple point cloud frames. // - // * + // * // arn:aws:lambda:us-east-1:432418664414:function:ACS-3DPointCloudObjectTrackingarn:aws:lambda:us-east-2:266458841044:function:ACS-3DPointCloudObjectTrackingarn:aws:lambda:us-west-2:081040173940:function:ACS-3DPointCloudObjectTrackingarn:aws:lambda:eu-west-1:568282634449:function:ACS-3DPointCloudObjectTrackingarn:aws:lambda:ap-northeast-1:477331159723:function:ACS-3DPointCloudObjectTrackingarn:aws:lambda:ap-southeast-2:454466003867:function:ACS-3DPointCloudObjectTrackingarn:aws:lambda:ap-south-1:565803892007:function:ACS-3DPointCloudObjectTrackingarn:aws:lambda:eu-central-1:203001061592:function:ACS-3DPointCloudObjectTrackingarn:aws:lambda:ap-northeast-2:845288260483:function:ACS-3DPointCloudObjectTrackingarn:aws:lambda:eu-west-2:487402164563:function:ACS-3DPointCloudObjectTrackingarn:aws:lambda:ap-southeast-1:377565633583:function:ACS-3DPointCloudObjectTrackingarn:aws:lambda:ca-central-1:918755190332:function:ACS-3DPointCloudObjectTracking // // 3D @@ -295,7 +293,7 @@ type AnnotationConsolidationConfig struct { // point cloud using different colors where each color is assigned to one of the // classes you specify. 
// - // * + // * // arn:aws:lambda:us-east-1:432418664414:function:ACS-3DPointCloudSemanticSegmentationarn:aws:lambda:us-east-2:266458841044:function:ACS-3DPointCloudSemanticSegmentationarn:aws:lambda:us-west-2:081040173940:function:ACS-3DPointCloudSemanticSegmentationarn:aws:lambda:eu-west-1:568282634449:function:ACS-3DPointCloudSemanticSegmentationarn:aws:lambda:ap-northeast-1:477331159723:function:ACS-3DPointCloudSemanticSegmentationarn:aws:lambda:ap-southeast-2:454466003867:function:ACS-3DPointCloudSemanticSegmentationarn:aws:lambda:ap-south-1:565803892007:function:ACS-3DPointCloudSemanticSegmentationarn:aws:lambda:eu-central-1:203001061592:function:ACS-3DPointCloudSemanticSegmentationarn:aws:lambda:ap-northeast-2:845288260483:function:ACS-3DPointCloudSemanticSegmentationarn:aws:lambda:eu-west-2:487402164563:function:ACS-3DPointCloudSemanticSegmentationarn:aws:lambda:ap-southeast-1:377565633583:function:ACS-3DPointCloudSemanticSegmentationarn:aws:lambda:ca-central-1:918755190332:function:ACS-3DPointCloudSemanticSegmentation // // Use @@ -307,7 +305,7 @@ type AnnotationConsolidationConfig struct { // multi-class classification and treats pixel adjusted annotations from workers as // "votes" for the correct label. // - // * + // * // arn:aws:lambda:us-east-1:432418664414:function:ACS-AdjustmentSemanticSegmentationarn:aws:lambda:us-east-2:266458841044:function:ACS-AdjustmentSemanticSegmentationarn:aws:lambda:us-west-2:081040173940:function:ACS-AdjustmentSemanticSegmentationarn:aws:lambda:eu-west-1:568282634449:function:ACS-AdjustmentSemanticSegmentationarn:aws:lambda:ap-northeast-1:477331159723:function:ACS-AdjustmentSemanticSegmentationarn:aws:lambda:ap-southeast-2:454466003867:function:ACS-AdjustmentSemanticSegmentationarn:aws:lambda:ap-south-1:565803892007:function:ACS-AdjustmentSemanticSegmentationarn:aws:lambda:eu-central-1:203001061592:function:ACS-AdjustmentSemanticSegmentationarn:aws:lambda:ap-northeast-2:845288260483:function:ACS-AdjustmentSemanticSegmentationarn:aws:lambda:eu-west-2:487402164563:function:ACS-AdjustmentSemanticSegmentationarn:aws:lambda:ap-southeast-1:377565633583:function:ACS-AdjustmentSemanticSegmentationarn:aws:lambda:ca-central-1:918755190332:function:ACS-AdjustmentSemanticSegmentation // // Semantic @@ -315,7 +313,7 @@ type AnnotationConsolidationConfig struct { // approach to estimate the true class of verification judgment for semantic // segmentation labels based on annotations from individual workers. 
// - // * + // * // arn:aws:lambda:us-east-1:432418664414:function:ACS-VerificationSemanticSegmentationarn:aws:lambda:us-east-2:266458841044:function:ACS-VerificationSemanticSegmentationarn:aws:lambda:us-west-2:081040173940:function:ACS-VerificationSemanticSegmentationarn:aws:lambda:eu-west-1:568282634449:function:ACS-VerificationSemanticSegmentationarn:aws:lambda:ap-northeast-1:477331159723:function:ACS-VerificationSemanticSegmentationarn:aws:lambda:ap-southeast-2:454466003867:function:ACS-VerificationSemanticSegmentationarn:aws:lambda:ap-south-1:565803892007:function:ACS-VerificationSemanticSegmentationarn:aws:lambda:eu-central-1:203001061592:function:ACS-VerificationSemanticSegmentationarn:aws:lambda:ap-northeast-2:845288260483:function:ACS-VerificationSemanticSegmentationarn:aws:lambda:eu-west-2:487402164563:function:ACS-VerificationSemanticSegmentationarn:aws:lambda:ap-southeast-1:377565633583:function:ACS-VerificationSemanticSegmentationarn:aws:lambda:ca-central-1:918755190332:function:ACS-VerificationSemanticSegmentation // // Bounding @@ -323,14 +321,14 @@ type AnnotationConsolidationConfig struct { // estimate the true class of verification judgement for bounding box labels based // on annotations from individual workers. // - // * + // * // arn:aws:lambda:us-east-1:432418664414:function:ACS-VerificationBoundingBoxarn:aws:lambda:us-east-2:266458841044:function:ACS-VerificationBoundingBoxarn:aws:lambda:us-west-2:081040173940:function:ACS-VerificationBoundingBoxarn:aws:lambda:eu-west-1:568282634449:function:ACS-VerificationBoundingBoxarn:aws:lambda:ap-northeast-1:477331159723:function:ACS-VerificationBoundingBoxarn:aws:lambda:ap-southeast-2:454466003867:function:ACS-VerificationBoundingBoxarn:aws:lambda:ap-south-1:565803892007:function:ACS-VerificationBoundingBoxarn:aws:lambda:eu-central-1:203001061592:function:ACS-VerificationBoundingBoxarn:aws:lambda:ap-northeast-2:845288260483:function:ACS-VerificationBoundingBoxarn:aws:lambda:eu-west-2:487402164563:function:ACS-VerificationBoundingBoxarn:aws:lambda:ap-southeast-1:377565633583:function:ACS-VerificationBoundingBoxarn:aws:lambda:ca-central-1:918755190332:function:ACS-VerificationBoundingBox // // Bounding // box adjustment - Finds the most similar boxes from different workers based on // the Jaccard index of the adjusted annotations. // - // * + // * // arn:aws:lambda:us-east-1:432418664414:function:ACS-AdjustmentBoundingBoxarn:aws:lambda:us-east-2:266458841044:function:ACS-AdjustmentBoundingBoxarn:aws:lambda:us-west-2:081040173940:function:ACS-AdjustmentBoundingBoxarn:aws:lambda:eu-west-1:568282634449:function:ACS-AdjustmentBoundingBoxarn:aws:lambda:ap-northeast-1:477331159723:function:ACS-AdjustmentBoundingBoxarn:aws:lambda:ap-southeast-2:454466003867:function:ACS-AdjustmentBoundingBoxarn:aws:lambda:ap-south-1:565803892007:function:ACS-AdjustmentBoundingBoxarn:aws:lambda:eu-central-1:203001061592:function:ACS-AdjustmentBoundingBoxarn:aws:lambda:ap-northeast-2:845288260483:function:ACS-AdjustmentBoundingBoxarn:aws:lambda:eu-west-2:487402164563:function:ACS-AdjustmentBoundingBoxarn:aws:lambda:ap-southeast-1:377565633583:function:ACS-AdjustmentBoundingBoxarn:aws:lambda:ca-central-1:918755190332:function:ACS-AdjustmentBoundingBox // // Video @@ -338,7 +336,7 @@ type AnnotationConsolidationConfig struct { // adjust bounding boxes that workers have added to video frames to classify and // localize objects in a sequence of video frames. 
// - // * + // * // arn:aws:lambda:us-east-1:432418664414:function:ACS-AdjustmentVideoObjectDetectionarn:aws:lambda:us-east-2:266458841044:function:ACS-AdjustmentVideoObjectDetectionarn:aws:lambda:us-west-2:081040173940:function:ACS-AdjustmentVideoObjectDetectionarn:aws:lambda:eu-west-1:568282634449:function:ACS-AdjustmentVideoObjectDetectionarn:aws:lambda:ap-northeast-1:477331159723:function:ACS-AdjustmentVideoObjectDetectionarn:aws:lambda:ap-southeast-2:454466003867:function:ACS-AdjustmentVideoObjectDetectionarn:aws:lambda:ap-south-1:565803892007:function:ACS-AdjustmentVideoObjectDetectionarn:aws:lambda:eu-central-1:203001061592:function:ACS-AdjustmentVideoObjectDetectionarn:aws:lambda:ap-northeast-2:845288260483:function:ACS-AdjustmentVideoObjectDetectionarn:aws:lambda:eu-west-2:487402164563:function:ACS-AdjustmentVideoObjectDetectionarn:aws:lambda:ap-southeast-1:377565633583:function:ACS-AdjustmentVideoObjectDetectionarn:aws:lambda:ca-central-1:918755190332:function:ACS-AdjustmentVideoObjectDetection // // Video @@ -346,14 +344,14 @@ type AnnotationConsolidationConfig struct { // adjust bounding boxes that workers have added to video frames to track object // movement across a sequence of video frames. // - // * + // * // arn:aws:lambda:us-east-1:432418664414:function:ACS-AdjustmentVideoObjectTrackingarn:aws:lambda:us-east-2:266458841044:function:ACS-AdjustmentVideoObjectTrackingarn:aws:lambda:us-west-2:081040173940:function:ACS-AdjustmentVideoObjectTrackingarn:aws:lambda:eu-west-1:568282634449:function:ACS-AdjustmentVideoObjectTrackingarn:aws:lambda:ap-northeast-1:477331159723:function:ACS-AdjustmentVideoObjectTrackingarn:aws:lambda:ap-southeast-2:454466003867:function:ACS-AdjustmentVideoObjectTrackingarn:aws:lambda:ap-south-1:565803892007:function:ACS-AdjustmentVideoObjectTrackingarn:aws:lambda:eu-central-1:203001061592:function:ACS-AdjustmentVideoObjectTrackingarn:aws:lambda:ap-northeast-2:845288260483:function:ACS-AdjustmentVideoObjectTrackingarn:aws:lambda:eu-west-2:487402164563:function:ACS-AdjustmentVideoObjectTrackingarn:aws:lambda:ap-southeast-1:377565633583:function:ACS-AdjustmentVideoObjectTrackingarn:aws:lambda:ca-central-1:918755190332:function:ACS-AdjustmentVideoObjectTracking // // 3D // point cloud object detection adjustment - Use this task type when you want // workers to adjust 3D cuboids around objects in a 3D point cloud. 
// - // * + // * // arn:aws:lambda:us-east-1:432418664414:function:ACS-Adjustment3DPointCloudObjectDetectionarn:aws:lambda:us-east-2:266458841044:function:ACS-Adjustment3DPointCloudObjectDetectionarn:aws:lambda:us-west-2:081040173940:function:ACS-Adjustment3DPointCloudObjectDetectionarn:aws:lambda:eu-west-1:568282634449:function:ACS-Adjustment3DPointCloudObjectDetectionarn:aws:lambda:ap-northeast-1:477331159723:function:ACS-Adjustment3DPointCloudObjectDetectionarn:aws:lambda:ap-southeast-2:454466003867:function:ACS-Adjustment3DPointCloudObjectDetectionarn:aws:lambda:ap-south-1:565803892007:function:ACS-Adjustment3DPointCloudObjectDetectionarn:aws:lambda:eu-central-1:203001061592:function:ACS-Adjustment3DPointCloudObjectDetectionarn:aws:lambda:ap-northeast-2:845288260483:function:ACS-Adjustment3DPointCloudObjectDetectionarn:aws:lambda:eu-west-2:487402164563:function:ACS-Adjustment3DPointCloudObjectDetectionarn:aws:lambda:ap-southeast-1:377565633583:function:ACS-Adjustment3DPointCloudObjectDetectionarn:aws:lambda:ca-central-1:918755190332:function:ACS-Adjustment3DPointCloudObjectDetection // // 3D @@ -361,7 +359,7 @@ type AnnotationConsolidationConfig struct { // workers to adjust 3D cuboids around objects that appear in a sequence of 3D // point cloud frames. // - // * + // * // arn:aws:lambda:us-east-1:432418664414:function:ACS-Adjustment3DPointCloudObjectTrackingarn:aws:lambda:us-east-2:266458841044:function:ACS-Adjustment3DPointCloudObjectTrackingarn:aws:lambda:us-west-2:081040173940:function:ACS-Adjustment3DPointCloudObjectTrackingarn:aws:lambda:eu-west-1:568282634449:function:ACS-Adjustment3DPointCloudObjectTrackingarn:aws:lambda:ap-northeast-1:477331159723:function:ACS-Adjustment3DPointCloudObjectTrackingarn:aws:lambda:ap-southeast-2:454466003867:function:ACS-Adjustment3DPointCloudObjectTrackingarn:aws:lambda:ap-south-1:565803892007:function:ACS-Adjustment3DPointCloudObjectTrackingarn:aws:lambda:eu-central-1:203001061592:function:ACS-Adjustment3DPointCloudObjectTrackingarn:aws:lambda:ap-northeast-2:845288260483:function:ACS-Adjustment3DPointCloudObjectTrackingarn:aws:lambda:eu-west-2:487402164563:function:ACS-Adjustment3DPointCloudObjectTrackingarn:aws:lambda:ap-southeast-1:377565633583:function:ACS-Adjustment3DPointCloudObjectTrackingarn:aws:lambda:ca-central-1:918755190332:function:ACS-Adjustment3DPointCloudObjectTracking // // 3D @@ -369,7 +367,7 @@ type AnnotationConsolidationConfig struct { // workers to adjust a point-level semantic segmentation masks using a paint // tool. 
// - // * + // * // arn:aws:lambda:us-east-1:432418664414:function:ACS-Adjustment3DPointCloudSemanticSegmentationarn:aws:lambda:us-east-2:266458841044:function:ACS-Adjustment3DPointCloudSemanticSegmentationarn:aws:lambda:us-west-2:081040173940:function:ACS-Adjustment3DPointCloudSemanticSegmentationarn:aws:lambda:eu-west-1:568282634449:function:ACS-Adjustment3DPointCloudSemanticSegmentationarn:aws:lambda:ap-northeast-1:477331159723:function:ACS-Adjustment3DPointCloudSemanticSegmentationarn:aws:lambda:ap-southeast-2:454466003867:function:ACS-Adjustment3DPointCloudSemanticSegmentationarn:aws:lambda:ap-south-1:565803892007:function:ACS-Adjustment3DPointCloudSemanticSegmentationarn:aws:lambda:eu-central-1:203001061592:function:ACS-Adjustment3DPointCloudSemanticSegmentationarn:aws:lambda:ap-northeast-2:845288260483:function:ACS-Adjustment3DPointCloudSemanticSegmentationarn:aws:lambda:eu-west-2:487402164563:function:ACS-Adjustment3DPointCloudSemanticSegmentationarn:aws:lambda:ap-southeast-1:377565633583:function:ACS-Adjustment3DPointCloudSemanticSegmentationarn:aws:lambda:ca-central-1:918755190332:function:ACS-Adjustment3DPointCloudSemanticSegmentation // // This member is required. @@ -570,69 +568,69 @@ type AutoMLJobObjective struct { // machine learning system. This metric is optimized during training to provide the // best estimate for model parameter values from data. Here are the options: // - // * + // * // MSE: The mean squared error (MSE) is the average of the squared differences // between the predicted and actual values. It is used for regression. MSE values // are always positive, the better a model is at predicting the actual values the // smaller the MSE value. When the data contains outliers, they tend to dominate // the MSE which might cause subpar prediction performance. // - // * Accuracy: The - // ratio of the number correctly classified items to the total number (correctly - // and incorrectly) classified. It is used for binary and multiclass - // classification. Measures how close the predicted class values are to the actual - // values. Accuracy values vary between zero and one, one being perfect accuracy - // and zero perfect inaccuracy. - // - // * F1: The F1 score is the harmonic mean of the - // precision and recall. It is used for binary classification into classes - // traditionally referred to as positive and negative. Predictions are said to be - // true when they match their actual (correct) class; false when they do not. - // Precision is the ratio of the true positive predictions to all positive - // predictions (including the false positives) in a data set and measures the - // quality of the prediction when it predicts the positive class. Recall (or - // sensitivity) is the ratio of the true positive predictions to all actual - // positive instances and measures how completely a model predicts the actual class - // members in a data set. The standard F1 score weighs precision and recall - // equally. But which metric is paramount typically depends on specific aspects of - // a problem. F1 scores vary between zero and one, one being the best possible - // performance and zero the worst. - // - // * AUC: The area under the curve (AUC) - // metric is used to compare and evaluate binary classification by algorithms such - // as logistic regression that return probabilities. A threshold is needed to map - // the probabilities into classifications. 
The relevant curve is the receiver - // operating characteristic curve that plots the true positive rate (TPR) of - // predictions (or recall) against the false positive rate (FPR) as a function of - // the threshold value, above which a prediction is considered positive. Increasing - // the threshold results in fewer false positives but more false negatives. AUC is - // the area under this receiver operating characteristic curve and so provides an - // aggregated measure of the model performance across all possible classification - // thresholds. The AUC score can also be interpreted as the probability that a - // randomly selected positive data point is more likely to be predicted positive - // than a randomly selected negative example. AUC scores vary between zero and one, - // one being perfect accuracy and one half not better than a random classifier. - // Values less that one half predict worse than a random predictor and such - // consistently bad predictors can be inverted to obtain better than random - // predictors. - // - // * F1macro: The F1macro score applies F1 scoring to multiclass - // classification. In this context, you have multiple classes to predict. You just - // calculate the precision and recall for each class as you did for the positive - // class in binary classification. Then used these values to calculate the F1 score - // for each class and average them to obtain the F1macro score. F1macro scores vary - // between zero and one, one being the best possible performance and zero the - // worst. - // - // If you do not specify a metric explicitly, the default behavior is to - // automatically use: - // - // * MSE: for regression. - // - // * F1: for binary - // classification - // - // * Accuracy: for multiclass classification. + // * Accuracy: The ratio + // of the number correctly classified items to the total number (correctly and + // incorrectly) classified. It is used for binary and multiclass classification. + // Measures how close the predicted class values are to the actual values. Accuracy + // values vary between zero and one, one being perfect accuracy and zero perfect + // inaccuracy. + // + // * F1: The F1 score is the harmonic mean of the precision and + // recall. It is used for binary classification into classes traditionally referred + // to as positive and negative. Predictions are said to be true when they match + // their actual (correct) class; false when they do not. Precision is the ratio of + // the true positive predictions to all positive predictions (including the false + // positives) in a data set and measures the quality of the prediction when it + // predicts the positive class. Recall (or sensitivity) is the ratio of the true + // positive predictions to all actual positive instances and measures how + // completely a model predicts the actual class members in a data set. The standard + // F1 score weighs precision and recall equally. But which metric is paramount + // typically depends on specific aspects of a problem. F1 scores vary between zero + // and one, one being the best possible performance and zero the worst. + // + // * AUC: The + // area under the curve (AUC) metric is used to compare and evaluate binary + // classification by algorithms such as logistic regression that return + // probabilities. A threshold is needed to map the probabilities into + // classifications. 
The relevant curve is the receiver operating characteristic + // curve that plots the true positive rate (TPR) of predictions (or recall) against + // the false positive rate (FPR) as a function of the threshold value, above which + // a prediction is considered positive. Increasing the threshold results in fewer + // false positives but more false negatives. AUC is the area under this receiver + // operating characteristic curve and so provides an aggregated measure of the + // model performance across all possible classification thresholds. The AUC score + // can also be interpreted as the probability that a randomly selected positive + // data point is more likely to be predicted positive than a randomly selected + // negative example. AUC scores vary between zero and one, one being perfect + // accuracy and one half not better than a random classifier. Values less that one + // half predict worse than a random predictor and such consistently bad predictors + // can be inverted to obtain better than random predictors. + // + // * F1macro: The F1macro + // score applies F1 scoring to multiclass classification. In this context, you have + // multiple classes to predict. You just calculate the precision and recall for + // each class as you did for the positive class in binary classification. Then used + // these values to calculate the F1 score for each class and average them to obtain + // the F1macro score. F1macro scores vary between zero and one, one being the best + // possible performance and zero the worst. + // + // If you do not specify a metric + // explicitly, the default behavior is to automatically use: + // + // * MSE: for + // regression. + // + // * F1: for binary classification + // + // * Accuracy: for multiclass + // classification. // // This member is required. MetricName AutoMLMetricEnum @@ -1415,38 +1413,38 @@ type EndpointSummary struct { // The status of the endpoint. // - // * OutOfService: Endpoint is not available to - // take incoming requests. - // - // * Creating: CreateEndpoint is executing. + // * OutOfService: Endpoint is not available to take + // incoming requests. // - // * - // Updating: UpdateEndpoint or UpdateEndpointWeightsAndCapacities is executing. + // * Creating: CreateEndpoint is executing. // + // * Updating: + // UpdateEndpoint or UpdateEndpointWeightsAndCapacities is executing. // - // * SystemUpdating: Endpoint is undergoing maintenance and cannot be updated or + // * + // SystemUpdating: Endpoint is undergoing maintenance and cannot be updated or // deleted or re-scaled until it has completed. This maintenance operation does not // change any customer-specified values such as VPC config, KMS encryption, model, // instance type, or instance count. // - // * RollingBack: Endpoint fails to scale up - // or down or change its variant weight and is in the process of rolling back to - // its previous configuration. Once the rollback completes, endpoint returns to an + // * RollingBack: Endpoint fails to scale up or + // down or change its variant weight and is in the process of rolling back to its + // previous configuration. Once the rollback completes, endpoint returns to an // InService status. This transitional status only applies to an endpoint that has // autoscaling enabled and is undergoing variant weight or capacity changes as part // of an UpdateEndpointWeightsAndCapacities call or when the // UpdateEndpointWeightsAndCapacities operation is called explicitly. 
// - // * - // InService: Endpoint is available to process incoming requests. + // * InService: + // Endpoint is available to process incoming requests. // - // * Deleting: - // DeleteEndpoint is executing. + // * Deleting: DeleteEndpoint + // is executing. // - // * Failed: Endpoint could not be created, - // updated, or re-scaled. Use DescribeEndpointOutput$FailureReason for information - // about the failure. DeleteEndpoint is the only operation that can be performed on - // a failed endpoint. + // * Failed: Endpoint could not be created, updated, or re-scaled. + // Use DescribeEndpointOutput$FailureReason for information about the failure. + // DeleteEndpoint is the only operation that can be performed on a failed + // endpoint. // // To get a list of endpoints with a specified status, use the // ListEndpointsInput$StatusEquals filter. @@ -1501,12 +1499,12 @@ type Experiment struct { // Associates a SageMaker job as a trial component with an experiment and trial. // Specified when you call the following APIs: // -// * CreateProcessingJob +// * CreateProcessingJob // -// * +// * // CreateTrainingJob // -// * CreateTransformJob +// * CreateTransformJob type ExperimentConfig struct { // The name of an existing experiment to associate the trial component with. @@ -1634,31 +1632,31 @@ type Filter struct { // for text properties. A SearchExpression can include the Contains operator // multiple times when the value of Name is one of the following: // - // * + // * // Experiment.DisplayName // - // * Experiment.ExperimentName + // * Experiment.ExperimentName // - // * Experiment.Tags + // * Experiment.Tags // + // * + // Trial.DisplayName // - // * Trial.DisplayName - // - // * Trial.TrialName + // * Trial.TrialName // - // * Trial.Tags + // * Trial.Tags // - // * + // * // TrialComponent.DisplayName // - // * TrialComponent.TrialComponentName + // * TrialComponent.TrialComponentName // - // * + // * // TrialComponent.Tags // - // * TrialComponent.InputArtifacts + // * TrialComponent.InputArtifacts // - // * + // * // TrialComponent.OutputArtifacts // // A SearchExpression can include only one Contains @@ -1847,222 +1845,218 @@ type HumanLoopConfig struct { // are in US dollars and should be based on the complexity of the task; the longer // it takes in your initial testing, the more you should offer. // - // * 0.036 + // * 0.036 // - // * + // * // 0.048 // - // * 0.060 + // * 0.060 // - // * 0.072 + // * 0.072 // - // * 0.120 + // * 0.120 // - // * 0.240 + // * 0.240 // - // * 0.360 + // * 0.360 // - // * - // 0.480 + // * 0.480 // - // * 0.600 + // * 0.600 // - // * 0.720 + // * 0.720 // - // * 0.840 + // * + // 0.840 // - // * 0.960 + // * 0.960 // - // * 1.080 + // * 1.080 // - // * - // 1.200 + // * 1.200 // - // Use one of the following prices for image classification, text - // classification, and custom tasks. Prices are in US dollars. + // Use one of the following prices for image + // classification, text classification, and custom tasks. Prices are in US + // dollars. 
// - // * 0.012 + // * 0.012 // - // * - // 0.024 + // * 0.024 // - // * 0.036 + // * 0.036 // - // * 0.048 + // * 0.048 // - // * 0.060 + // * 0.060 // - // * 0.072 + // * 0.072 // - // * 0.120 + // * 0.120 // - // * + // * // 0.240 // - // * 0.360 + // * 0.360 // - // * 0.480 + // * 0.480 // - // * 0.600 + // * 0.600 // - // * 0.720 + // * 0.720 // - // * 0.840 + // * 0.840 // - // * - // 0.960 + // * 0.960 // - // * 1.080 + // * 1.080 // - // * 1.200 - // - // Use one of the following prices for semantic - // segmentation tasks. Prices are in US dollars. + // * + // 1.200 // - // * 0.840 + // Use one of the following prices for semantic segmentation tasks. Prices + // are in US dollars. // - // * 0.960 + // * 0.840 // - // * - // 1.080 + // * 0.960 // - // * 1.200 + // * 1.080 // - // Use one of the following prices for Textract AnalyzeDocument - // Important Form Key Amazon Augmented AI review tasks. Prices are in US dollars. + // * 1.200 // + // Use one of the following + // prices for Textract AnalyzeDocument Important Form Key Amazon Augmented AI + // review tasks. Prices are in US dollars. // // * 2.400 // - // * 2.280 + // * 2.280 // - // * 2.160 + // * 2.160 // - // * 2.040 + // * 2.040 // - // * 1.920 + // * + // 1.920 // - // * 1.800 + // * 1.800 // - // * - // 1.680 + // * 1.680 // - // * 1.560 + // * 1.560 // - // * 1.440 + // * 1.440 // - // * 1.320 + // * 1.320 // - // * 1.200 + // * 1.200 // - // * 1.080 + // * 1.080 // - // * - // 0.960 + // * 0.960 // - // * 0.840 + // * + // 0.840 // - // * 0.720 + // * 0.720 // - // * 0.600 + // * 0.600 // - // * 0.480 + // * 0.480 // - // * 0.360 + // * 0.360 // - // * - // 0.240 + // * 0.240 // - // * 0.120 + // * 0.120 // - // * 0.072 + // * 0.072 // - // * 0.060 + // * 0.060 // - // * 0.048 + // * + // 0.048 // - // * 0.036 + // * 0.036 // - // * - // 0.024 + // * 0.024 // - // * 0.012 + // * 0.012 // - // Use one of the following prices for Rekognition - // DetectModerationLabels Amazon Augmented AI review tasks. Prices are in US - // dollars. + // Use one of the following prices for + // Rekognition DetectModerationLabels Amazon Augmented AI review tasks. Prices are + // in US dollars. // - // * 1.200 + // * 1.200 // - // * 1.080 + // * 1.080 // - // * 0.960 + // * 0.960 // - // * 0.840 + // * 0.840 // - // * 0.720 + // * 0.720 // - // * - // 0.600 + // * 0.600 // - // * 0.480 + // * 0.480 // - // * 0.360 + // * + // 0.360 // - // * 0.240 + // * 0.240 // - // * 0.120 + // * 0.120 // - // * 0.072 + // * 0.072 // - // * - // 0.060 + // * 0.060 // - // * 0.048 + // * 0.048 // - // * 0.036 + // * 0.036 // - // * 0.024 + // * 0.024 // - // * 0.012 + // * + // 0.012 // - // Use one of the - // following prices for Amazon Augmented AI custom human review tasks. Prices are - // in US dollars. + // Use one of the following prices for Amazon Augmented AI custom human + // review tasks. Prices are in US dollars. // - // * 1.200 + // * 1.200 // - // * 1.080 + // * 1.080 // - // * 0.960 + // * 0.960 // - // * 0.840 + // * 0.840 // - // * + // * // 0.720 // - // * 0.600 + // * 0.600 // - // * 0.480 + // * 0.480 // - // * 0.360 + // * 0.360 // - // * 0.240 + // * 0.240 // - // * 0.120 + // * 0.120 // - // * - // 0.072 + // * 0.072 // - // * 0.060 + // * 0.060 // - // * 0.048 + // * 0.048 // - // * 0.036 + // * + // 0.036 // - // * 0.024 + // * 0.024 // - // * 0.012 + // * 0.012 PublicWorkforceTaskPrice *PublicWorkforceTaskPrice // The length of time that a task remains available for review by human workers. 
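
The bulk of this patch renames the generated sagemaker enum constants from underscore style (for example TrainingJobStatusIn_progress) to CamelCase (TrainingJobStatusInProgress), so callers that referenced the old identifiers need a matching rename. Below is a minimal sketch, not part of the generated code, showing the renamed constants in use; the import path follows the usual aws-sdk-go-v2 module layout and the surrounding program is assumed.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/sagemaker/types"
)

// classifyTrainingJob maps a TrainingJobStatus onto a coarse outcome using
// only the renamed constants introduced by this patch.
func classifyTrainingJob(status types.TrainingJobStatus) string {
	switch status {
	case types.TrainingJobStatusInProgress, types.TrainingJobStatusStopping:
		return "still running"
	case types.TrainingJobStatusCompleted:
		return "completed"
	case types.TrainingJobStatusFailed, types.TrainingJobStatusStopped:
		return "ended without completing"
	default:
		// The Values() doc above notes that new values can be added over time,
		// so unrecognized strings are handled explicitly.
		return fmt.Sprintf("unrecognized status %q", status)
	}
}

func main() {
	fmt.Println(classifyTrainingJob(types.TrainingJobStatusCompleted))
}

The same mechanical rename applies to TransformJobStatus, TrialComponentPrimaryStatus, and the instance-type enums earlier in this file.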
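
For the AutoMLJobObjective metric descriptions above: the F1 score is the harmonic mean of precision and recall, and F1macro averages the per-class F1 scores. A small illustrative sketch of those two formulas follows; the helper names are hypothetical and not part of the SDK.

package main

import "fmt"

// f1 returns the harmonic mean of precision and recall, as described for the
// F1 objective metric: 2 * P * R / (P + R).
func f1(precision, recall float64) float64 {
	if precision+recall == 0 {
		return 0
	}
	return 2 * precision * recall / (precision + recall)
}

// f1Macro averages per-class F1 scores, as described for the F1macro metric.
func f1Macro(precisions, recalls []float64) float64 {
	var sum float64
	for i := range precisions {
		sum += f1(precisions[i], recalls[i])
	}
	return sum / float64(len(precisions))
}

func main() {
	fmt.Printf("F1 = %.3f\n", f1(0.8, 0.6)) // 2*0.8*0.6/1.4 ≈ 0.686
}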
@@ -2110,40 +2104,40 @@ type HumanTaskConfig struct { // Bounding box - Finds the most similar boxes from different workers based on the // Jaccard index of the boxes. // - // * + // * // arn:aws:lambda:us-east-1:432418664414:function:PRE-BoundingBox // - // * + // * // arn:aws:lambda:us-east-2:266458841044:function:PRE-BoundingBox // - // * + // * // arn:aws:lambda:us-west-2:081040173940:function:PRE-BoundingBox // - // * + // * // arn:aws:lambda:ca-central-1:918755190332:function:PRE-BoundingBox // - // * + // * // arn:aws:lambda:eu-west-1:568282634449:function:PRE-BoundingBox // - // * + // * // arn:aws:lambda:eu-west-2:487402164563:function:PRE-BoundingBox // - // * + // * // arn:aws:lambda:eu-central-1:203001061592:function:PRE-BoundingBox // - // * + // * // arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-BoundingBox // - // * + // * // arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-BoundingBox // - // * + // * // arn:aws:lambda:ap-south-1:565803892007:function:PRE-BoundingBox // - // * + // * // arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-BoundingBox // - // * + // * // arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-BoundingBox // // Image @@ -2151,40 +2145,40 @@ type HumanTaskConfig struct { // estimate the true class of an image based on annotations from individual // workers. // - // * + // * // arn:aws:lambda:us-east-1:432418664414:function:PRE-ImageMultiClass // - // * + // * // arn:aws:lambda:us-east-2:266458841044:function:PRE-ImageMultiClass // - // * + // * // arn:aws:lambda:us-west-2:081040173940:function:PRE-ImageMultiClass // - // * + // * // arn:aws:lambda:ca-central-1:918755190332:function:PRE-ImageMultiClass // - // * + // * // arn:aws:lambda:eu-west-1:568282634449:function:PRE-ImageMultiClass // - // * + // * // arn:aws:lambda:eu-west-2:487402164563:function:PRE-ImageMultiClass // - // * + // * // arn:aws:lambda:eu-central-1:203001061592:function:PRE-ImageMultiClass // - // * + // * // arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-ImageMultiClass // - // * + // * // arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-ImageMultiClass // - // * + // * // arn:aws:lambda:ap-south-1:565803892007:function:PRE-ImageMultiClass // - // * + // * // arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-ImageMultiClass // - // * + // * // arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-ImageMultiClass // // Multi-label @@ -2192,46 +2186,39 @@ type HumanTaskConfig struct { // to estimate the true classes of an image based on annotations from individual // workers. 
// - // * + // * // arn:aws:lambda:us-east-1:432418664414:function:PRE-ImageMultiClassMultiLabel // + // * + // arn:aws:lambda:us-east-2:266458841044:function:PRE-ImageMultiClassMultiLabel // - // * arn:aws:lambda:us-east-2:266458841044:function:PRE-ImageMultiClassMultiLabel - // - // - // * arn:aws:lambda:us-west-2:081040173940:function:PRE-ImageMultiClassMultiLabel - // + // * + // arn:aws:lambda:us-west-2:081040173940:function:PRE-ImageMultiClassMultiLabel // // * // arn:aws:lambda:ca-central-1:918755190332:function:PRE-ImageMultiClassMultiLabel // + // * + // arn:aws:lambda:eu-west-1:568282634449:function:PRE-ImageMultiClassMultiLabel // - // * arn:aws:lambda:eu-west-1:568282634449:function:PRE-ImageMultiClassMultiLabel - // - // - // * arn:aws:lambda:eu-west-2:487402164563:function:PRE-ImageMultiClassMultiLabel - // + // * + // arn:aws:lambda:eu-west-2:487402164563:function:PRE-ImageMultiClassMultiLabel // // * // arn:aws:lambda:eu-central-1:203001061592:function:PRE-ImageMultiClassMultiLabel // - // // * // arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-ImageMultiClassMultiLabel // - // // * // arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-ImageMultiClassMultiLabel // - // // * // arn:aws:lambda:ap-south-1:565803892007:function:PRE-ImageMultiClassMultiLabel // - // // * // arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-ImageMultiClassMultiLabel // - // // * // arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-ImageMultiClassMultiLabel // @@ -2239,39 +2226,38 @@ type HumanTaskConfig struct { // segmentation - Treats each pixel in an image as a multi-class classification and // treats pixel annotations from workers as "votes" for the correct label. // - // * + // * // arn:aws:lambda:us-east-1:432418664414:function:PRE-SemanticSegmentation // - // * + // * // arn:aws:lambda:us-east-2:266458841044:function:PRE-SemanticSegmentation // - // * + // * // arn:aws:lambda:us-west-2:081040173940:function:PRE-SemanticSegmentation // - // * + // * // arn:aws:lambda:ca-central-1:918755190332:function:PRE-SemanticSegmentation // + // * + // arn:aws:lambda:eu-west-1:568282634449:function:PRE-SemanticSegmentation // - // * arn:aws:lambda:eu-west-1:568282634449:function:PRE-SemanticSegmentation - // - // * + // * // arn:aws:lambda:eu-west-2:487402164563:function:PRE-SemanticSegmentation // - // * + // * // arn:aws:lambda:eu-central-1:203001061592:function:PRE-SemanticSegmentation // + // * + // arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-SemanticSegmentation // - // * arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-SemanticSegmentation - // - // - // * arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-SemanticSegmentation - // - // - // * arn:aws:lambda:ap-south-1:565803892007:function:PRE-SemanticSegmentation - // + // * + // arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-SemanticSegmentation // - // * arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-SemanticSegmentation + // * + // arn:aws:lambda:ap-south-1:565803892007:function:PRE-SemanticSegmentation // + // * + // arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-SemanticSegmentation // // * // arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-SemanticSegmentation @@ -2280,40 +2266,40 @@ type HumanTaskConfig struct { // classification - Uses a variant of the Expectation Maximization approach to // estimate the true class of text based on annotations from individual workers. 
// + // * + // arn:aws:lambda:us-east-1:432418664414:function:PRE-TextMultiClass // - // * arn:aws:lambda:us-east-1:432418664414:function:PRE-TextMultiClass - // - // * + // * // arn:aws:lambda:us-east-2:266458841044:function:PRE-TextMultiClass // - // * + // * // arn:aws:lambda:us-west-2:081040173940:function:PRE-TextMultiClass // - // * + // * // arn:aws:lambda:ca-central-1:918755190332:function:PRE-TextMultiClass // - // * + // * // arn:aws:lambda:eu-west-1:568282634449:function:PRE-TextMultiClass // - // * + // * // arn:aws:lambda:eu-west-2:487402164563:function:PRE-TextMultiClass // - // * + // * // arn:aws:lambda:eu-central-1:203001061592:function:PRE-TextMultiClass // - // * + // * // arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-TextMultiClass // - // * + // * // arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-TextMultiClass // - // * + // * // arn:aws:lambda:ap-south-1:565803892007:function:PRE-TextMultiClass // - // * + // * // arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-TextMultiClass // - // * + // * // arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-TextMultiClass // // Multi-label @@ -2321,45 +2307,39 @@ type HumanTaskConfig struct { // estimate the true classes of text based on annotations from individual // workers. // - // * + // * // arn:aws:lambda:us-east-1:432418664414:function:PRE-TextMultiClassMultiLabel // + // * + // arn:aws:lambda:us-east-2:266458841044:function:PRE-TextMultiClassMultiLabel // - // * arn:aws:lambda:us-east-2:266458841044:function:PRE-TextMultiClassMultiLabel - // - // - // * arn:aws:lambda:us-west-2:081040173940:function:PRE-TextMultiClassMultiLabel - // + // * + // arn:aws:lambda:us-west-2:081040173940:function:PRE-TextMultiClassMultiLabel // // * // arn:aws:lambda:ca-central-1:918755190332:function:PRE-TextMultiClassMultiLabel // + // * + // arn:aws:lambda:eu-west-1:568282634449:function:PRE-TextMultiClassMultiLabel // - // * arn:aws:lambda:eu-west-1:568282634449:function:PRE-TextMultiClassMultiLabel - // - // - // * arn:aws:lambda:eu-west-2:487402164563:function:PRE-TextMultiClassMultiLabel - // + // * + // arn:aws:lambda:eu-west-2:487402164563:function:PRE-TextMultiClassMultiLabel // // * // arn:aws:lambda:eu-central-1:203001061592:function:PRE-TextMultiClassMultiLabel // - // // * // arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-TextMultiClassMultiLabel // - // // * // arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-TextMultiClassMultiLabel // - // - // * arn:aws:lambda:ap-south-1:565803892007:function:PRE-TextMultiClassMultiLabel - // + // * + // arn:aws:lambda:ap-south-1:565803892007:function:PRE-TextMultiClassMultiLabel // // * // arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-TextMultiClassMultiLabel // - // // * // arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-TextMultiClassMultiLabel // @@ -2367,43 +2347,39 @@ type HumanTaskConfig struct { // entity recognition - Groups similar selections and calculates aggregate // boundaries, resolving to most-assigned label. 
// - // * + // * // arn:aws:lambda:us-east-1:432418664414:function:PRE-NamedEntityRecognition // - // * + // * // arn:aws:lambda:us-east-2:266458841044:function:PRE-NamedEntityRecognition // - // * + // * // arn:aws:lambda:us-west-2:081040173940:function:PRE-NamedEntityRecognition // - // * + // * // arn:aws:lambda:ca-central-1:918755190332:function:PRE-NamedEntityRecognition // + // * + // arn:aws:lambda:eu-west-1:568282634449:function:PRE-NamedEntityRecognition // - // * arn:aws:lambda:eu-west-1:568282634449:function:PRE-NamedEntityRecognition - // - // - // * arn:aws:lambda:eu-west-2:487402164563:function:PRE-NamedEntityRecognition - // - // - // * arn:aws:lambda:eu-central-1:203001061592:function:PRE-NamedEntityRecognition + // * + // arn:aws:lambda:eu-west-2:487402164563:function:PRE-NamedEntityRecognition // + // * + // arn:aws:lambda:eu-central-1:203001061592:function:PRE-NamedEntityRecognition // // * // arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-NamedEntityRecognition // - // // * // arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-NamedEntityRecognition // - // - // * arn:aws:lambda:ap-south-1:565803892007:function:PRE-NamedEntityRecognition - // + // * + // arn:aws:lambda:ap-south-1:565803892007:function:PRE-NamedEntityRecognition // // * // arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-NamedEntityRecognition // - // // * // arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-NamedEntityRecognition // @@ -2412,40 +2388,40 @@ type HumanTaskConfig struct { // using predefined labels that you specify. Workers are shown videos and are asked // to choose one label for each video. // - // * + // * // arn:aws:lambda:us-east-1:432418664414:function:PRE-VideoMultiClass // - // * + // * // arn:aws:lambda:us-east-2:266458841044:function:PRE-VideoMultiClass // - // * + // * // arn:aws:lambda:us-west-2:081040173940:function:PRE-VideoMultiClass // - // * + // * // arn:aws:lambda:eu-west-1:568282634449:function:PRE-VideoMultiClass // - // * + // * // arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-VideoMultiClass // - // * + // * // arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-VideoMultiClass // - // * + // * // arn:aws:lambda:ap-south-1:565803892007:function:PRE-VideoMultiClass // - // * + // * // arn:aws:lambda:eu-central-1:203001061592:function:PRE-VideoMultiClass // - // * + // * // arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-VideoMultiClass // - // * + // * // arn:aws:lambda:eu-west-2:487402164563:function:PRE-VideoMultiClass // - // * + // * // arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-VideoMultiClass // - // * + // * // arn:aws:lambda:ca-central-1:918755190332:function:PRE-VideoMultiClass // // Video @@ -2455,40 +2431,39 @@ type HumanTaskConfig struct { // and localize various objects in a series of video frames, such as cars, bikes, // and pedestrians. 
// - // * + // * // arn:aws:lambda:us-east-1:432418664414:function:PRE-VideoObjectDetection // - // * + // * // arn:aws:lambda:us-east-2:266458841044:function:PRE-VideoObjectDetection // - // * + // * // arn:aws:lambda:us-west-2:081040173940:function:PRE-VideoObjectDetection // - // * + // * // arn:aws:lambda:eu-west-1:568282634449:function:PRE-VideoObjectDetection // - // * + // * // arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-VideoObjectDetection // + // * + // arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-VideoObjectDetection // - // * arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-VideoObjectDetection - // - // - // * arn:aws:lambda:ap-south-1:565803892007:function:PRE-VideoObjectDetection - // - // - // * arn:aws:lambda:eu-central-1:203001061592:function:PRE-VideoObjectDetection - // + // * + // arn:aws:lambda:ap-south-1:565803892007:function:PRE-VideoObjectDetection // - // * arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-VideoObjectDetection + // * + // arn:aws:lambda:eu-central-1:203001061592:function:PRE-VideoObjectDetection // + // * + // arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-VideoObjectDetection // - // * arn:aws:lambda:eu-west-2:487402164563:function:PRE-VideoObjectDetection + // * + // arn:aws:lambda:eu-west-2:487402164563:function:PRE-VideoObjectDetection // - // * + // * // arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-VideoObjectDetection // - // // * // arn:aws:lambda:ca-central-1:918755190332:function:PRE-VideoObjectDetection // @@ -2498,41 +2473,41 @@ type HumanTaskConfig struct { // bounding boxes. For example, you can use this task to ask workers to track the // movement of objects, such as cars, bikes, and pedestrians. // - // * + // * // arn:aws:lambda:us-east-1:432418664414:function:PRE-VideoObjectTracking // - // * + // * // arn:aws:lambda:us-east-2:266458841044:function:PRE-VideoObjectTracking // - // * + // * // arn:aws:lambda:us-west-2:081040173940:function:PRE-VideoObjectTracking // - // * + // * // arn:aws:lambda:eu-west-1:568282634449:function:PRE-VideoObjectTracking // - // * + // * // arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-VideoObjectTracking // + // * + // arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-VideoObjectTracking // - // * arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-VideoObjectTracking - // - // - // * arn:aws:lambda:ap-south-1:565803892007:function:PRE-VideoObjectTracking + // * + // arn:aws:lambda:ap-south-1:565803892007:function:PRE-VideoObjectTracking // - // * + // * // arn:aws:lambda:eu-central-1:203001061592:function:PRE-VideoObjectTracking // - // * + // * // arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-VideoObjectTracking // + // * + // arn:aws:lambda:eu-west-2:487402164563:function:PRE-VideoObjectTracking // - // * arn:aws:lambda:eu-west-2:487402164563:function:PRE-VideoObjectTracking - // - // * + // * // arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-VideoObjectTracking // - // - // * arn:aws:lambda:ca-central-1:918755190332:function:PRE-VideoObjectTracking + // * + // arn:aws:lambda:ca-central-1:918755190332:function:PRE-VideoObjectTracking // // 3D // Point Cloud Modalities Use the following pre-annotation lambdas for 3D point @@ -2544,50 +2519,39 @@ type HumanTaskConfig struct { // identify different types of objects in a point cloud, such as cars, bikes, and // pedestrians. 
// - // * + // * // arn:aws:lambda:us-east-1:432418664414:function:PRE-3DPointCloudObjectDetection // - // // * // arn:aws:lambda:us-east-2:266458841044:function:PRE-3DPointCloudObjectDetection // - // // * // arn:aws:lambda:us-west-2:081040173940:function:PRE-3DPointCloudObjectDetection // - // // * // arn:aws:lambda:eu-west-1:568282634449:function:PRE-3DPointCloudObjectDetection // - // // * // arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-3DPointCloudObjectDetection // - // // * // arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-3DPointCloudObjectDetection // - // // * // arn:aws:lambda:ap-south-1:565803892007:function:PRE-3DPointCloudObjectDetection // - // // * // arn:aws:lambda:eu-central-1:203001061592:function:PRE-3DPointCloudObjectDetection // - // // * // arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-3DPointCloudObjectDetection // - // // * // arn:aws:lambda:eu-west-2:487402164563:function:PRE-3DPointCloudObjectDetection // - // // * // arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-3DPointCloudObjectDetection // - // // * // arn:aws:lambda:ca-central-1:918755190332:function:PRE-3DPointCloudObjectDetection // @@ -2597,50 +2561,39 @@ type HumanTaskConfig struct { // For example, you can use this task type to ask workers to track the movement of // vehicles across multiple point cloud frames. // - // * + // * // arn:aws:lambda:us-east-1:432418664414:function:PRE-3DPointCloudObjectTracking // - // // * // arn:aws:lambda:us-east-2:266458841044:function:PRE-3DPointCloudObjectTracking // - // // * // arn:aws:lambda:us-west-2:081040173940:function:PRE-3DPointCloudObjectTracking // - // // * // arn:aws:lambda:eu-west-1:568282634449:function:PRE-3DPointCloudObjectTracking // - // // * // arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-3DPointCloudObjectTracking // - // // * // arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-3DPointCloudObjectTracking // - // // * // arn:aws:lambda:ap-south-1:565803892007:function:PRE-3DPointCloudObjectTracking // - // // * // arn:aws:lambda:eu-central-1:203001061592:function:PRE-3DPointCloudObjectTracking // - // // * // arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-3DPointCloudObjectTracking // - // // * // arn:aws:lambda:eu-west-2:487402164563:function:PRE-3DPointCloudObjectTracking // - // // * // arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-3DPointCloudObjectTracking // - // // * // arn:aws:lambda:ca-central-1:918755190332:function:PRE-3DPointCloudObjectTracking // @@ -2650,50 +2603,39 @@ type HumanTaskConfig struct { // point cloud using different colors where each color is assigned to one of the // classes you specify. 
// - // * + // * // arn:aws:lambda:us-east-1:432418664414:function:PRE-3DPointCloudSemanticSegmentation // - // // * // arn:aws:lambda:us-east-2:266458841044:function:PRE-3DPointCloudSemanticSegmentation // - // // * // arn:aws:lambda:us-west-2:081040173940:function:PRE-3DPointCloudSemanticSegmentation // - // // * // arn:aws:lambda:eu-west-1:568282634449:function:PRE-3DPointCloudSemanticSegmentation // - // // * // arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-3DPointCloudSemanticSegmentation // - // // * // arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-3DPointCloudSemanticSegmentation // - // // * // arn:aws:lambda:ap-south-1:565803892007:function:PRE-3DPointCloudSemanticSegmentation // - // // * // arn:aws:lambda:eu-central-1:203001061592:function:PRE-3DPointCloudSemanticSegmentation // - // // * // arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-3DPointCloudSemanticSegmentation // - // // * // arn:aws:lambda:eu-west-2:487402164563:function:PRE-3DPointCloudSemanticSegmentation // - // // * // arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-3DPointCloudSemanticSegmentation // - // // * // arn:aws:lambda:ca-central-1:918755190332:function:PRE-3DPointCloudSemanticSegmentation // @@ -2706,50 +2648,39 @@ type HumanTaskConfig struct { // approach to estimate the true class of verification judgement for bounding box // labels based on annotations from individual workers. // - // * + // * // arn:aws:lambda:us-east-1:432418664414:function:PRE-Adjustment3DPointCloudObjectTracking // - // // * // arn:aws:lambda:us-east-2:266458841044:function:PRE-Adjustment3DPointCloudObjectTracking // - // // * // arn:aws:lambda:us-west-2:081040173940:function:PRE-Adjustment3DPointCloudObjectTracking // - // // * // arn:aws:lambda:eu-west-1:568282634449:function:PRE-Adjustment3DPointCloudObjectTracking // - // // * // arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-Adjustment3DPointCloudObjectTracking // - // // * // arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-Adjustment3DPointCloudObjectTracking // - // // * // arn:aws:lambda:ap-south-1:565803892007:function:PRE-Adjustment3DPointCloudObjectTracking // - // // * // arn:aws:lambda:eu-central-1:203001061592:function:PRE-Adjustment3DPointCloudObjectTracking // - // // * // arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-Adjustment3DPointCloudObjectTracking // - // // * // arn:aws:lambda:eu-west-2:487402164563:function:PRE-Adjustment3DPointCloudObjectTracking // - // // * // arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-Adjustment3DPointCloudObjectTracking // - // // * // arn:aws:lambda:ca-central-1:918755190332:function:PRE-Adjustment3DPointCloudObjectTracking // @@ -2757,43 +2688,39 @@ type HumanTaskConfig struct { // box adjustment - Finds the most similar boxes from different workers based on // the Jaccard index of the adjusted annotations. 
// - // * + // * // arn:aws:lambda:us-east-1:432418664414:function:PRE-AdjustmentBoundingBox // - // * + // * // arn:aws:lambda:us-east-2:266458841044:function:PRE-AdjustmentBoundingBox // - // * + // * // arn:aws:lambda:us-west-2:081040173940:function:PRE-AdjustmentBoundingBox // - // * + // * // arn:aws:lambda:ca-central-1:918755190332:function:PRE-AdjustmentBoundingBox // + // * + // arn:aws:lambda:eu-west-1:568282634449:function:PRE-AdjustmentBoundingBox // - // * arn:aws:lambda:eu-west-1:568282634449:function:PRE-AdjustmentBoundingBox - // - // - // * arn:aws:lambda:eu-west-2:487402164563:function:PRE-AdjustmentBoundingBox - // - // - // * arn:aws:lambda:eu-central-1:203001061592:function:PRE-AdjustmentBoundingBox + // * + // arn:aws:lambda:eu-west-2:487402164563:function:PRE-AdjustmentBoundingBox // + // * + // arn:aws:lambda:eu-central-1:203001061592:function:PRE-AdjustmentBoundingBox // // * // arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-AdjustmentBoundingBox // - // // * // arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-AdjustmentBoundingBox // - // - // * arn:aws:lambda:ap-south-1:565803892007:function:PRE-AdjustmentBoundingBox - // + // * + // arn:aws:lambda:ap-south-1:565803892007:function:PRE-AdjustmentBoundingBox // // * // arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-AdjustmentBoundingBox // - // // * // arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-AdjustmentBoundingBox // @@ -2802,50 +2729,39 @@ type HumanTaskConfig struct { // approach to estimate the true class of verification judgment for semantic // segmentation labels based on annotations from individual workers. // - // * + // * // arn:aws:lambda:us-east-1:432418664414:function:PRE-VerificationSemanticSegmentation // - // // * // arn:aws:lambda:us-east-2:266458841044:function:PRE-VerificationSemanticSegmentation // - // // * // arn:aws:lambda:us-west-2:081040173940:function:PRE-VerificationSemanticSegmentation // - // // * // arn:aws:lambda:ca-central-1:918755190332:function:PRE-VerificationSemanticSegmentation // - // // * // arn:aws:lambda:eu-west-1:568282634449:function:PRE-VerificationSemanticSegmentation // - // // * // arn:aws:lambda:eu-west-2:487402164563:function:PRE-VerificationSemanticSegmentation // - // // * // arn:aws:lambda:eu-central-1:203001061592:function:PRE-VerificationSemanticSegmentation // - // // * // arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-VerificationSemanticSegmentation // - // // * // arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-VerificationSemanticSegmentation // - // // * // arn:aws:lambda:ap-south-1:565803892007:function:PRE-VerificationSemanticSegmentation // - // // * // arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-VerificationSemanticSegmentation // - // // * // arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-VerificationSemanticSegmentation // @@ -2854,50 +2770,39 @@ type HumanTaskConfig struct { // classification and treats pixel adjusted annotations from workers as "votes" for // the correct label. 
// - // * + // * // arn:aws:lambda:us-east-1:432418664414:function:PRE-AdjustmentSemanticSegmentation // - // // * // arn:aws:lambda:us-east-2:266458841044:function:PRE-AdjustmentSemanticSegmentation // - // // * // arn:aws:lambda:us-west-2:081040173940:function:PRE-AdjustmentSemanticSegmentation // - // // * // arn:aws:lambda:ca-central-1:918755190332:function:PRE-AdjustmentSemanticSegmentation // - // // * // arn:aws:lambda:eu-west-1:568282634449:function:PRE-AdjustmentSemanticSegmentation // - // // * // arn:aws:lambda:eu-west-2:487402164563:function:PRE-AdjustmentSemanticSegmentation // - // // * // arn:aws:lambda:eu-central-1:203001061592:function:PRE-AdjustmentSemanticSegmentation // - // // * // arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-AdjustmentSemanticSegmentation // - // // * // arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-AdjustmentSemanticSegmentation // - // // * // arn:aws:lambda:ap-south-1:565803892007:function:PRE-AdjustmentSemanticSegmentation // - // // * // arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-AdjustmentSemanticSegmentation // - // // * // arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-AdjustmentSemanticSegmentation // @@ -2906,50 +2811,39 @@ type HumanTaskConfig struct { // adjust bounding boxes that workers have added to video frames to classify and // localize objects in a sequence of video frames. // - // * + // * // arn:aws:lambda:us-east-1:432418664414:function:PRE-AdjustmentVideoObjectDetection // - // // * // arn:aws:lambda:us-east-2:266458841044:function:PRE-AdjustmentVideoObjectDetection // - // // * // arn:aws:lambda:us-west-2:081040173940:function:PRE-AdjustmentVideoObjectDetection // - // // * // arn:aws:lambda:eu-west-1:568282634449:function:PRE-AdjustmentVideoObjectDetection // - // // * // arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-AdjustmentVideoObjectDetection // - // // * // arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-AdjustmentVideoObjectDetection // - // // * // arn:aws:lambda:ap-south-1:565803892007:function:PRE-AdjustmentVideoObjectDetection // - // // * // arn:aws:lambda:eu-central-1:203001061592:function:PRE-AdjustmentVideoObjectDetection // - // // * // arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-AdjustmentVideoObjectDetection // - // // * // arn:aws:lambda:eu-west-2:487402164563:function:PRE-AdjustmentVideoObjectDetection // - // // * // arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-AdjustmentVideoObjectDetection // - // // * // arn:aws:lambda:ca-central-1:918755190332:function:PRE-AdjustmentVideoObjectDetection // @@ -2958,50 +2852,39 @@ type HumanTaskConfig struct { // adjust bounding boxes that workers have added to video frames to track object // movement across a sequence of video frames. 
// - // * + // * // arn:aws:lambda:us-east-1:432418664414:function:PRE-AdjustmentVideoObjectTracking // - // // * // arn:aws:lambda:us-east-2:266458841044:function:PRE-AdjustmentVideoObjectTracking // - // // * // arn:aws:lambda:us-west-2:081040173940:function:PRE-AdjustmentVideoObjectTracking // - // // * // arn:aws:lambda:eu-west-1:568282634449:function:PRE-AdjustmentVideoObjectTracking // - // // * // arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-AdjustmentVideoObjectTracking // - // // * // arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-AdjustmentVideoObjectTracking // - // // * // arn:aws:lambda:ap-south-1:565803892007:function:PRE-AdjustmentVideoObjectTracking // - // // * // arn:aws:lambda:eu-central-1:203001061592:function:PRE-AdjustmentVideoObjectTracking // - // // * // arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-AdjustmentVideoObjectTracking // - // // * // arn:aws:lambda:eu-west-2:487402164563:function:PRE-AdjustmentVideoObjectTracking // - // // * // arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-AdjustmentVideoObjectTracking // - // // * // arn:aws:lambda:ca-central-1:918755190332:function:PRE-AdjustmentVideoObjectTracking // @@ -3009,50 +2892,39 @@ type HumanTaskConfig struct { // point cloud object detection adjustment - Adjust 3D cuboids in a point cloud // frame. // - // * + // * // arn:aws:lambda:us-east-1:432418664414:function:PRE-Adjustment3DPointCloudObjectDetection // - // // * // arn:aws:lambda:us-east-2:266458841044:function:PRE-Adjustment3DPointCloudObjectDetection // - // // * // arn:aws:lambda:us-west-2:081040173940:function:PRE-Adjustment3DPointCloudObjectDetection // - // // * // arn:aws:lambda:eu-west-1:568282634449:function:PRE-Adjustment3DPointCloudObjectDetection // - // // * // arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-Adjustment3DPointCloudObjectDetection // - // // * // arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-Adjustment3DPointCloudObjectDetection // - // // * // arn:aws:lambda:ap-south-1:565803892007:function:PRE-Adjustment3DPointCloudObjectDetection // - // // * // arn:aws:lambda:eu-central-1:203001061592:function:PRE-Adjustment3DPointCloudObjectDetection // - // // * // arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-Adjustment3DPointCloudObjectDetection // - // // * // arn:aws:lambda:eu-west-2:487402164563:function:PRE-Adjustment3DPointCloudObjectDetection // - // // * // arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-Adjustment3DPointCloudObjectDetection // - // // * // arn:aws:lambda:ca-central-1:918755190332:function:PRE-Adjustment3DPointCloudObjectDetection // @@ -3060,50 +2932,39 @@ type HumanTaskConfig struct { // point cloud object tracking adjustment - Adjust 3D cuboids across a sequence of // point cloud frames. 
// - // * + // * // arn:aws:lambda:us-east-1:432418664414:function:PRE-Adjustment3DPointCloudObjectTracking // - // // * // arn:aws:lambda:us-east-2:266458841044:function:PRE-Adjustment3DPointCloudObjectTracking // - // // * // arn:aws:lambda:us-west-2:081040173940:function:PRE-Adjustment3DPointCloudObjectTracking // - // // * // arn:aws:lambda:eu-west-1:568282634449:function:PRE-Adjustment3DPointCloudObjectTracking // - // // * // arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-Adjustment3DPointCloudObjectTracking // - // // * // arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-Adjustment3DPointCloudObjectTracking // - // // * // arn:aws:lambda:ap-south-1:565803892007:function:PRE-Adjustment3DPointCloudObjectTracking // - // // * // arn:aws:lambda:eu-central-1:203001061592:function:PRE-Adjustment3DPointCloudObjectTracking // - // // * // arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-Adjustment3DPointCloudObjectTracking // - // // * // arn:aws:lambda:eu-west-2:487402164563:function:PRE-Adjustment3DPointCloudObjectTracking // - // // * // arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-Adjustment3DPointCloudObjectTracking // - // // * // arn:aws:lambda:ca-central-1:918755190332:function:PRE-Adjustment3DPointCloudObjectTracking // @@ -3111,50 +2972,39 @@ type HumanTaskConfig struct { // point cloud semantic segmentation adjustment - Adjust semantic segmentation // masks in a 3D point cloud. // - // * + // * // arn:aws:lambda:us-east-1:432418664414:function:PRE-Adjustment3DPointCloudSemanticSegmentation // - // // * // arn:aws:lambda:us-east-2:266458841044:function:PRE-Adjustment3DPointCloudSemanticSegmentation // - // // * // arn:aws:lambda:us-west-2:081040173940:function:PRE-Adjustment3DPointCloudSemanticSegmentation // - // // * // arn:aws:lambda:eu-west-1:568282634449:function:PRE-Adjustment3DPointCloudSemanticSegmentation // - // // * // arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-Adjustment3DPointCloudSemanticSegmentation // - // // * // arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-Adjustment3DPointCloudSemanticSegmentation // - // // * // arn:aws:lambda:ap-south-1:565803892007:function:PRE-Adjustment3DPointCloudSemanticSegmentation // - // // * // arn:aws:lambda:eu-central-1:203001061592:function:PRE-Adjustment3DPointCloudSemanticSegmentation // - // // * // arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-Adjustment3DPointCloudSemanticSegmentation // - // // * // arn:aws:lambda:eu-west-2:487402164563:function:PRE-Adjustment3DPointCloudSemanticSegmentation // - // // * // arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-Adjustment3DPointCloudSemanticSegmentation // - // // * // arn:aws:lambda:ca-central-1:918755190332:function:PRE-Adjustment3DPointCloudSemanticSegmentation // @@ -3429,17 +3279,17 @@ type HyperParameterTrainingJobSummary struct { // The status of the objective metric for the training job: // - // * Succeeded: The - // final objective metric for the training job was evaluated by the hyperparameter - // tuning job and used in the hyperparameter tuning process. + // * Succeeded: The final + // objective metric for the training job was evaluated by the hyperparameter tuning + // job and used in the hyperparameter tuning process. // - // * Pending: The - // training job is in progress and evaluation of its final objective metric is - // pending. + // * Pending: The training job + // is in progress and evaluation of its final objective metric is pending. 
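For orientation, here is a minimal sketch of how one of the region-specific pre-annotation Lambda ARNs listed above might be supplied on a HumanTaskConfig. It assumes the generated github.com/aws/aws-sdk-go-v2/service/sagemaker/types package; the workteam ARN and task metadata are placeholder values, and only the fields relevant to the ARN choice are shown.

package sample

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sagemaker/types"
)

// textMultiClassTaskConfig wires the us-east-1 text multi-class pre-annotation
// Lambda (one of the ARNs enumerated above) into a HumanTaskConfig.
// Workteam ARN, titles, and limits are placeholders.
func textMultiClassTaskConfig() types.HumanTaskConfig {
	return types.HumanTaskConfig{
		PreHumanTaskLambdaArn:             aws.String("arn:aws:lambda:us-east-1:432418664414:function:PRE-TextMultiClass"),
		WorkteamArn:                       aws.String("arn:aws:sagemaker:us-east-1:111122223333:workteam/private-crowd/example-team"),
		TaskTitle:                         aws.String("Classify the text"),
		TaskDescription:                   aws.String("Choose the single best label for each passage"),
		NumberOfHumanWorkersPerDataObject: aws.Int32(3),
		TaskTimeLimitInSeconds:            aws.Int32(600),
	}
}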
// - // * Failed: The final objective metric for the training job was not - // evaluated, and was not used in the hyperparameter tuning process. This typically - // occurs when the training job failed or did not emit an objective metric. + // * + // Failed: The final objective metric for the training job was not evaluated, and + // was not used in the hyperparameter tuning process. This typically occurs when + // the training job failed or did not emit an objective metric. ObjectiveStatus ObjectiveStatus // Specifies the time when the training job ends on training instances. You are @@ -3623,11 +3473,11 @@ type ImageConfig struct { // Set this to one of the following values: // - // * Platform - The model image is - // hosted in Amazon ECR. + // * Platform - The model image is hosted + // in Amazon ECR. // - // * Vpc - The model image is hosted in a private Docker - // registry in your VPC. + // * Vpc - The model image is hosted in a private Docker registry + // in your VPC. // // This member is required. RepositoryAccessMode RepositoryAccessMode @@ -3673,173 +3523,168 @@ type InputConfig struct { // with a JSON dictionary form. The data inputs are InputConfig$Framework // specific. // - // * TensorFlow: You must specify the name and shape (NHWC format) - // of the expected data inputs using a dictionary format for your trained model. - // The dictionary formats required for the console and CLI are different. + // * TensorFlow: You must specify the name and shape (NHWC format) of + // the expected data inputs using a dictionary format for your trained model. The + // dictionary formats required for the console and CLI are different. // + // * Examples + // for one input: // - // * Examples for one input: - // - // * If using the console, - // {"input":[1,1024,1024,3]} + // * If using the console, {"input":[1,1024,1024,3]} // - // * If using the CLI, - // {\"input\":[1,1024,1024,3]} + // * If using + // the CLI, {\"input\":[1,1024,1024,3]} // - // * Examples for two inputs: + // * Examples for two inputs: // - // * - // If using the console, {"data1": [1,28,28,1], "data2":[1,28,28,1]} + // * If using the + // console, {"data1": [1,28,28,1], "data2":[1,28,28,1]} // - // * - // If using the CLI, {\"data1\": [1,28,28,1], \"data2\":[1,28,28,1]} + // * If using the CLI, + // {\"data1\": [1,28,28,1], \"data2\":[1,28,28,1]} // - // * KERAS: - // You must specify the name and shape (NCHW format) of expected data inputs using - // a dictionary format for your trained model. Note that while Keras model - // artifacts should be uploaded in NHWC (channel-last) format, DataInputConfig - // should be specified in NCHW (channel-first) format. The dictionary formats - // required for the console and CLI are different. - // - // * Examples for one - // input: + // * KERAS: You must specify the + // name and shape (NCHW format) of expected data inputs using a dictionary format + // for your trained model. Note that while Keras model artifacts should be uploaded + // in NHWC (channel-last) format, DataInputConfig should be specified in NCHW + // (channel-first) format. The dictionary formats required for the console and CLI + // are different. 
// - // * If using the console, {"input_1":[1,3,224,224]} + // * Examples for one input: // + // * If using the console, + // {"input_1":[1,3,224,224]} // // * If using the CLI, {\"input_1\":[1,3,224,224]} // - // * Examples for two - // inputs: + // * + // Examples for two inputs: // - // * If using the console, {"input_1": [1,3,224,224], + // * If using the console, {"input_1": [1,3,224,224], // "input_2":[1,3,224,224]} // - // * If using the CLI, {\"input_1\": - // [1,3,224,224], \"input_2\":[1,3,224,224]} - // - // * MXNET/ONNX: You must specify - // the name and shape (NCHW format) of the expected data inputs in order using a - // dictionary format for your trained model. The dictionary formats required for - // the console and CLI are different. + // * If using the CLI, {\"input_1\": [1,3,224,224], + // \"input_2\":[1,3,224,224]} // - // * Examples for one input: + // * MXNET/ONNX: You must specify the name and shape + // (NCHW format) of the expected data inputs in order using a dictionary format for + // your trained model. The dictionary formats required for the console and CLI are + // different. // + // * Examples for one input: // - // * If using the console, {"data":[1,3,1024,1024]} - // - // * If using the - // CLI, {\"data\":[1,3,1024,1024]} - // - // * Examples for two inputs: + // * If using the console, + // {"data":[1,3,1024,1024]} // + // * If using the CLI, {\"data\":[1,3,1024,1024]} // - // * If using the console, {"var1": [1,1,28,28], "var2":[1,1,28,28]} + // * + // Examples for two inputs: // - // * - // If using the CLI, {\"var1\": [1,1,28,28], \"var2\":[1,1,28,28]} + // * If using the console, {"var1": [1,1,28,28], + // "var2":[1,1,28,28]} // - // * PyTorch: - // You can either specify the name and shape (NCHW format) of expected data inputs - // in order using a dictionary format for your trained model or you can specify the - // shape only using a list format. The dictionary formats required for the console - // and CLI are different. The list formats for the console and CLI are the same. + // * If using the CLI, {\"var1\": [1,1,28,28], + // \"var2\":[1,1,28,28]} // + // * PyTorch: You can either specify the name and shape + // (NCHW format) of expected data inputs in order using a dictionary format for + // your trained model or you can specify the shape only using a list format. The + // dictionary formats required for the console and CLI are different. The list + // formats for the console and CLI are the same. 
// - // * Examples for one input in dictionary format: + // * Examples for one input in + // dictionary format: // - // * If using the - // console, {"input0":[1,3,224,224]} + // * If using the console, {"input0":[1,3,224,224]} // - // * If using the CLI, - // {\"input0\":[1,3,224,224]} + // * If using + // the CLI, {\"input0\":[1,3,224,224]} // - // * Example for one input in list format: + // * Example for one input in list format: // [[1,3,224,224]] // - // * Examples for two inputs in dictionary format: - // - // - // * If using the console, {"input0":[1,3,224,224], "input1":[1,3,224,224]} - // + // * Examples for two inputs in dictionary format: // - // * If using the CLI, {\"input0\":[1,3,224,224], \"input1\":[1,3,224,224]} + // * If using the + // console, {"input0":[1,3,224,224], "input1":[1,3,224,224]} // + // * If using the CLI, + // {\"input0\":[1,3,224,224], \"input1\":[1,3,224,224]} // - // * Example for two inputs in list format: [[1,3,224,224], [1,3,224,224]] + // * Example for two inputs + // in list format: [[1,3,224,224], [1,3,224,224]] // - // * - // XGBOOST: input data name and shape are not needed. + // * XGBOOST: input data name and + // shape are not needed. // - // DataInputConfig supports the - // following parameters for CoreMLOutputConfig$TargetDevice (ML Model format): + // DataInputConfig supports the following parameters for + // CoreMLOutputConfig$TargetDevice (ML Model format): // + // * shape: Input shape, for + // example {"input_1": {"shape": [1,224,224,3]}}. In addition to static input + // shapes, CoreML converter supports Flexible input shapes: // - // * shape: Input shape, for example {"input_1": {"shape": [1,224,224,3]}}. In - // addition to static input shapes, CoreML converter supports Flexible input - // shapes: + // * Range Dimension. You + // can use the Range Dimension feature if you know the input shape will be within + // some specific interval in that dimension, for example: {"input_1": {"shape": + // ["1..10", 224, 224, 3]}} // - // * Range Dimension. You can use the Range Dimension feature if - // you know the input shape will be within some specific interval in that - // dimension, for example: {"input_1": {"shape": ["1..10", 224, 224, 3]}} + // * Enumerated shapes. Sometimes, the models are trained + // to work only on a select set of inputs. You can enumerate all supported input + // shapes, for example: {"input_1": {"shape": [[1, 224, 224, 3], [1, 160, 160, + // 3]]}} // - // - // * Enumerated shapes. Sometimes, the models are trained to work only on a select - // set of inputs. You can enumerate all supported input shapes, for example: - // {"input_1": {"shape": [[1, 224, 224, 3], [1, 160, 160, 3]]}} - // - // * - // default_shape: Default input shape. You can set a default shape during + // * default_shape: Default input shape. You can set a default shape during // conversion for both Range Dimension and Enumerated Shapes. For example // {"input_1": {"shape": ["1..10", 224, 224, 3], "default_shape": [1, 224, 224, // 3]}} // - // * type: Input type. Allowed values: Image and Tensor. By default, the + // * type: Input type. Allowed values: Image and Tensor. By default, the // converter generates an ML Model with inputs of type Tensor (MultiArray). User // can set input type to be Image. Image input type requires additional input // parameters such as bias and scale. // - // * bias: If the input type is an Image, - // you need to provide the bias vector. 
+ // * bias: If the input type is an Image, you + // need to provide the bias vector. // - // * scale: If the input type is an - // Image, you need to provide a scale factor. + // * scale: If the input type is an Image, you + // need to provide a scale factor. // - // CoreML ClassifierConfig parameters - // can be specified using OutputConfig$CompilerOptions. CoreML converter supports + // CoreML ClassifierConfig parameters can be + // specified using OutputConfig$CompilerOptions. CoreML converter supports // Tensorflow and PyTorch models. CoreML conversion examples: // - // * Tensor type + // * Tensor type // input: // - // * "DataInputConfig": {"input_1": {"shape": [[1,224,224,3], + // * "DataInputConfig": {"input_1": {"shape": [[1,224,224,3], // [1,160,160,3]], "default_shape": [1,224,224,3]}} // - // * Tensor type input - // without input name (PyTorch): + // * Tensor type input without + // input name (PyTorch): // - // * "DataInputConfig": [{"shape": - // [[1,3,224,224], [1,3,160,160]], "default_shape": [1,3,224,224]}] + // * "DataInputConfig": [{"shape": [[1,3,224,224], + // [1,3,160,160]], "default_shape": [1,3,224,224]}] // - // * Image - // type input: + // * Image type input: // - // * "DataInputConfig": {"input_1": {"shape": [[1,224,224,3], - // [1,160,160,3]], "default_shape": [1,224,224,3], "type": "Image", "bias": - // [-1,-1,-1], "scale": 0.007843137255}} + // * + // "DataInputConfig": {"input_1": {"shape": [[1,224,224,3], [1,160,160,3]], + // "default_shape": [1,224,224,3], "type": "Image", "bias": [-1,-1,-1], "scale": + // 0.007843137255}} // - // * "CompilerOptions": - // {"class_labels": "imagenet_labels_1000.txt"} + // * "CompilerOptions": {"class_labels": + // "imagenet_labels_1000.txt"} // - // * Image type input without - // input name (PyTorch): + // * Image type input without input name (PyTorch): // - // * "DataInputConfig": [{"shape": [[1,3,224,224], - // [1,3,160,160]], "default_shape": [1,3,224,224], "type": "Image", "bias": - // [-1,-1,-1], "scale": 0.007843137255}] + // * + // "DataInputConfig": [{"shape": [[1,3,224,224], [1,3,160,160]], "default_shape": + // [1,3,224,224], "type": "Image", "bias": [-1,-1,-1], "scale": 0.007843137255}] // - // * "CompilerOptions": - // {"class_labels": "imagenet_labels_1000.txt"} + // * + // "CompilerOptions": {"class_labels": "imagenet_labels_1000.txt"} // // This member is required. DataInputConfig *string @@ -3960,20 +3805,20 @@ type LabelingJobAlgorithmsConfig struct { // Specifies the Amazon Resource Name (ARN) of the algorithm used for // auto-labeling. You must select one of the following ARNs: // - // * Image + // * Image // classification // arn:aws:sagemaker:region:027400017018:labeling-job-algorithm-specification/image-classification // - // - // * Text classification + // * + // Text classification // arn:aws:sagemaker:region:027400017018:labeling-job-algorithm-specification/text-classification // - // - // * Object detection + // * + // Object detection // arn:aws:sagemaker:region:027400017018:labeling-job-algorithm-specification/object-detection // - // - // * Semantic Segmentation + // * + // Semantic Segmentation // arn:aws:sagemaker:region:027400017018:labeling-job-algorithm-specification/semantic-segmentation // // This member is required. @@ -4109,11 +3954,11 @@ type LabelingJobResourceConfig struct { // encrypt data on the storage volume attached to the ML compute instance(s) that // run the training job. 
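To make the DataInputConfig dictionary formats above concrete, here is a minimal sketch of an InputConfig for a PyTorch model with a single NCHW input, assuming the generated types package. The S3 URI is a placeholder, and the backslash-escaped forms shown for the CLI are unnecessary when the JSON is passed as a Go string literal.

package sample

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sagemaker/types"
)

// pytorchInputConfig describes one NCHW input named "input0" of shape
// [1,3,224,224], matching the console-style dictionary format above.
func pytorchInputConfig() types.InputConfig {
	return types.InputConfig{
		S3Uri:           aws.String("s3://example-bucket/model/model.tar.gz"), // placeholder
		Framework:       "PYTORCH",
		DataInputConfig: aws.String(`{"input0":[1,3,224,224]}`),
	}
}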
The VolumeKmsKeyId can be any of the following formats: // + // * + // // KMS Key ID "1234abcd-12ab-34cd-56ef-1234567890ab" // - // * // KMS Key ID "1234abcd-12ab-34cd-56ef-1234567890ab" - // - // * // Amazon Resource - // Name (ARN) of a KMS Key + // * // Amazon Resource Name + // (ARN) of a KMS Key // "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab" VolumeKmsKeyId *string } @@ -4712,11 +4557,11 @@ type MonitoringStoppingCondition struct { // to filter on a training job's InputDataConfig property with a specific channel // name and S3Uri prefix, define the following filters: // -// * +// * // '{Name:"InputDataConfig.ChannelName", "Operator":"Equals", "Value":"train"}', // -// -// * '{Name:"InputDataConfig.DataSource.S3DataSource.S3Uri", "Operator":"Contains", +// * +// '{Name:"InputDataConfig.DataSource.S3DataSource.S3Uri", "Operator":"Contains", // "Value":"mybucket/catdata"}' type NestedFilters struct { @@ -4984,55 +4829,54 @@ type OutputConfig struct { // accelerators and highly recommended for CPU compilations. For any other cases, // it is optional to specify CompilerOptions. // - // * CPU: Compilation for CPU - // supports the following compiler options. - // - // * mcpu: CPU - // micro-architecture. For example, {'mcpu': 'skylake-avx512'} + // * CPU: Compilation for CPU supports + // the following compiler options. // - // * mattr: - // CPU flags. For example, {'mattr': ['+neon', '+vfpv4']} + // * mcpu: CPU micro-architecture. For example, + // {'mcpu': 'skylake-avx512'} // - // * ARM: Details of - // ARM CPU compilations. + // * mattr: CPU flags. For example, {'mattr': ['+neon', + // '+vfpv4']} // - // * NEON: NEON is an implementation of the Advanced - // SIMD extension used in ARMv7 processors. For example, add {'mattr': ['+neon']} - // to the compiler options if compiling for ARM 32-bit platform with the NEON - // support. + // * ARM: Details of ARM CPU compilations. // - // * NVIDIA: Compilation for NVIDIA GPU supports the following - // compiler options. + // * NEON: NEON is an + // implementation of the Advanced SIMD extension used in ARMv7 processors. For + // example, add {'mattr': ['+neon']} to the compiler options if compiling for ARM + // 32-bit platform with the NEON support. // - // * gpu_code: Specifies the targeted architecture. + // * NVIDIA: Compilation for NVIDIA GPU + // supports the following compiler options. // + // * gpu_code: Specifies the targeted + // architecture. // // * trt-ver: Specifies the TensorRT versions in x.y.z. format. // - // * + // * // cuda-ver: Specifies the CUDA version in x.y format. // - // For example, - // {'gpu-code': 'sm_72', 'trt-ver': '6.0.1', 'cuda-ver': '10.1'} + // For example, {'gpu-code': + // 'sm_72', 'trt-ver': '6.0.1', 'cuda-ver': '10.1'} // - // * ANDROID: - // Compilation for the Android OS supports the following compiler options: + // * ANDROID: Compilation for the + // Android OS supports the following compiler options: // + // * ANDROID_PLATFORM: + // Specifies the Android API levels. Available levels range from 21 to 29. For + // example, {'ANDROID_PLATFORM': 28}. // - // * ANDROID_PLATFORM: Specifies the Android API levels. Available levels range - // from 21 to 29. For example, {'ANDROID_PLATFORM': 28}. + // * mattr: Add {'mattr': ['+neon']} to + // compiler options if compiling for ARM 32-bit platform with NEON support. // - // * mattr: Add - // {'mattr': ['+neon']} to compiler options if compiling for ARM 32-bit platform - // with NEON support. 
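Pulling the NVIDIA compiler options above together with a target platform, here is a minimal sketch of an OutputConfig, assuming the generated types package. The values mirror the Jetson TX2 example that appears with the TargetPlatform documentation below; the S3 output location is a placeholder, and the options are written as ordinary JSON rather than the single-quoted shorthand used in the prose.

package sample

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sagemaker/types"
)

// jetsonTX2OutputConfig targets LINUX/ARM64 with the NVIDIA accelerator and
// passes the matching gpu-code, trt-ver, and cuda-ver compiler options.
func jetsonTX2OutputConfig() types.OutputConfig {
	return types.OutputConfig{
		S3OutputLocation: aws.String("s3://example-bucket/compiled/"), // placeholder
		TargetPlatform: &types.TargetPlatform{
			Os:          "LINUX",
			Arch:        "ARM64",
			Accelerator: "NVIDIA",
		},
		CompilerOptions: aws.String(`{"gpu-code": "sm_62", "trt-ver": "6.0.1", "cuda-ver": "10.0"}`),
	}
}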
- // - // * CoreML: Compilation for the CoreML - // OutputConfig$TargetDevice supports the following compiler options: + // * + // CoreML: Compilation for the CoreML OutputConfig$TargetDevice supports the + // following compiler options: // - // * - // class_labels: Specifies the classification labels file name inside input tar.gz - // file. For example, {"class_labels": "imagenet_labels_1000.txt"}. Labels inside - // the txt file should be separated by newlines. + // * class_labels: Specifies the classification labels + // file name inside input tar.gz file. For example, {"class_labels": + // "imagenet_labels_1000.txt"}. Labels inside the txt file should be separated by + // newlines. CompilerOptions *string // Identifies the target device or the machine learning instance that you want to @@ -5046,28 +4890,28 @@ type OutputConfig struct { // TargetDevice. The following examples show how to configure the TargetPlatform // and CompilerOptions JSON strings for popular target platforms: // - // * Raspberry - // Pi 3 Model B+ "TargetPlatform": {"Os": "LINUX", "Arch": "ARM_EABIHF"}, + // * Raspberry Pi 3 + // Model B+ "TargetPlatform": {"Os": "LINUX", "Arch": "ARM_EABIHF"}, // "CompilerOptions": {'mattr': ['+neon']} // - // * Jetson TX2 "TargetPlatform": - // {"Os": "LINUX", "Arch": "ARM64", "Accelerator": "NVIDIA"}, "CompilerOptions": + // * Jetson TX2 "TargetPlatform": {"Os": + // "LINUX", "Arch": "ARM64", "Accelerator": "NVIDIA"}, "CompilerOptions": // {'gpu-code': 'sm_62', 'trt-ver': '6.0.1', 'cuda-ver': '10.0'} // - // * EC2 - // m5.2xlarge instance OS "TargetPlatform": {"Os": "LINUX", "Arch": "X86_64", - // "Accelerator": "NVIDIA"}, "CompilerOptions": {'mcpu': 'skylake-avx512'} + // * EC2 m5.2xlarge + // instance OS "TargetPlatform": {"Os": "LINUX", "Arch": "X86_64", "Accelerator": + // "NVIDIA"}, "CompilerOptions": {'mcpu': 'skylake-avx512'} // - // * - // RK3399 "TargetPlatform": {"Os": "LINUX", "Arch": "ARM64", "Accelerator": - // "MALI"} - // - // * ARMv7 phone (CPU) "TargetPlatform": {"Os": "ANDROID", "Arch": - // "ARM_EABI"}, "CompilerOptions": {'ANDROID_PLATFORM': 25, 'mattr': ['+neon']} + // * RK3399 + // "TargetPlatform": {"Os": "LINUX", "Arch": "ARM64", "Accelerator": "MALI"} // + // * + // ARMv7 phone (CPU) "TargetPlatform": {"Os": "ANDROID", "Arch": "ARM_EABI"}, + // "CompilerOptions": {'ANDROID_PLATFORM': 25, 'mattr': ['+neon']} // - // * ARMv8 phone (CPU) "TargetPlatform": {"Os": "ANDROID", "Arch": "ARM64"}, - // "CompilerOptions": {'ANDROID_PLATFORM': 29} + // * ARMv8 phone + // (CPU) "TargetPlatform": {"Os": "ANDROID", "Arch": "ARM64"}, "CompilerOptions": + // {'ANDROID_PLATFORM': 29} TargetPlatform *TargetPlatform } @@ -5085,24 +4929,24 @@ type OutputDataConfig struct { // encrypt the model artifacts at rest using Amazon S3 server-side encryption. 
The // KmsKeyId can be any of the following formats: // - // * // KMS Key ID + // * // KMS Key ID // "1234abcd-12ab-34cd-56ef-1234567890ab" // - // * // Amazon Resource Name (ARN) of a - // KMS Key + // * // Amazon Resource Name (ARN) of a KMS + // Key // "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab" // + // * + // // KMS Key Alias "alias/ExampleAlias" // - // * // KMS Key Alias "alias/ExampleAlias" - // - // * // Amazon Resource Name (ARN) of - // a KMS Key Alias "arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias" + // * // Amazon Resource Name (ARN) of a KMS + // Key Alias "arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias" // - // If you - // use a KMS key ID or an alias of your master key, the Amazon SageMaker execution - // role must include permissions to call kms:Encrypt. If you don't provide a KMS - // key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's - // account. Amazon SageMaker uses server-side encryption with KMS-managed keys for + // If you use a + // KMS key ID or an alias of your master key, the Amazon SageMaker execution role + // must include permissions to call kms:Encrypt. If you don't provide a KMS key ID, + // Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. + // Amazon SageMaker uses server-side encryption with KMS-managed keys for // OutputDataConfig. If you use a bucket policy with an s3:PutObject permission // that only allows objects with server-side encryption, set the condition key of // s3:x-amz-server-side-encryption to "aws:kms". For more information, see @@ -5243,12 +5087,12 @@ type ProcessingJob struct { // Associates a SageMaker job as a trial component with an experiment and trial. // Specified when you call the following APIs: // - // * CreateProcessingJob + // * CreateProcessingJob // - // * + // * // CreateTrainingJob // - // * CreateTransformJob + // * CreateTransformJob ExperimentConfig *ExperimentConfig // A string, up to one KB in size, that contains the reason a processing job @@ -5557,222 +5401,218 @@ type PropertyNameSuggestion struct { // are in US dollars and should be based on the complexity of the task; the longer // it takes in your initial testing, the more you should offer. // -// * 0.036 +// * 0.036 // -// * +// * // 0.048 // -// * 0.060 +// * 0.060 // -// * 0.072 +// * 0.072 // -// * 0.120 +// * 0.120 // -// * 0.240 +// * 0.240 // -// * 0.360 +// * 0.360 // -// * -// 0.480 +// * 0.480 // -// * 0.600 +// * 0.600 // -// * 0.720 +// * 0.720 // -// * 0.840 +// * +// 0.840 // -// * 0.960 +// * 0.960 // -// * 1.080 +// * 1.080 // -// * -// 1.200 +// * 1.200 // -// Use one of the following prices for image classification, text -// classification, and custom tasks. Prices are in US dollars. +// Use one of the following prices for image +// classification, text classification, and custom tasks. Prices are in US +// dollars. // -// * 0.012 +// * 0.012 // -// * -// 0.024 +// * 0.024 // -// * 0.036 +// * 0.036 // -// * 0.048 +// * 0.048 // -// * 0.060 +// * 0.060 // -// * 0.072 +// * 0.072 // -// * 0.120 +// * 0.120 // -// * +// * // 0.240 // -// * 0.360 +// * 0.360 // -// * 0.480 +// * 0.480 // -// * 0.600 +// * 0.600 // -// * 0.720 +// * 0.720 // -// * 0.840 +// * 0.840 // -// * -// 0.960 +// * 0.960 // -// * 1.080 +// * 1.080 // -// * 1.200 -// -// Use one of the following prices for semantic -// segmentation tasks. Prices are in US dollars. +// * +// 1.200 // -// * 0.840 +// Use one of the following prices for semantic segmentation tasks. 
Prices +// are in US dollars. // -// * 0.960 +// * 0.840 // -// * -// 1.080 +// * 0.960 // -// * 1.200 +// * 1.080 // -// Use one of the following prices for Textract AnalyzeDocument -// Important Form Key Amazon Augmented AI review tasks. Prices are in US dollars. +// * 1.200 // +// Use one of the following +// prices for Textract AnalyzeDocument Important Form Key Amazon Augmented AI +// review tasks. Prices are in US dollars. // // * 2.400 // -// * 2.280 +// * 2.280 // -// * 2.160 +// * 2.160 // -// * 2.040 +// * 2.040 // -// * 1.920 +// * +// 1.920 // -// * 1.800 +// * 1.800 // -// * -// 1.680 +// * 1.680 // -// * 1.560 +// * 1.560 // -// * 1.440 +// * 1.440 // -// * 1.320 +// * 1.320 // -// * 1.200 +// * 1.200 // -// * 1.080 +// * 1.080 // -// * -// 0.960 +// * 0.960 // -// * 0.840 +// * +// 0.840 // -// * 0.720 +// * 0.720 // -// * 0.600 +// * 0.600 // -// * 0.480 +// * 0.480 // -// * 0.360 +// * 0.360 // -// * -// 0.240 +// * 0.240 // -// * 0.120 +// * 0.120 // -// * 0.072 +// * 0.072 // -// * 0.060 +// * 0.060 // -// * 0.048 +// * +// 0.048 // -// * 0.036 +// * 0.036 // -// * -// 0.024 +// * 0.024 // -// * 0.012 +// * 0.012 // -// Use one of the following prices for Rekognition -// DetectModerationLabels Amazon Augmented AI review tasks. Prices are in US -// dollars. +// Use one of the following prices for +// Rekognition DetectModerationLabels Amazon Augmented AI review tasks. Prices are +// in US dollars. // -// * 1.200 +// * 1.200 // -// * 1.080 +// * 1.080 // -// * 0.960 +// * 0.960 // -// * 0.840 +// * 0.840 // -// * 0.720 +// * 0.720 // -// * -// 0.600 +// * 0.600 // -// * 0.480 +// * 0.480 // -// * 0.360 +// * +// 0.360 // -// * 0.240 +// * 0.240 // -// * 0.120 +// * 0.120 // -// * 0.072 +// * 0.072 // -// * -// 0.060 +// * 0.060 // -// * 0.048 +// * 0.048 // -// * 0.036 +// * 0.036 // -// * 0.024 +// * 0.024 // -// * 0.012 +// * +// 0.012 // -// Use one of the -// following prices for Amazon Augmented AI custom human review tasks. Prices are -// in US dollars. +// Use one of the following prices for Amazon Augmented AI custom human +// review tasks. Prices are in US dollars. // -// * 1.200 +// * 1.200 // -// * 1.080 +// * 1.080 // -// * 0.960 +// * 0.960 // -// * 0.840 +// * 0.840 // -// * +// * // 0.720 // -// * 0.600 +// * 0.600 // -// * 0.480 +// * 0.480 // -// * 0.360 +// * 0.360 // -// * 0.240 +// * 0.240 // -// * 0.120 +// * 0.120 // -// * -// 0.072 +// * 0.072 // -// * 0.060 +// * 0.060 // -// * 0.048 +// * 0.048 // -// * 0.036 +// * +// 0.036 // -// * 0.024 +// * 0.024 // -// * 0.012 +// * 0.012 type PublicWorkforceTaskPrice struct { // Defines the amount of money paid to an Amazon Mechanical Turk worker in United @@ -5865,11 +5705,11 @@ type ResourceConfig struct { // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ssd-instance-store.html). // The VolumeKmsKeyId can be in any of the following formats: // - // * // KMS Key ID + // * // KMS Key ID // "1234abcd-12ab-34cd-56ef-1234567890ab" // - // * // Amazon Resource Name (ARN) of a - // KMS Key + // * // Amazon Resource Name (ARN) of a KMS + // Key // "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab" VolumeKmsKeyId *string } @@ -5929,10 +5769,10 @@ type S3DataSource struct { // Depending on the value specified for the S3DataType, identifies either a key // name prefix or a manifest. 
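Each amount in the price lists above is carried by the AmountInUsd field as whole dollars, cents, and tenths of a cent. A sketch of the 0.036 USD tier follows, assuming the generated types package and a USD shape with Dollars, Cents, and TenthFractionsOfACent fields.

package sample

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sagemaker/types"
)

// pricePerTaskUSD0036 encodes the 0.036 USD tier:
// 0 dollars + 3 cents + 6 tenths of a cent = $0.036 per task.
func pricePerTaskUSD0036() types.PublicWorkforceTaskPrice {
	return types.PublicWorkforceTaskPrice{
		AmountInUsd: &types.USD{
			Dollars:               aws.Int32(0),
			Cents:                 aws.Int32(3),
			TenthFractionsOfACent: aws.Int32(6),
		},
	}
}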
For example: // - // * A key name prefix might look like + // * A key name prefix might look like // this: s3://bucketname/exampleprefix // - // * A manifest might look like this: + // * A manifest might look like this: // s3://bucketname/example.manifest A manifest is an S3 object which is a JSON file // consisting of an array of elements. The first element is a prefix which is // followed by one or more suffixes. SageMaker appends the suffix elements to the @@ -5977,37 +5817,37 @@ type ScheduleConfig struct { // A cron expression that describes details about the monitoring schedule. // Currently the only supported cron expressions are: // - // * If you want to set the - // job to start every hour, please use the following: Hourly: cron(0 * ? * * *) + // * If you want to set the job + // to start every hour, please use the following: Hourly: cron(0 * ? * * *) // - // - // * If you want to start the job daily: cron(0 [00-23] ? * * *) + // * If + // you want to start the job daily: cron(0 [00-23] ? * * *) // // For example, the // following are valid cron expressions: // - // * Daily at noon UTC: cron(0 12 ? * * + // * Daily at noon UTC: cron(0 12 ? * * // *) // - // * Daily at midnight UTC: cron(0 0 ? * * *) + // * Daily at midnight UTC: cron(0 0 ? * * *) // - // To support running every 6, - // 12 hours, the following are also supported: cron(0 [00-23]/[01-24] ? * * *) For + // To support running every 6, 12 + // hours, the following are also supported: cron(0 [00-23]/[01-24] ? * * *) For // example, the following are valid cron expressions: // - // * Every 12 hours, - // starting at 5pm UTC: cron(0 17/12 ? * * *) + // * Every 12 hours, starting + // at 5pm UTC: cron(0 17/12 ? * * *) // - // * Every two hours starting at - // midnight: cron(0 0/2 ? * * *) + // * Every two hours starting at midnight: + // cron(0 0/2 ? * * *) // - // * Even though the cron expression is set to - // start at 5PM UTC, note that there could be a delay of 0-20 minutes from the - // actual requested time to run the execution. + // * Even though the cron expression is set to start at 5PM + // UTC, note that there could be a delay of 0-20 minutes from the actual requested + // time to run the execution. // - // * We recommend that if you - // would like a daily schedule, you do not provide this parameter. Amazon SageMaker - // will pick a time for running every day. + // * We recommend that if you would like a daily + // schedule, you do not provide this parameter. Amazon SageMaker will pick a time + // for running every day. // // This member is required. ScheduleExpression *string @@ -6019,20 +5859,20 @@ type ScheduleConfig struct { // filter, or nested filter. A SearchExpression can contain up to twenty elements. // A SearchExpression contains the following components: // -// * A list of Filter +// * A list of Filter // objects. Each filter defines a simple Boolean expression comprised of a resource // property name, Boolean operator, and value. // -// * A list of NestedFilter -// objects. Each nested filter defines a list of Boolean expressions using a list -// of resource properties. A nested filter is satisfied if a single object in the -// list satisfies all Boolean expressions. +// * A list of NestedFilter objects. +// Each nested filter defines a list of Boolean expressions using a list of +// resource properties. A nested filter is satisfied if a single object in the list +// satisfies all Boolean expressions. // -// * A list of SearchExpression -// objects. 
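The cron forms above boil down to a single ScheduleExpression string. A minimal sketch of an hourly monitoring schedule, assuming the generated types package:

package sample

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sagemaker/types"
)

// hourlySchedule starts a monitoring job at the top of every hour,
// using the documented Hourly form: cron(0 * ? * * *).
func hourlySchedule() types.ScheduleConfig {
	return types.ScheduleConfig{
		ScheduleExpression: aws.String("cron(0 * ? * * *)"),
	}
}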
A search expression object can be nested in a list of search expression +// * A list of SearchExpression objects. A +// search expression object can be nested in a list of search expression // objects. // -// * A Boolean operator: And or Or. +// * A Boolean operator: And or Or. type SearchExpression struct { // A list of filter objects. @@ -6087,51 +5927,50 @@ type SecondaryStatusTransition struct { // Contains a secondary status information from a training job. Status might be one // of the following secondary statuses: InProgress // - // * Starting - Starting the + // * Starting - Starting the // training job. // - // * Downloading - An optional stage for algorithms that support + // * Downloading - An optional stage for algorithms that support // File training input mode. It indicates that data is being downloaded to the ML // storage volumes. // - // * Training - Training is in progress. + // * Training - Training is in progress. // - // * Uploading - - // Training is complete and the model artifacts are being uploaded to the S3 + // * Uploading - Training + // is complete and the model artifacts are being uploaded to the S3 // location. // // Completed // - // * Completed - The training job has - // completed. + // * Completed - The training job has completed. // // Failed // - // * Failed - The training job has failed. The reason for - // the failure is returned in the FailureReason field of - // DescribeTrainingJobResponse. + // * + // Failed - The training job has failed. The reason for the failure is returned in + // the FailureReason field of DescribeTrainingJobResponse. // // Stopped // - // * MaxRuntimeExceeded - The job - // stopped because it exceeded the maximum allowed runtime. + // * + // MaxRuntimeExceeded - The job stopped because it exceeded the maximum allowed + // runtime. // - // * Stopped - The - // training job has stopped. + // * Stopped - The training job has stopped. // // Stopping // - // * Stopping - Stopping the training - // job. + // * Stopping - + // Stopping the training job. // - // We no longer support the following secondary statuses: + // We no longer support the following secondary + // statuses: // - // * - // LaunchingMLInstances + // * LaunchingMLInstances // - // * PreparingTrainingStack + // * PreparingTrainingStack // - // * + // * // DownloadingTrainingImage // // This member is required. @@ -6146,41 +5985,40 @@ type SecondaryStatusTransition struct { // SageMaker provides secondary statuses and status messages that apply to each of // them: Starting // - // * Starting the training job. + // * Starting the training job. // - // * Launching requested ML + // * Launching requested ML // instances. // - // * Insufficient capacity error from EC2 while launching - // instances, retrying! + // * Insufficient capacity error from EC2 while launching instances, + // retrying! // - // * Launched instance was unhealthy, replacing it! + // * Launched instance was unhealthy, replacing it! // - // - // * Preparing the instances for training. + // * Preparing the + // instances for training. // // Training // - // * Downloading the - // training image. + // * Downloading the training image. // - // * Training image download completed. Training in - // progress. + // * Training + // image download completed. Training in progress. // - // Status messages are subject to change. Therefore, we recommend not - // including them in code that programmatically initiates actions. For examples, - // don't use status messages in if statements. 
To have an overview of your training - // job's progress, view TrainingJobStatus and SecondaryStatus in - // DescribeTrainingJob, and StatusMessage together. For example, at the start of a - // training job, you might see the following: + // Status messages are subject to + // change. Therefore, we recommend not including them in code that programmatically + // initiates actions. For examples, don't use status messages in if statements. To + // have an overview of your training job's progress, view TrainingJobStatus and + // SecondaryStatus in DescribeTrainingJob, and StatusMessage together. For example, + // at the start of a training job, you might see the following: // - // * TrainingJobStatus - - // InProgress + // * + // TrainingJobStatus - InProgress // - // * SecondaryStatus - Training + // * SecondaryStatus - Training // - // * StatusMessage - Downloading - // the training image + // * StatusMessage - + // Downloading the training image StatusMessage *string } @@ -6348,29 +6186,29 @@ type TargetPlatform struct { // Specifies a target platform architecture. // - // * X86_64: 64-bit version of the - // x86 instruction set. + // * X86_64: 64-bit version of the x86 + // instruction set. // - // * X86: 32-bit version of the x86 instruction set. + // * X86: 32-bit version of the x86 instruction set. // + // * ARM64: + // ARMv8 64-bit CPU. // - // * ARM64: ARMv8 64-bit CPU. + // * ARM_EABIHF: ARMv7 32-bit, Hard Float. // - // * ARM_EABIHF: ARMv7 32-bit, Hard Float. - // - // * - // ARM_EABI: ARMv7 32-bit, Soft Float. Used by Android 32-bit ARM platform. + // * ARM_EABI: ARMv7 + // 32-bit, Soft Float. Used by Android 32-bit ARM platform. // // This member is required. Arch TargetPlatformArch // Specifies a target platform OS. // - // * LINUX: Linux-based operating systems. - // + // * LINUX: Linux-based operating systems. // - // * ANDROID: Android operating systems. Android API level can be specified using - // the ANDROID_PLATFORM compiler option. For example, "CompilerOptions": + // * + // ANDROID: Android operating systems. Android API level can be specified using the + // ANDROID_PLATFORM compiler option. For example, "CompilerOptions": // {'ANDROID_PLATFORM': 28} // // This member is required. @@ -6378,14 +6216,14 @@ type TargetPlatform struct { // Specifies a target platform accelerator (optional). // - // * NVIDIA: Nvidia - // graphics processing unit. It also requires gpu-code, trt-ver, cuda-ver compiler + // * NVIDIA: Nvidia graphics + // processing unit. It also requires gpu-code, trt-ver, cuda-ver compiler // options // - // * MALI: ARM Mali graphics processor + // * MALI: ARM Mali graphics processor // - // * INTEL_GRAPHICS: - // Integrated Intel graphics + // * INTEL_GRAPHICS: Integrated Intel + // graphics Accelerator TargetPlatformAccelerator } @@ -6460,12 +6298,12 @@ type TrainingJob struct { // Associates a SageMaker job as a trial component with an experiment and trial. // Specified when you call the following APIs: // - // * CreateProcessingJob + // * CreateProcessingJob // - // * + // * // CreateTrainingJob // - // * CreateTransformJob + // * CreateTransformJob ExperimentConfig *ExperimentConfig // If the training job failed, the reason it failed. @@ -6509,52 +6347,51 @@ type TrainingJob struct { // under SecondaryStatusTransition. Amazon SageMaker provides primary statuses and // secondary statuses that apply to each of them: InProgress // - // * Starting - - // Starting the training job. + // * Starting - Starting + // the training job. 
// - // * Downloading - An optional stage for algorithms - // that support File training input mode. It indicates that data is being - // downloaded to the ML storage volumes. + // * Downloading - An optional stage for algorithms that support + // File training input mode. It indicates that data is being downloaded to the ML + // storage volumes. // - // * Training - Training is in - // progress. + // * Training - Training is in progress. // - // * Uploading - Training is complete and the model artifacts are - // being uploaded to the S3 location. + // * Uploading - Training + // is complete and the model artifacts are being uploaded to the S3 + // location. // // Completed // - // * Completed - The training - // job has completed. + // * Completed - The training job has completed. // // Failed // - // * Failed - The training job has failed. The - // reason for the failure is returned in the FailureReason field of - // DescribeTrainingJobResponse. + // * + // Failed - The training job has failed. The reason for the failure is returned in + // the FailureReason field of DescribeTrainingJobResponse. // // Stopped // - // * MaxRuntimeExceeded - The job - // stopped because it exceeded the maximum allowed runtime. + // * + // MaxRuntimeExceeded - The job stopped because it exceeded the maximum allowed + // runtime. // - // * Stopped - The - // training job has stopped. + // * Stopped - The training job has stopped. // // Stopping // - // * Stopping - Stopping the training - // job. + // * Stopping - + // Stopping the training job. // - // Valid values for SecondaryStatus are subject to change. We no longer - // support the following secondary statuses: + // Valid values for SecondaryStatus are subject to + // change. We no longer support the following secondary statuses: // - // * LaunchingMLInstances + // * + // LaunchingMLInstances // - // * - // PreparingTrainingStack + // * PreparingTrainingStack // - // * DownloadingTrainingImage + // * DownloadingTrainingImage SecondaryStatus SecondaryStatus // A history of all of the secondary statuses that the training job has @@ -6593,23 +6430,22 @@ type TrainingJob struct { // The status of the training job. Training job statuses are: // - // * InProgress - - // The training is in progress. + // * InProgress - The + // training is in progress. // - // * Completed - The training job has - // completed. + // * Completed - The training job has completed. // - // * Failed - The training job has failed. To see the reason for - // the failure, see the FailureReason field in the response to a - // DescribeTrainingJobResponse call. + // * + // Failed - The training job has failed. To see the reason for the failure, see the + // FailureReason field in the response to a DescribeTrainingJobResponse call. // - // * Stopping - The training job is - // stopping. + // * + // Stopping - The training job is stopping. // - // * Stopped - The training job has stopped. + // * Stopped - The training job has + // stopped. // - // For more detailed - // information, see SecondaryStatus. + // For more detailed information, see SecondaryStatus. TrainingJobStatus TrainingJobStatus // Indicates the time when the training job starts on training instances. You are @@ -6812,9 +6648,9 @@ type TransformInput struct { // supports a number of record-oriented binary data formats. 
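// A minimal sketch of the guidance in the TrainingJob/SecondaryStatusTransition
// comments above: branch on the TrainingJobStatus and SecondaryStatus enums
// returned by DescribeTrainingJob rather than on the free-form StatusMessage,
// which the documentation says is subject to change. The client is assumed to
// be constructed elsewhere; the constant names are assumed to follow the
// renamed CamelCase pattern applied throughout this change.
package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sagemaker"
	"github.com/aws/aws-sdk-go-v2/service/sagemaker/types"
)

func checkTrainingJob(ctx context.Context, client *sagemaker.Client, name string) error {
	out, err := client.DescribeTrainingJob(ctx, &sagemaker.DescribeTrainingJobInput{
		TrainingJobName: aws.String(name),
	})
	if err != nil {
		return err
	}

	// Decide on the enum values, not the human-readable message.
	switch out.SecondaryStatus {
	case types.SecondaryStatusTraining:
		fmt.Println("training in progress")
	case types.SecondaryStatusFailed:
		if out.FailureReason != nil {
			fmt.Println("training failed:", *out.FailureReason)
		}
	}

	// StatusMessage is only displayed, never compared.
	for _, t := range out.SecondaryStatusTransitions {
		if t != nil && t.StatusMessage != nil {
			fmt.Println(t.Status, *t.StatusMessage)
		}
	}
	return nil
}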
Currently, the // supported record formats are: // - // * RecordIO + // * RecordIO // - // * TFRecord + // * TFRecord // // When splitting is // enabled, the size of a mini-batch depends on the values of the BatchStrategy and @@ -6867,12 +6703,12 @@ type TransformJob struct { // Associates a SageMaker job as a trial component with an experiment and trial. // Specified when you call the following APIs: // - // * CreateProcessingJob + // * CreateProcessingJob // - // * + // * // CreateTrainingJob // - // * CreateTransformJob + // * CreateTransformJob ExperimentConfig *ExperimentConfig // If the transform job failed, the reason it failed. @@ -6928,20 +6764,19 @@ type TransformJob struct { // The status of the transform job. Transform job statuses are: // - // * InProgress - - // The job is in progress. + // * InProgress - The + // job is in progress. // - // * Completed - The job has completed. + // * Completed - The job has completed. // - // * Failed - // - The transform job has failed. To see the reason for the failure, see the + // * Failed - The + // transform job has failed. To see the reason for the failure, see the // FailureReason field in the response to a DescribeTransformJob call. // - // * - // Stopping - The transform job is stopping. + // * Stopping + // - The transform job is stopping. // - // * Stopped - The transform job has - // stopped. + // * Stopped - The transform job has stopped. TransformJobStatus TransformJobStatus // Describes the results of a transform job. @@ -7067,16 +6902,16 @@ type TransformOutput struct { // encrypt the model artifacts at rest using Amazon S3 server-side encryption. The // KmsKeyId can be any of the following formats: // - // * Key ID: + // * Key ID: // 1234abcd-12ab-34cd-56ef-1234567890ab // - // * Key ARN: + // * Key ARN: // arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab // + // * + // Alias name: alias/ExampleAlias // - // * Alias name: alias/ExampleAlias - // - // * Alias name ARN: + // * Alias name ARN: // arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias // // If you don't provide a @@ -7113,15 +6948,15 @@ type TransformResources struct { // that run the batch transform job. The VolumeKmsKeyId can be any of the following // formats: // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab // - // * Key ARN: + // * Key ARN: // arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab // + // * + // Alias name: alias/ExampleAlias // - // * Alias name: alias/ExampleAlias - // - // * Alias name ARN: + // * Alias name ARN: // arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias VolumeKmsKeyId *string } @@ -7142,10 +6977,10 @@ type TransformS3DataSource struct { // Depending on the value specified for the S3DataType, identifies either a key // name prefix or a manifest. For example: // - // * A key name prefix might look like + // * A key name prefix might look like // this: s3://bucketname/exampleprefix. // - // * A manifest might look like this: + // * A manifest might look like this: // s3://bucketname/example.manifest The manifest is an S3 object which is a JSON // file with the following format: [ {"prefix": // "s3://customer_bucket/some/prefix/"},"relative/path/to/custdata-1","relative/path/custdata-2",..."relative/path/custdata-N"] @@ -7416,12 +7251,12 @@ type TrialComponentSummary struct { // The status of the component. 
States include: // - // * InProgress + // * InProgress // - // * - // Completed + // * Completed // - // * Failed + // * + // Failed Status *TrialComponentStatus // The ARN of the trial component. @@ -7491,35 +7326,35 @@ type UiConfig struct { // point cloud object detection and 3D point cloud object detection adjustment // labeling jobs. // - // * + // * // arn:aws:sagemaker:aws-region:394669845002:human-task-ui/PointCloudObjectDetection // // Use // this HumanTaskUiArn for 3D point cloud object tracking and 3D point cloud object // tracking adjustment labeling jobs. // - // * + // * // arn:aws:sagemaker:aws-region:394669845002:human-task-ui/PointCloudObjectTracking // // Use // this HumanTaskUiArn for 3D point cloud semantic segmentation and 3D point cloud // semantic segmentation adjustment labeling jobs. // - // * + // * // arn:aws:sagemaker:aws-region:394669845002:human-task-ui/PointCloudSemanticSegmentation // // Video // Frame HumanTaskUiArns Use this HumanTaskUiArn for video frame object detection // and video frame object detection adjustment labeling jobs. // - // * + // * // arn:aws:sagemaker:region:394669845002:human-task-ui/VideoObjectDetection // // Use // this HumanTaskUiArn for video frame object tracking and video frame object // tracking adjustment labeling jobs. // - // * + // * // arn:aws:sagemaker:aws-region:394669845002:human-task-ui/VideoObjectTracking HumanTaskUiArn *string @@ -7627,17 +7462,17 @@ type VariantProperty struct { // The type of variant property. The supported values are: // - // * - // DesiredInstanceCount: Overrides the existing variant instance counts using the + // * DesiredInstanceCount: + // Overrides the existing variant instance counts using the // ProductionVariant$InitialInstanceCount values in the // CreateEndpointConfigInput$ProductionVariants. // - // * DesiredWeight: Overrides - // the existing variant weights using the ProductionVariant$InitialVariantWeight - // values in the CreateEndpointConfigInput$ProductionVariants. + // * DesiredWeight: Overrides the + // existing variant weights using the ProductionVariant$InitialVariantWeight values + // in the CreateEndpointConfigInput$ProductionVariants. // - // * - // DataCaptureConfig: (Not currently supported.) + // * DataCaptureConfig: (Not + // currently supported.) // // This member is required. VariantPropertyType VariantPropertyType diff --git a/service/sagemakera2iruntime/doc.go b/service/sagemakera2iruntime/doc.go index baf5c9c7c98..5c529b0df4b 100644 --- a/service/sagemakera2iruntime/doc.go +++ b/service/sagemakera2iruntime/doc.go @@ -17,7 +17,7 @@ // about API actions and data types that you can use to interact with Amazon A2I // programmatically. Use this guide to: // -// * Start a human loop with the +// * Start a human loop with the // StartHumanLoop operation when using Amazon A2I with a custom task type. To learn // more about the difference between custom and built-in task types, see Use Task // Types @@ -27,7 +27,7 @@ // (https://docs.aws.amazon.com/sagemaker/latest/dg/a2i-start-human-loop.html#a2i-instructions-starthumanloop) // in the Amazon SageMaker Developer Guide. // -// * Manage your human loops. You can +// * Manage your human loops. You can // list all human loops that you have created, describe individual human loops, and // stop and delete human loops. 
To learn more, see Monitor and Manage Your Human // Loop diff --git a/service/sagemakera2iruntime/types/enums.go b/service/sagemakera2iruntime/types/enums.go index 37589a1f20e..0947a887ee7 100644 --- a/service/sagemakera2iruntime/types/enums.go +++ b/service/sagemakera2iruntime/types/enums.go @@ -6,8 +6,8 @@ type ContentClassifier string // Enum values for ContentClassifier const ( - ContentClassifierFree_of_personally_identifiable_information ContentClassifier = "FreeOfPersonallyIdentifiableInformation" - ContentClassifierFree_of_adult_content ContentClassifier = "FreeOfAdultContent" + ContentClassifierFreeOfPersonallyIdentifiableInformation ContentClassifier = "FreeOfPersonallyIdentifiableInformation" + ContentClassifierFreeOfAdultContent ContentClassifier = "FreeOfAdultContent" ) // Values returns all known values for ContentClassifier. Note that this can be @@ -24,11 +24,11 @@ type HumanLoopStatus string // Enum values for HumanLoopStatus const ( - HumanLoopStatusIn_progress HumanLoopStatus = "InProgress" - HumanLoopStatusFailed HumanLoopStatus = "Failed" - HumanLoopStatusCompleted HumanLoopStatus = "Completed" - HumanLoopStatusStopped HumanLoopStatus = "Stopped" - HumanLoopStatusStopping HumanLoopStatus = "Stopping" + HumanLoopStatusInProgress HumanLoopStatus = "InProgress" + HumanLoopStatusFailed HumanLoopStatus = "Failed" + HumanLoopStatusCompleted HumanLoopStatus = "Completed" + HumanLoopStatusStopped HumanLoopStatus = "Stopped" + HumanLoopStatusStopping HumanLoopStatus = "Stopping" ) // Values returns all known values for HumanLoopStatus. Note that this can be diff --git a/service/savingsplans/types/enums.go b/service/savingsplans/types/enums.go index c802b4cde2b..a5b48393b87 100644 --- a/service/savingsplans/types/enums.go +++ b/service/savingsplans/types/enums.go @@ -43,8 +43,8 @@ type SavingsPlanOfferingPropertyKey string // Enum values for SavingsPlanOfferingPropertyKey const ( - SavingsPlanOfferingPropertyKeyRegion SavingsPlanOfferingPropertyKey = "region" - SavingsPlanOfferingPropertyKeyInstance_family SavingsPlanOfferingPropertyKey = "instanceFamily" + SavingsPlanOfferingPropertyKeyRegion SavingsPlanOfferingPropertyKey = "region" + SavingsPlanOfferingPropertyKeyInstanceFamily SavingsPlanOfferingPropertyKey = "instanceFamily" ) // Values returns all known values for SavingsPlanOfferingPropertyKey. Note that @@ -62,9 +62,9 @@ type SavingsPlanPaymentOption string // Enum values for SavingsPlanPaymentOption const ( - SavingsPlanPaymentOptionAll_upfront SavingsPlanPaymentOption = "All Upfront" - SavingsPlanPaymentOptionPartial_upfront SavingsPlanPaymentOption = "Partial Upfront" - SavingsPlanPaymentOptionNo_upfront SavingsPlanPaymentOption = "No Upfront" + SavingsPlanPaymentOptionAllUpfront SavingsPlanPaymentOption = "All Upfront" + SavingsPlanPaymentOptionPartialUpfront SavingsPlanPaymentOption = "Partial Upfront" + SavingsPlanPaymentOptionNoUpfront SavingsPlanPaymentOption = "No Upfront" ) // Values returns all known values for SavingsPlanPaymentOption. 
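// A minimal sketch of how calling code is affected by the constant renames in
// this change: the wire values are unchanged, only the Go identifiers move from
// underscore-separated names to CamelCase. Both the old and new identifiers
// shown here are taken directly from the diff; nothing else is assumed.
package example

import (
	a2itypes "github.com/aws/aws-sdk-go-v2/service/sagemakera2iruntime/types"
	sptypes "github.com/aws/aws-sdk-go-v2/service/savingsplans/types"
)

func isLoopRunning(status a2itypes.HumanLoopStatus) bool {
	// Before this change: a2itypes.HumanLoopStatusIn_progress
	return status == a2itypes.HumanLoopStatusInProgress
}

func isAllUpfront(opt sptypes.SavingsPlanPaymentOption) bool {
	// Before this change: sptypes.SavingsPlanPaymentOptionAll_upfront
	return opt == sptypes.SavingsPlanPaymentOptionAllUpfront
}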
Note that this can @@ -102,12 +102,12 @@ type SavingsPlanRateFilterAttribute string // Enum values for SavingsPlanRateFilterAttribute const ( - SavingsPlanRateFilterAttributeRegion SavingsPlanRateFilterAttribute = "region" - SavingsPlanRateFilterAttributeInstance_family SavingsPlanRateFilterAttribute = "instanceFamily" - SavingsPlanRateFilterAttributeInstance_type SavingsPlanRateFilterAttribute = "instanceType" - SavingsPlanRateFilterAttributeProduct_description SavingsPlanRateFilterAttribute = "productDescription" - SavingsPlanRateFilterAttributeTenancy SavingsPlanRateFilterAttribute = "tenancy" - SavingsPlanRateFilterAttributeProduct_id SavingsPlanRateFilterAttribute = "productId" + SavingsPlanRateFilterAttributeRegion SavingsPlanRateFilterAttribute = "region" + SavingsPlanRateFilterAttributeInstanceFamily SavingsPlanRateFilterAttribute = "instanceFamily" + SavingsPlanRateFilterAttributeInstanceType SavingsPlanRateFilterAttribute = "instanceType" + SavingsPlanRateFilterAttributeProductDescription SavingsPlanRateFilterAttribute = "productDescription" + SavingsPlanRateFilterAttributeTenancy SavingsPlanRateFilterAttribute = "tenancy" + SavingsPlanRateFilterAttributeProductId SavingsPlanRateFilterAttribute = "productId" ) // Values returns all known values for SavingsPlanRateFilterAttribute. Note that @@ -129,14 +129,14 @@ type SavingsPlanRateFilterName string // Enum values for SavingsPlanRateFilterName const ( - SavingsPlanRateFilterNameRegion SavingsPlanRateFilterName = "region" - SavingsPlanRateFilterNameInstance_type SavingsPlanRateFilterName = "instanceType" - SavingsPlanRateFilterNameProduct_description SavingsPlanRateFilterName = "productDescription" - SavingsPlanRateFilterNameTenancy SavingsPlanRateFilterName = "tenancy" - SavingsPlanRateFilterNameProduct_type SavingsPlanRateFilterName = "productType" - SavingsPlanRateFilterNameService_code SavingsPlanRateFilterName = "serviceCode" - SavingsPlanRateFilterNameUsage_type SavingsPlanRateFilterName = "usageType" - SavingsPlanRateFilterNameOperation SavingsPlanRateFilterName = "operation" + SavingsPlanRateFilterNameRegion SavingsPlanRateFilterName = "region" + SavingsPlanRateFilterNameInstanceType SavingsPlanRateFilterName = "instanceType" + SavingsPlanRateFilterNameProductDescription SavingsPlanRateFilterName = "productDescription" + SavingsPlanRateFilterNameTenancy SavingsPlanRateFilterName = "tenancy" + SavingsPlanRateFilterNameProductType SavingsPlanRateFilterName = "productType" + SavingsPlanRateFilterNameServiceCode SavingsPlanRateFilterName = "serviceCode" + SavingsPlanRateFilterNameUsageType SavingsPlanRateFilterName = "usageType" + SavingsPlanRateFilterNameOperation SavingsPlanRateFilterName = "operation" ) // Values returns all known values for SavingsPlanRateFilterName. 
Note that this @@ -159,11 +159,11 @@ type SavingsPlanRatePropertyKey string // Enum values for SavingsPlanRatePropertyKey const ( - SavingsPlanRatePropertyKeyRegion SavingsPlanRatePropertyKey = "region" - SavingsPlanRatePropertyKeyInstance_type SavingsPlanRatePropertyKey = "instanceType" - SavingsPlanRatePropertyKeyInstance_family SavingsPlanRatePropertyKey = "instanceFamily" - SavingsPlanRatePropertyKeyProduct_description SavingsPlanRatePropertyKey = "productDescription" - SavingsPlanRatePropertyKeyTenancy SavingsPlanRatePropertyKey = "tenancy" + SavingsPlanRatePropertyKeyRegion SavingsPlanRatePropertyKey = "region" + SavingsPlanRatePropertyKeyInstanceType SavingsPlanRatePropertyKey = "instanceType" + SavingsPlanRatePropertyKeyInstanceFamily SavingsPlanRatePropertyKey = "instanceFamily" + SavingsPlanRatePropertyKeyProductDescription SavingsPlanRatePropertyKey = "productDescription" + SavingsPlanRatePropertyKeyTenancy SavingsPlanRatePropertyKey = "tenancy" ) // Values returns all known values for SavingsPlanRatePropertyKey. Note that this @@ -203,9 +203,9 @@ type SavingsPlanRateUnit string // Enum values for SavingsPlanRateUnit const ( - SavingsPlanRateUnitHours SavingsPlanRateUnit = "Hrs" - SavingsPlanRateUnitLambda_gb_second SavingsPlanRateUnit = "Lambda-GB-Second" - SavingsPlanRateUnitRequest SavingsPlanRateUnit = "Request" + SavingsPlanRateUnitHours SavingsPlanRateUnit = "Hrs" + SavingsPlanRateUnitLambdaGbSecond SavingsPlanRateUnit = "Lambda-GB-Second" + SavingsPlanRateUnitRequest SavingsPlanRateUnit = "Request" ) // Values returns all known values for SavingsPlanRateUnit. Note that this can be @@ -223,15 +223,15 @@ type SavingsPlansFilterName string // Enum values for SavingsPlansFilterName const ( - SavingsPlansFilterNameRegion SavingsPlansFilterName = "region" - SavingsPlansFilterNameEc2_instance_family SavingsPlansFilterName = "ec2-instance-family" - SavingsPlansFilterNameCommitment SavingsPlansFilterName = "commitment" - SavingsPlansFilterNameUpfront SavingsPlansFilterName = "upfront" - SavingsPlansFilterNameTerm SavingsPlansFilterName = "term" - SavingsPlansFilterNameSavings_plan_type SavingsPlansFilterName = "savings-plan-type" - SavingsPlansFilterNamePayment_option SavingsPlansFilterName = "payment-option" - SavingsPlansFilterNameStart SavingsPlansFilterName = "start" - SavingsPlansFilterNameEnd SavingsPlansFilterName = "end" + SavingsPlansFilterNameRegion SavingsPlansFilterName = "region" + SavingsPlansFilterNameEc2InstanceFamily SavingsPlansFilterName = "ec2-instance-family" + SavingsPlansFilterNameCommitment SavingsPlansFilterName = "commitment" + SavingsPlansFilterNameUpfront SavingsPlansFilterName = "upfront" + SavingsPlansFilterNameTerm SavingsPlansFilterName = "term" + SavingsPlansFilterNameSavingsPlanType SavingsPlansFilterName = "savings-plan-type" + SavingsPlansFilterNamePaymentOption SavingsPlansFilterName = "payment-option" + SavingsPlansFilterNameStart SavingsPlansFilterName = "start" + SavingsPlansFilterNameEnd SavingsPlansFilterName = "end" ) // Values returns all known values for SavingsPlansFilterName. 
Note that this can @@ -255,12 +255,12 @@ type SavingsPlanState string // Enum values for SavingsPlanState const ( - SavingsPlanStatePayment_pending SavingsPlanState = "payment-pending" - SavingsPlanStatePayment_failed SavingsPlanState = "payment-failed" - SavingsPlanStateActive SavingsPlanState = "active" - SavingsPlanStateRetired SavingsPlanState = "retired" - SavingsPlanStateQueued SavingsPlanState = "queued" - SavingsPlanStateQueued_deleted SavingsPlanState = "queued-deleted" + SavingsPlanStatePaymentPending SavingsPlanState = "payment-pending" + SavingsPlanStatePaymentFailed SavingsPlanState = "payment-failed" + SavingsPlanStateActive SavingsPlanState = "active" + SavingsPlanStateRetired SavingsPlanState = "retired" + SavingsPlanStateQueued SavingsPlanState = "queued" + SavingsPlanStateQueuedDeleted SavingsPlanState = "queued-deleted" ) // Values returns all known values for SavingsPlanState. Note that this can be @@ -281,8 +281,8 @@ type SavingsPlanType string // Enum values for SavingsPlanType const ( - SavingsPlanTypeCompute SavingsPlanType = "Compute" - SavingsPlanTypeEc2_instance SavingsPlanType = "EC2Instance" + SavingsPlanTypeCompute SavingsPlanType = "Compute" + SavingsPlanTypeEc2Instance SavingsPlanType = "EC2Instance" ) // Values returns all known values for SavingsPlanType. Note that this can be diff --git a/service/schemas/types/enums.go b/service/schemas/types/enums.go index 8b31b3e63b4..bf4fa235a62 100644 --- a/service/schemas/types/enums.go +++ b/service/schemas/types/enums.go @@ -6,9 +6,9 @@ type CodeGenerationStatus string // Enum values for CodeGenerationStatus const ( - CodeGenerationStatusCreate_in_progress CodeGenerationStatus = "CREATE_IN_PROGRESS" - CodeGenerationStatusCreate_complete CodeGenerationStatus = "CREATE_COMPLETE" - CodeGenerationStatusCreate_failed CodeGenerationStatus = "CREATE_FAILED" + CodeGenerationStatusCreateInProgress CodeGenerationStatus = "CREATE_IN_PROGRESS" + CodeGenerationStatusCreateComplete CodeGenerationStatus = "CREATE_COMPLETE" + CodeGenerationStatusCreateFailed CodeGenerationStatus = "CREATE_FAILED" ) // Values returns all known values for CodeGenerationStatus. Note that this can be diff --git a/service/secretsmanager/api_op_CancelRotateSecret.go b/service/secretsmanager/api_op_CancelRotateSecret.go index 44822fa4273..88e01c8c990 100644 --- a/service/secretsmanager/api_op_CancelRotateSecret.go +++ b/service/secretsmanager/api_op_CancelRotateSecret.go @@ -23,33 +23,32 @@ import ( // successfully start a rotation, the staging label AWSPENDING must be in one of // the following states: // -// * Not attached to any version at all +// * Not attached to any version at all // -// * Attached -// to the same version as the staging label AWSCURRENT +// * Attached to the +// same version as the staging label AWSCURRENT // -// If the staging label -// AWSPENDING attached to a different version than the version with AWSCURRENT then -// the attempt to rotate fails. Minimum permissions To run this command, you must -// have the following permissions: +// If the staging label AWSPENDING +// attached to a different version than the version with AWSCURRENT then the +// attempt to rotate fails. Minimum permissions To run this command, you must have +// the following permissions: // -// * -// secretsmanager:CancelRotateSecret +// * secretsmanager:CancelRotateSecret // -// Related operations +// Related +// operations // -// * To configure -// rotation for a secret or to manually trigger a rotation, use RotateSecret. 
+// * To configure rotation for a secret or to manually trigger a +// rotation, use RotateSecret. // +// * To get the rotation configuration details for a +// secret, use DescribeSecret. // -// * To get the rotation configuration details for a secret, use DescribeSecret. +// * To list all of the currently available secrets, +// use ListSecrets. // -// -// * To list all of the currently available secrets, use ListSecrets. -// -// * To -// list all of the versions currently associated with a secret, use -// ListSecretVersionIds. +// * To list all of the versions currently associated with a +// secret, use ListSecretVersionIds. func (c *Client) CancelRotateSecret(ctx context.Context, params *CancelRotateSecretInput, optFns ...func(*Options)) (*CancelRotateSecretOutput, error) { if params == nil { params = &CancelRotateSecretInput{} diff --git a/service/secretsmanager/api_op_CreateSecret.go b/service/secretsmanager/api_op_CreateSecret.go index bb3fa65327e..5e83d716457 100644 --- a/service/secretsmanager/api_op_CreateSecret.go +++ b/service/secretsmanager/api_op_CreateSecret.go @@ -26,7 +26,7 @@ import ( // Secrets Manager also creates an initial secret version and automatically // attaches the staging label AWSCURRENT to the new version. // -// * If you call an +// * If you call an // operation to encrypt or decrypt the SecretString or SecretBinary for a secret in // the same account as the calling user and that secret doesn't specify a AWS KMS // encryption key, Secrets Manager uses the account's default AWS managed customer @@ -37,13 +37,13 @@ import ( // the account's AWS-managed CMK, it can result in a one-time significant delay in // returning the result. // -// * If the secret resides in a different AWS account -// from the credentials calling an API that requires encryption or decryption of -// the secret value then you must create and use a custom AWS KMS CMK because you -// can't access the default CMK for the account using credentials from a different -// AWS account. Store the ARN of the CMK in the secret when you create the secret -// or when you update it by including it in the KMSKeyId. If you call an API that -// must encrypt or decrypt SecretString or SecretBinary using credentials from a +// * If the secret resides in a different AWS account from +// the credentials calling an API that requires encryption or decryption of the +// secret value then you must create and use a custom AWS KMS CMK because you can't +// access the default CMK for the account using credentials from a different AWS +// account. Store the ARN of the CMK in the secret when you create the secret or +// when you update it by including it in the KMSKeyId. If you call an API that must +// encrypt or decrypt SecretString or SecretBinary using credentials from a // different account then the AWS KMS key policy must grant cross-account access to // that other account's user or role for both the kms:GenerateDataKey and // kms:Decrypt operations. @@ -51,40 +51,40 @@ import ( // Minimum permissions To run this command, you must have // the following permissions: // -// * secretsmanager:CreateSecret +// * secretsmanager:CreateSecret // -// * -// kms:GenerateDataKey - needed only if you use a customer-managed AWS KMS key to -// encrypt the secret. You do not need this permission to use the account default -// AWS managed CMK for Secrets Manager. +// * kms:GenerateDataKey +// - needed only if you use a customer-managed AWS KMS key to encrypt the secret. 
+// You do not need this permission to use the account default AWS managed CMK for +// Secrets Manager. // -// * kms:Decrypt - needed only if you use -// a customer-managed AWS KMS key to encrypt the secret. You do not need this -// permission to use the account default AWS managed CMK for Secrets Manager. +// * kms:Decrypt - needed only if you use a customer-managed AWS +// KMS key to encrypt the secret. You do not need this permission to use the +// account default AWS managed CMK for Secrets Manager. // -// -// * secretsmanager:TagResource - needed only if you include the Tags +// * +// secretsmanager:TagResource - needed only if you include the Tags // parameter. // // Related operations // -// * To delete a secret, use DeleteSecret. -// +// * To delete a secret, use DeleteSecret. // -// * To modify an existing secret, use UpdateSecret. +// * To +// modify an existing secret, use UpdateSecret. // -// * To create a new version -// of a secret, use PutSecretValue. +// * To create a new version of a +// secret, use PutSecretValue. // -// * To retrieve the encrypted secure string -// and secure binary values, use GetSecretValue. +// * To retrieve the encrypted secure string and +// secure binary values, use GetSecretValue. // -// * To retrieve all other -// details for a secret, use DescribeSecret. This does not include the encrypted -// secure string and secure binary values. +// * To retrieve all other details for a +// secret, use DescribeSecret. This does not include the encrypted secure string +// and secure binary values. // -// * To retrieve the list of secret -// versions associated with the current secret, use DescribeSecret and examine the +// * To retrieve the list of secret versions associated +// with the current secret, use DescribeSecret and examine the // SecretVersionsToStages response value. func (c *Client) CreateSecret(ctx context.Context, params *CreateSecretInput, optFns ...func(*Options)) (*CreateSecretOutput, error) { if params == nil { @@ -127,19 +127,19 @@ type CreateSecretInput struct { // (https://wikipedia.org/wiki/Universally_unique_identifier) value to ensure // uniqueness of your versions within the specified secret. // - // * If the + // * If the // ClientRequestToken value isn't already associated with a version of the secret // then a new version of the secret is created. // - // * If a version with this value + // * If a version with this value // already exists and the version SecretString and SecretBinary values are the same // as those in the request, then the request is ignored. // - // * If a version with - // this value already exists and that version's SecretString and SecretBinary - // values are different from those in the request then the request fails because - // you cannot modify an existing version. Instead, use PutSecretValue to create a - // new version. + // * If a version with this + // value already exists and that version's SecretString and SecretBinary values are + // different from those in the request then the request fails because you cannot + // modify an existing version. Instead, use PutSecretValue to create a new + // version. // // This value becomes the VersionId of the new version. ClientRequestToken *string @@ -193,19 +193,19 @@ type CreateSecretInput struct { // appends tags to the existing list of tags. To remove tags, you must use // UntagResource. // - // * Secrets Manager tag key names are case sensitive. A tag - // with the key "ABC" is a different tag from one with key "abc". 
+ // * Secrets Manager tag key names are case sensitive. A tag with + // the key "ABC" is a different tag from one with key "abc". // - // * If you - // check tags in IAM policy Condition elements as part of your security strategy, - // then adding or removing a tag can change permissions. If the successful - // completion of this operation would result in you losing your permissions for - // this secret, then this operation is blocked and returns an Access Denied - // error. + // * If you check tags + // in IAM policy Condition elements as part of your security strategy, then adding + // or removing a tag can change permissions. If the successful completion of this + // operation would result in you losing your permissions for this secret, then this + // operation is blocked and returns an Access Denied error. // - // This parameter requires a JSON text string argument. For information on - // how to format a JSON parameter for the various command line tool environments, - // see Using JSON for Parameters + // This parameter + // requires a JSON text string argument. For information on how to format a JSON + // parameter for the various command line tool environments, see Using JSON for + // Parameters // (https://docs.aws.amazon.com/cli/latest/userguide/cli-using-param.html#cli-using-param-json) // in the AWS CLI User Guide. For example: // [{"Key":"CostCenter","Value":"12345"},{"Key":"environment","Value":"production"}] @@ -213,27 +213,26 @@ type CreateSecretInput struct { // you should use single quotes to avoid confusion with the double quotes required // in the JSON text. The following basic restrictions apply to tags: // - // * Maximum + // * Maximum // number of tags per secret—50 // - // * Maximum key length—127 Unicode characters in + // * Maximum key length—127 Unicode characters in // UTF-8 // - // * Maximum value length—255 Unicode characters in UTF-8 + // * Maximum value length—255 Unicode characters in UTF-8 // - // * Tag - // keys and values are case sensitive. + // * Tag keys and + // values are case sensitive. // - // * Do not use the aws: prefix in your - // tag names or values because AWS reserves it for AWS use. You can't edit or - // delete tag names or values with this prefix. Tags with this prefix do not count - // against your tags per secret limit. + // * Do not use the aws: prefix in your tag names or + // values because AWS reserves it for AWS use. You can't edit or delete tag names + // or values with this prefix. Tags with this prefix do not count against your tags + // per secret limit. // - // * If you use your tagging schema across - // multiple services and resources, remember other services might have restrictions - // on allowed characters. Generally allowed characters: letters, spaces, and - // numbers representable in UTF-8, plus the following special characters: + - = . _ - // : / @. + // * If you use your tagging schema across multiple services and + // resources, remember other services might have restrictions on allowed + // characters. Generally allowed characters: letters, spaces, and numbers + // representable in UTF-8, plus the following special characters: + - = . _ : / @. 
Tags []*types.Tag } diff --git a/service/secretsmanager/api_op_DeleteResourcePolicy.go b/service/secretsmanager/api_op_DeleteResourcePolicy.go index 84508ae5335..f61778e18b2 100644 --- a/service/secretsmanager/api_op_DeleteResourcePolicy.go +++ b/service/secretsmanager/api_op_DeleteResourcePolicy.go @@ -13,20 +13,19 @@ import ( // Deletes the resource-based permission policy attached to the secret. Minimum // permissions To run this command, you must have the following permissions: // -// * +// * // secretsmanager:DeleteResourcePolicy // // Related operations // -// * To attach a -// resource policy to a secret, use PutResourcePolicy. +// * To attach a resource +// policy to a secret, use PutResourcePolicy. // -// * To retrieve the -// current resource-based policy that's attached to a secret, use -// GetResourcePolicy. +// * To retrieve the current +// resource-based policy that's attached to a secret, use GetResourcePolicy. // -// * To list all of the currently available secrets, use -// ListSecrets. +// * To +// list all of the currently available secrets, use ListSecrets. func (c *Client) DeleteResourcePolicy(ctx context.Context, params *DeleteResourcePolicyInput, optFns ...func(*Options)) (*DeleteResourcePolicyOutput, error) { if params == nil { params = &DeleteResourcePolicyInput{} diff --git a/service/secretsmanager/api_op_DeleteSecret.go b/service/secretsmanager/api_op_DeleteSecret.go index bc04ce391ed..26d39f564d0 100644 --- a/service/secretsmanager/api_op_DeleteSecret.go +++ b/service/secretsmanager/api_op_DeleteSecret.go @@ -22,30 +22,30 @@ import ( // scheduled for deletion. If you need to access that information, you must cancel // the deletion with RestoreSecret and then retrieve the information. // -// * There -// is no explicit operation to delete a version of a secret. Instead, remove all +// * There is +// no explicit operation to delete a version of a secret. Instead, remove all // staging labels from the VersionStage field of a version. That marks the version // as deprecated and allows Secrets Manager to delete it as needed. Versions that // do not have any staging labels do not show up in ListSecretVersionIds unless you // specify IncludeDeprecated. // -// * The permanent secret deletion at the end of -// the waiting period is performed as a background task with low priority. There is -// no guarantee of a specific time after the recovery window for the actual delete +// * The permanent secret deletion at the end of the +// waiting period is performed as a background task with low priority. There is no +// guarantee of a specific time after the recovery window for the actual delete // operation to occur. // // Minimum permissions To run this command, you must have the // following permissions: // -// * secretsmanager:DeleteSecret +// * secretsmanager:DeleteSecret // // Related operations // +// * To +// create a secret, use CreateSecret. // -// * To create a secret, use CreateSecret. -// -// * To cancel deletion of a version -// of a secret before the recovery window has expired, use RestoreSecret. +// * To cancel deletion of a version of a +// secret before the recovery window has expired, use RestoreSecret. 
func (c *Client) DeleteSecret(ctx context.Context, params *DeleteSecretInput, optFns ...func(*Options)) (*DeleteSecretOutput, error) { if params == nil { params = &DeleteSecretInput{} diff --git a/service/secretsmanager/api_op_DescribeSecret.go b/service/secretsmanager/api_op_DescribeSecret.go index 131e5f39717..6959ec6c7d3 100644 --- a/service/secretsmanager/api_op_DescribeSecret.go +++ b/service/secretsmanager/api_op_DescribeSecret.go @@ -17,21 +17,21 @@ import ( // Minimum permissions To run this command, you must have the following // permissions: // -// * secretsmanager:DescribeSecret +// * secretsmanager:DescribeSecret // // Related operations // -// * To -// create a secret, use CreateSecret. +// * To create a +// secret, use CreateSecret. // -// * To modify a secret, use -// UpdateSecret. +// * To modify a secret, use UpdateSecret. // -// * To retrieve the encrypted secret information in a version -// of the secret, use GetSecretValue. +// * To +// retrieve the encrypted secret information in a version of the secret, use +// GetSecretValue. // -// * To list all of the secrets in the AWS -// account, use ListSecrets. +// * To list all of the secrets in the AWS account, use +// ListSecrets. func (c *Client) DescribeSecret(ctx context.Context, params *DescribeSecretInput, optFns ...func(*Options)) (*DescribeSecretOutput, error) { if params == nil { params = &DescribeSecretInput{} diff --git a/service/secretsmanager/api_op_GetRandomPassword.go b/service/secretsmanager/api_op_GetRandomPassword.go index 7cb214ade29..fa54a8e7130 100644 --- a/service/secretsmanager/api_op_GetRandomPassword.go +++ b/service/secretsmanager/api_op_GetRandomPassword.go @@ -16,7 +16,7 @@ import ( // that the system you are generating a password for can support. Minimum // permissions To run this command, you must have the following permissions: // -// * +// * // secretsmanager:GetRandomPassword func (c *Client) GetRandomPassword(ctx context.Context, params *GetRandomPasswordInput, optFns ...func(*Options)) (*GetRandomPasswordOutput, error) { if params == nil { diff --git a/service/secretsmanager/api_op_GetResourcePolicy.go b/service/secretsmanager/api_op_GetResourcePolicy.go index 01f229b1f1a..6642a9b1525 100644 --- a/service/secretsmanager/api_op_GetResourcePolicy.go +++ b/service/secretsmanager/api_op_GetResourcePolicy.go @@ -16,18 +16,18 @@ import ( // your input as a single line JSON string. Minimum permissions To run this // command, you must have the following permissions: // -// * +// * // secretsmanager:GetResourcePolicy // // Related operations // -// * To attach a resource +// * To attach a resource // policy to a secret, use PutResourcePolicy. // -// * To delete the resource-based +// * To delete the resource-based // policy attached to a secret, use DeleteResourcePolicy. // -// * To list all of the +// * To list all of the // currently available secrets, use ListSecrets. func (c *Client) GetResourcePolicy(ctx context.Context, params *GetResourcePolicyInput, optFns ...func(*Options)) (*GetResourcePolicyOutput, error) { if params == nil { diff --git a/service/secretsmanager/api_op_GetSecretValue.go b/service/secretsmanager/api_op_GetSecretValue.go index 9f2543b8fe0..b0d04a93f70 100644 --- a/service/secretsmanager/api_op_GetSecretValue.go +++ b/service/secretsmanager/api_op_GetSecretValue.go @@ -15,20 +15,20 @@ import ( // the specified version of a secret, whichever contains content. 
Minimum // permissions To run this command, you must have the following permissions: // -// * +// * // secretsmanager:GetSecretValue // -// * kms:Decrypt - required only if you use a +// * kms:Decrypt - required only if you use a // customer-managed AWS KMS key to encrypt the secret. You do not need this // permission to use the account's default AWS managed CMK for Secrets // Manager. // // Related operations // -// * To create a new version of the secret with +// * To create a new version of the secret with // different encrypted information, use PutSecretValue. // -// * To retrieve the +// * To retrieve the // non-encrypted details for the secret, use DescribeSecret. func (c *Client) GetSecretValue(ctx context.Context, params *GetSecretValueInput, optFns ...func(*Options)) (*GetSecretValueOutput, error) { if params == nil { diff --git a/service/secretsmanager/api_op_ListSecretVersionIds.go b/service/secretsmanager/api_op_ListSecretVersionIds.go index 7828a68d884..9a1e897eeaa 100644 --- a/service/secretsmanager/api_op_ListSecretVersionIds.go +++ b/service/secretsmanager/api_op_ListSecretVersionIds.go @@ -21,13 +21,13 @@ import ( // next call to the same API to request the next part of the list. Minimum // permissions To run this command, you must have the following permissions: // -// * +// * // secretsmanager:ListSecretVersionIds // // Related operations // -// * To list the -// secrets in an account, use ListSecrets. +// * To list the secrets +// in an account, use ListSecrets. func (c *Client) ListSecretVersionIds(ctx context.Context, params *ListSecretVersionIdsInput, optFns ...func(*Options)) (*ListSecretVersionIdsOutput, error) { if params == nil { params = &ListSecretVersionIdsInput{} diff --git a/service/secretsmanager/api_op_ListSecrets.go b/service/secretsmanager/api_op_ListSecrets.go index bde5db47e4d..d7bf5e6943d 100644 --- a/service/secretsmanager/api_op_ListSecrets.go +++ b/service/secretsmanager/api_op_ListSecrets.go @@ -22,13 +22,13 @@ import ( // the next call to the same API to request the next part of the list. Minimum // permissions To run this command, you must have the following permissions: // -// * +// * // secretsmanager:ListSecrets // // Related operations // -// * To list the versions -// attached to a secret, use ListSecretVersionIds. +// * To list the versions attached +// to a secret, use ListSecretVersionIds. func (c *Client) ListSecrets(ctx context.Context, params *ListSecretsInput, optFns ...func(*Options)) (*ListSecretsOutput, error) { if params == nil { params = &ListSecretsInput{} diff --git a/service/secretsmanager/api_op_PutResourcePolicy.go b/service/secretsmanager/api_op_PutResourcePolicy.go index cc6dfd0df91..10da773fda1 100644 --- a/service/secretsmanager/api_op_PutResourcePolicy.go +++ b/service/secretsmanager/api_op_PutResourcePolicy.go @@ -24,19 +24,19 @@ import ( // the IAM User Guide. Minimum permissions To run this command, you must have the // following permissions: // -// * secretsmanager:PutResourcePolicy +// * secretsmanager:PutResourcePolicy // // Related // operations // -// * To retrieve the resource policy attached to a secret, use +// * To retrieve the resource policy attached to a secret, use // GetResourcePolicy. // -// * To delete the resource-based policy that's attached to -// a secret, use DeleteResourcePolicy. +// * To delete the resource-based policy that's attached to a +// secret, use DeleteResourcePolicy. // -// * To list all of the currently -// available secrets, use ListSecrets. 
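// A minimal sketch of the GetSecretValue call documented above, assuming a
// client constructed elsewhere and the secretsmanager:GetSecretValue
// permission (plus kms:Decrypt for a customer-managed CMK). Per the comment
// above, the result carries either SecretString or SecretBinary, whichever
// contains content.
package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/secretsmanager"
)

func readSecret(ctx context.Context, client *secretsmanager.Client, secretID string) (string, error) {
	out, err := client.GetSecretValue(ctx, &secretsmanager.GetSecretValueInput{
		SecretId: aws.String(secretID),
		// Omitting VersionId and VersionStage returns the AWSCURRENT version.
	})
	if err != nil {
		return "", err
	}
	if out.SecretString != nil {
		return *out.SecretString, nil
	}
	// Binary secrets come back in SecretBinary instead.
	return fmt.Sprintf("%d bytes of binary secret data", len(out.SecretBinary)), nil
}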
+// * To list all of the currently available +// secrets, use ListSecrets. func (c *Client) PutResourcePolicy(ctx context.Context, params *PutResourcePolicyInput, optFns ...func(*Options)) (*PutResourcePolicyOutput, error) { if params == nil { params = &PutResourcePolicyInput{} diff --git a/service/secretsmanager/api_op_PutSecretValue.go b/service/secretsmanager/api_op_PutSecretValue.go index c37611807e1..9370cba74a3 100644 --- a/service/secretsmanager/api_op_PutSecretValue.go +++ b/service/secretsmanager/api_op_PutSecretValue.go @@ -19,71 +19,71 @@ import ( // a secret with the SecretBinary field you must use the AWS CLI or one of the AWS // SDKs. // -// * If this operation creates the first version for the secret then -// Secrets Manager automatically attaches the staging label AWSCURRENT to the new +// * If this operation creates the first version for the secret then Secrets +// Manager automatically attaches the staging label AWSCURRENT to the new // version. // -// * If another version of this secret already exists, then this +// * If another version of this secret already exists, then this // operation does not automatically move any staging labels other than those that // you explicitly specify in the VersionStages parameter. // -// * If this operation +// * If this operation // moves the staging label AWSCURRENT from another version to this version (because // you included it in the StagingLabels parameter) then Secrets Manager also // automatically moves the staging label AWSPREVIOUS to the version that AWSCURRENT // was removed from. // -// * This operation is idempotent. If a version with a -// VersionId with the same value as the ClientRequestToken parameter already exists -// and you specify the same secret data, the operation succeeds but does nothing. -// However, if the secret data is different, then the operation fails because you -// cannot modify an existing version; you can only create new ones. +// * This operation is idempotent. If a version with a VersionId +// with the same value as the ClientRequestToken parameter already exists and you +// specify the same secret data, the operation succeeds but does nothing. However, +// if the secret data is different, then the operation fails because you cannot +// modify an existing version; you can only create new ones. // -// * If you -// call an operation to encrypt or decrypt the SecretString or SecretBinary for a -// secret in the same account as the calling user and that secret doesn't specify a -// AWS KMS encryption key, Secrets Manager uses the account's default AWS managed -// customer master key (CMK) with the alias aws/secretsmanager. If this key doesn't -// already exist in your account then Secrets Manager creates it for you -// automatically. All users and roles in the same AWS account automatically have -// access to use the default CMK. Note that if an Secrets Manager API call results -// in AWS creating the account's AWS-managed CMK, it can result in a one-time -// significant delay in returning the result. +// * If you call an +// operation to encrypt or decrypt the SecretString or SecretBinary for a secret in +// the same account as the calling user and that secret doesn't specify a AWS KMS +// encryption key, Secrets Manager uses the account's default AWS managed customer +// master key (CMK) with the alias aws/secretsmanager. If this key doesn't already +// exist in your account then Secrets Manager creates it for you automatically. 
All +// users and roles in the same AWS account automatically have access to use the +// default CMK. Note that if an Secrets Manager API call results in AWS creating +// the account's AWS-managed CMK, it can result in a one-time significant delay in +// returning the result. // -// * If the secret resides in a -// different AWS account from the credentials calling an API that requires -// encryption or decryption of the secret value then you must create and use a -// custom AWS KMS CMK because you can't access the default CMK for the account -// using credentials from a different AWS account. Store the ARN of the CMK in the -// secret when you create the secret or when you update it by including it in the -// KMSKeyId. If you call an API that must encrypt or decrypt SecretString or -// SecretBinary using credentials from a different account then the AWS KMS key -// policy must grant cross-account access to that other account's user or role for -// both the kms:GenerateDataKey and kms:Decrypt operations. +// * If the secret resides in a different AWS account from +// the credentials calling an API that requires encryption or decryption of the +// secret value then you must create and use a custom AWS KMS CMK because you can't +// access the default CMK for the account using credentials from a different AWS +// account. Store the ARN of the CMK in the secret when you create the secret or +// when you update it by including it in the KMSKeyId. If you call an API that must +// encrypt or decrypt SecretString or SecretBinary using credentials from a +// different account then the AWS KMS key policy must grant cross-account access to +// that other account's user or role for both the kms:GenerateDataKey and +// kms:Decrypt operations. // -// Minimum permissions To -// run this command, you must have the following permissions: +// Minimum permissions To run this command, you must have +// the following permissions: // -// * -// secretsmanager:PutSecretValue +// * secretsmanager:PutSecretValue // -// * kms:GenerateDataKey - needed only if you -// use a customer-managed AWS KMS key to encrypt the secret. You do not need this -// permission to use the account's default AWS managed CMK for Secrets -// Manager. +// * +// kms:GenerateDataKey - needed only if you use a customer-managed AWS KMS key to +// encrypt the secret. You do not need this permission to use the account's default +// AWS managed CMK for Secrets Manager. // // Related operations // -// * To retrieve the encrypted value you store in -// the version of a secret, use GetSecretValue. +// * To retrieve the +// encrypted value you store in the version of a secret, use GetSecretValue. // -// * To create a secret, use -// CreateSecret. +// * To +// create a secret, use CreateSecret. // -// * To get the details for a secret, use DescribeSecret. +// * To get the details for a secret, use +// DescribeSecret. // -// * -// To list the versions attached to a secret, use ListSecretVersionIds. +// * To list the versions attached to a secret, use +// ListSecretVersionIds. func (c *Client) PutSecretValue(ctx context.Context, params *PutSecretValueInput, optFns ...func(*Options)) (*PutSecretValueOutput, error) { if params == nil { params = &PutSecretValueInput{} @@ -134,22 +134,22 @@ type PutSecretValueInput struct { // generate a UUID-type (https://wikipedia.org/wiki/Universally_unique_identifier) // value to ensure uniqueness within the specified secret. 
// - // * If the + // * If the // ClientRequestToken value isn't already associated with a version of the secret // then a new version of the secret is created. // - // * If a version with this value + // * If a version with this value // already exists and that version's SecretString or SecretBinary values are the // same as those in the request then the request is ignored (the operation is // idempotent). // - // * If a version with this value already exists and the version - // of the SecretString and SecretBinary values are different from those in the - // request then the request fails because you cannot modify an existing secret - // version. You can only create new versions to store new secret values. + // * If a version with this value already exists and the version of + // the SecretString and SecretBinary values are different from those in the request + // then the request fails because you cannot modify an existing secret version. You + // can only create new versions to store new secret values. // - // This - // value becomes the VersionId of the new version. + // This value becomes the + // VersionId of the new version. ClientRequestToken *string // (Optional) Specifies binary data that you want to encrypt and store in the new diff --git a/service/secretsmanager/api_op_RestoreSecret.go b/service/secretsmanager/api_op_RestoreSecret.go index c3c586d3563..bdaf29c4a51 100644 --- a/service/secretsmanager/api_op_RestoreSecret.go +++ b/service/secretsmanager/api_op_RestoreSecret.go @@ -14,12 +14,12 @@ import ( // stamp. This makes the secret accessible to query once again. Minimum permissions // To run this command, you must have the following permissions: // -// * +// * // secretsmanager:RestoreSecret // // Related operations // -// * To delete a secret, use +// * To delete a secret, use // DeleteSecret. func (c *Client) RestoreSecret(ctx context.Context, params *RestoreSecretInput, optFns ...func(*Options)) (*RestoreSecretOutput, error) { if params == nil { diff --git a/service/secretsmanager/api_op_RotateSecret.go b/service/secretsmanager/api_op_RotateSecret.go index 3dcc086a7f9..a27dacfd208 100644 --- a/service/secretsmanager/api_op_RotateSecret.go +++ b/service/secretsmanager/api_op_RotateSecret.go @@ -36,37 +36,36 @@ import ( // rotation function must end with the versions of the secret in one of two // states: // -// * The AWSPENDING and AWSCURRENT staging labels are attached to the -// same version of the secret, or +// * The AWSPENDING and AWSCURRENT staging labels are attached to the same +// version of the secret, or // -// * The AWSPENDING staging label is not -// attached to any version of the secret. +// * The AWSPENDING staging label is not attached to any +// version of the secret. // -// If the AWSPENDING staging label is -// present but not attached to the same version as AWSCURRENT then any later -// invocation of RotateSecret assumes that a previous rotation request is still in -// progress and returns an error. Minimum permissions To run this command, you must -// have the following permissions: +// If the AWSPENDING staging label is present but not +// attached to the same version as AWSCURRENT then any later invocation of +// RotateSecret assumes that a previous rotation request is still in progress and +// returns an error. 
Minimum permissions To run this command, you must have the +// following permissions: // -// * secretsmanager:RotateSecret +// * secretsmanager:RotateSecret // -// * -// lambda:InvokeFunction (on the function specified in the secret's -// metadata) +// * lambda:InvokeFunction +// (on the function specified in the secret's metadata) // // Related operations // -// * To list the secrets in your account, use -// ListSecrets. +// * To +// list the secrets in your account, use ListSecrets. // -// * To get the details for a version of a secret, use -// DescribeSecret. +// * To get the details for a +// version of a secret, use DescribeSecret. // -// * To create a new version of a secret, use CreateSecret. +// * To create a new version of a secret, +// use CreateSecret. // -// -// * To attach staging labels to or remove staging labels from a version of a -// secret, use UpdateSecretVersionStage. +// * To attach staging labels to or remove staging labels from a +// version of a secret, use UpdateSecretVersionStage. func (c *Client) RotateSecret(ctx context.Context, params *RotateSecretInput, optFns ...func(*Options)) (*RotateSecretOutput, error) { if params == nil { params = &RotateSecretInput{} diff --git a/service/secretsmanager/api_op_TagResource.go b/service/secretsmanager/api_op_TagResource.go index e5010214f46..ca02d06df8a 100644 --- a/service/secretsmanager/api_op_TagResource.go +++ b/service/secretsmanager/api_op_TagResource.go @@ -17,44 +17,44 @@ import ( // tags to the existing list of tags. To remove tags, you must use UntagResource. // The following basic restrictions apply to tags: // -// * Maximum number of tags -// per secret—50 +// * Maximum number of tags per +// secret—50 // -// * Maximum key length—127 Unicode characters in UTF-8 +// * Maximum key length—127 Unicode characters in UTF-8 // -// * -// Maximum value length—255 Unicode characters in UTF-8 +// * Maximum value +// length—255 Unicode characters in UTF-8 // -// * Tag keys and values -// are case sensitive. +// * Tag keys and values are case +// sensitive. // -// * Do not use the aws: prefix in your tag names or -// values because AWS reserves it for AWS use. You can't edit or delete tag names -// or values with this prefix. Tags with this prefix do not count against your tags -// per secret limit. +// * Do not use the aws: prefix in your tag names or values because AWS +// reserves it for AWS use. You can't edit or delete tag names or values with this +// prefix. Tags with this prefix do not count against your tags per secret +// limit. // -// * If you use your tagging schema across multiple services -// and resources, remember other services might have restrictions on allowed -// characters. Generally allowed characters: letters, spaces, and numbers -// representable in UTF-8, plus the following special characters: + - = . _ : / -// @. +// * If you use your tagging schema across multiple services and resources, +// remember other services might have restrictions on allowed characters. Generally +// allowed characters: letters, spaces, and numbers representable in UTF-8, plus +// the following special characters: + - = . _ : / @. // -// If you use tags as part of your security strategy, then adding or removing a -// tag can change permissions. If successfully completing this operation would -// result in you losing your permissions for this secret, then the operation is -// blocked and returns an Access Denied error. 
Minimum permissions To run this -// command, you must have the following permissions: +// If you use tags as part of +// your security strategy, then adding or removing a tag can change permissions. If +// successfully completing this operation would result in you losing your +// permissions for this secret, then the operation is blocked and returns an Access +// Denied error. Minimum permissions To run this command, you must have the +// following permissions: // -// * -// secretsmanager:TagResource +// * secretsmanager:TagResource // // Related operations // -// * To remove one or more tags -// from the collection attached to a secret, use UntagResource. +// * To +// remove one or more tags from the collection attached to a secret, use +// UntagResource. // -// * To view the -// list of tags attached to a secret, use DescribeSecret. +// * To view the list of tags attached to a secret, use +// DescribeSecret. func (c *Client) TagResource(ctx context.Context, params *TagResourceInput, optFns ...func(*Options)) (*TagResourceOutput, error) { if params == nil { params = &TagResourceInput{} diff --git a/service/secretsmanager/api_op_UntagResource.go b/service/secretsmanager/api_op_UntagResource.go index 5141e683d09..93c2dc1ab74 100644 --- a/service/secretsmanager/api_op_UntagResource.go +++ b/service/secretsmanager/api_op_UntagResource.go @@ -19,15 +19,15 @@ import ( // Minimum permissions To run this command, you must have the following // permissions: // -// * secretsmanager:UntagResource +// * secretsmanager:UntagResource // // Related operations // -// * To -// add one or more tags to the collection attached to a secret, use TagResource. +// * To add one +// or more tags to the collection attached to a secret, use TagResource. // -// -// * To view the list of tags attached to a secret, use DescribeSecret. +// * To view +// the list of tags attached to a secret, use DescribeSecret. func (c *Client) UntagResource(ctx context.Context, params *UntagResourceInput, optFns ...func(*Options)) (*UntagResourceOutput, error) { if params == nil { params = &UntagResourceInput{} diff --git a/service/secretsmanager/api_op_UpdateSecret.go b/service/secretsmanager/api_op_UpdateSecret.go index d102782e6cb..aec6e171a6f 100644 --- a/service/secretsmanager/api_op_UpdateSecret.go +++ b/service/secretsmanager/api_op_UpdateSecret.go @@ -19,16 +19,16 @@ import ( // text string. To encrypt and store binary data as part of the version of a // secret, you must use either the AWS CLI or one of the AWS SDKs. // -// * If a -// version with a VersionId with the same value as the ClientRequestToken parameter -// already exists, the operation results in an error. You cannot modify an existing +// * If a version +// with a VersionId with the same value as the ClientRequestToken parameter already +// exists, the operation results in an error. You cannot modify an existing // version, you can only create a new version. // -// * If you include SecretString -// or SecretBinary to create a new secret version, Secrets Manager automatically +// * If you include SecretString or +// SecretBinary to create a new secret version, Secrets Manager automatically // attaches the staging label AWSCURRENT to the new version. 
// -// * If you call an +// * If you call an // operation to encrypt or decrypt the SecretString or SecretBinary for a secret in // the same account as the calling user and that secret doesn't specify a AWS KMS // encryption key, Secrets Manager uses the account's default AWS managed customer @@ -39,13 +39,13 @@ import ( // the account's AWS-managed CMK, it can result in a one-time significant delay in // returning the result. // -// * If the secret resides in a different AWS account -// from the credentials calling an API that requires encryption or decryption of -// the secret value then you must create and use a custom AWS KMS CMK because you -// can't access the default CMK for the account using credentials from a different -// AWS account. Store the ARN of the CMK in the secret when you create the secret -// or when you update it by including it in the KMSKeyId. If you call an API that -// must encrypt or decrypt SecretString or SecretBinary using credentials from a +// * If the secret resides in a different AWS account from +// the credentials calling an API that requires encryption or decryption of the +// secret value then you must create and use a custom AWS KMS CMK because you can't +// access the default CMK for the account using credentials from a different AWS +// account. Store the ARN of the CMK in the secret when you create the secret or +// when you update it by including it in the KMSKeyId. If you call an API that must +// encrypt or decrypt SecretString or SecretBinary using credentials from a // different account then the AWS KMS key policy must grant cross-account access to // that other account's user or role for both the kms:GenerateDataKey and // kms:Decrypt operations. @@ -53,30 +53,29 @@ import ( // Minimum permissions To run this command, you must have // the following permissions: // -// * secretsmanager:UpdateSecret +// * secretsmanager:UpdateSecret // -// * -// kms:GenerateDataKey - needed only if you use a custom AWS KMS key to encrypt the -// secret. You do not need this permission to use the account's AWS managed CMK for -// Secrets Manager. +// * kms:GenerateDataKey +// - needed only if you use a custom AWS KMS key to encrypt the secret. You do not +// need this permission to use the account's AWS managed CMK for Secrets +// Manager. // -// * kms:Decrypt - needed only if you use a custom AWS KMS -// key to encrypt the secret. You do not need this permission to use the account's -// AWS managed CMK for Secrets Manager. +// * kms:Decrypt - needed only if you use a custom AWS KMS key to encrypt +// the secret. You do not need this permission to use the account's AWS managed CMK +// for Secrets Manager. // // Related operations // -// * To create a new -// secret, use CreateSecret. +// * To create a new secret, use +// CreateSecret. // -// * To add only a new version to an existing -// secret, use PutSecretValue. +// * To add only a new version to an existing secret, use +// PutSecretValue. // -// * To get the details for a secret, use -// DescribeSecret. +// * To get the details for a secret, use DescribeSecret. // -// * To list the versions contained in a secret, use -// ListSecretVersionIds. +// * To +// list the versions contained in a secret, use ListSecretVersionIds. 
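// Example (editorial, not part of the generated diff): a minimal sketch of calling
// UpdateSecret through the client documented here. It assumes an UpdateSecretInput
// field named SecretId (not visible in this hunk) alongside SecretString, which the
// doc comment above does reference, and assumes credentials come from the default
// configuration chain via config.LoadDefaultConfig.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/secretsmanager"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := secretsmanager.NewFromConfig(cfg)

	// Creates a new secret version carrying the updated value; per the doc
	// comment above, Secrets Manager attaches the AWSCURRENT staging label to it.
	_, err = client.UpdateSecret(ctx, &secretsmanager.UpdateSecretInput{
		SecretId:     aws.String("prod/MyApp/credentials"),           // assumed field name and placeholder value
		SecretString: aws.String(`{"user":"app","password":"test"}`), // placeholder value
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("secret updated; the new version now carries AWSCURRENT")
}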
func (c *Client) UpdateSecret(ctx context.Context, params *UpdateSecretInput, optFns ...func(*Options)) (*UpdateSecretOutput, error) { if params == nil { params = &UpdateSecretInput{} @@ -130,21 +129,21 @@ type UpdateSecretInput struct { // prevent the accidental creation of duplicate versions if there are failures and // retries during the Lambda rotation function's processing. // - // * If the + // * If the // ClientRequestToken value isn't already associated with a version of the secret // then a new version of the secret is created. // - // * If a version with this value + // * If a version with this value // already exists and that version's SecretString and SecretBinary values are the // same as those in the request then the request is ignored (the operation is // idempotent). // - // * If a version with this value already exists and that - // version's SecretString and SecretBinary values are different from the request - // then an error occurs because you cannot modify an existing secret value. + // * If a version with this value already exists and that version's + // SecretString and SecretBinary values are different from the request then an + // error occurs because you cannot modify an existing secret value. // - // This - // value becomes the VersionId of the new version. + // This value + // becomes the VersionId of the new version. ClientRequestToken *string // (Optional) Specifies an updated user-provided description of the secret. diff --git a/service/secretsmanager/api_op_UpdateSecretVersionStage.go b/service/secretsmanager/api_op_UpdateSecretVersionStage.go index 3df1d987af4..effb36e71e5 100644 --- a/service/secretsmanager/api_op_UpdateSecretVersionStage.go +++ b/service/secretsmanager/api_op_UpdateSecretVersionStage.go @@ -27,13 +27,13 @@ import ( // Secrets Manager. Minimum permissions To run this command, you must have the // following permissions: // -// * secretsmanager:UpdateSecretVersionStage +// * secretsmanager:UpdateSecretVersionStage // // Related // operations // -// * To get the list of staging labels that are currently -// associated with a version of a secret, use DescribeSecret and examine the +// * To get the list of staging labels that are currently associated +// with a version of a secret, use DescribeSecret and examine the // SecretVersionsToStages response value. func (c *Client) UpdateSecretVersionStage(ctx context.Context, params *UpdateSecretVersionStageInput, optFns ...func(*Options)) (*UpdateSecretVersionStageOutput, error) { if params == nil { diff --git a/service/secretsmanager/types/errors.go b/service/secretsmanager/types/errors.go index 33db50ebd23..f5d3c7088df 100644 --- a/service/secretsmanager/types/errors.go +++ b/service/secretsmanager/types/errors.go @@ -100,10 +100,10 @@ func (e *InvalidParameterException) ErrorFault() smithy.ErrorFault { return smit // You provided a parameter value that is not valid for the current state of the // resource. Possible causes: // -// * You tried to perform the operation on a secret +// * You tried to perform the operation on a secret // that's currently marked deleted. // -// * You tried to enable rotation on a secret +// * You tried to enable rotation on a secret // that doesn't already have a Lambda function ARN configured and you didn't // include such an ARN as a parameter in this call. 
type InvalidRequestException struct { diff --git a/service/securityhub/api_op_BatchImportFindings.go b/service/securityhub/api_op_BatchImportFindings.go index 3c9f59b03e7..5c0714551f4 100644 --- a/service/securityhub/api_op_BatchImportFindings.go +++ b/service/securityhub/api_op_BatchImportFindings.go @@ -19,25 +19,25 @@ import ( // fields and objects, which Security Hub customers use to manage their // investigation workflow. // -// * Confidence +// * Confidence // -// * Criticality +// * Criticality // -// * Note +// * Note // -// * +// * // RelatedFindings // -// * Severity +// * Severity // -// * Types +// * Types // -// * UserDefinedFields +// * UserDefinedFields // -// * +// * // VerificationState // -// * Workflow +// * Workflow func (c *Client) BatchImportFindings(ctx context.Context, params *BatchImportFindingsInput, optFns ...func(*Options)) (*BatchImportFindingsOutput, error) { if params == nil { params = &BatchImportFindingsInput{} diff --git a/service/securityhub/api_op_BatchUpdateFindings.go b/service/securityhub/api_op_BatchUpdateFindings.go index 5ba79526155..93e6f08edcd 100644 --- a/service/securityhub/api_op_BatchUpdateFindings.go +++ b/service/securityhub/api_op_BatchUpdateFindings.go @@ -18,31 +18,30 @@ import ( // affect the value of UpdatedAt for a finding. Master and member accounts can use // BatchUpdateFindings to update the following finding fields and objects. // -// * +// * // Confidence // -// * Criticality +// * Criticality // -// * Note +// * Note // -// * RelatedFindings +// * RelatedFindings // -// * -// Severity +// * Severity // -// * Types +// * Types // -// * UserDefinedFields +// * +// UserDefinedFields // -// * VerificationState +// * VerificationState // -// * -// Workflow +// * Workflow // -// You can configure IAM policies to restrict access to fields and field -// values. For example, you might not want member accounts to be able to suppress -// findings or change the finding severity. See Configuring access to -// BatchUpdateFindings +// You can configure IAM +// policies to restrict access to fields and field values. For example, you might +// not want member accounts to be able to suppress findings or change the finding +// severity. See Configuring access to BatchUpdateFindings // (https://docs.aws.amazon.com/securityhub/latest/userguide/finding-update-batchupdatefindings.html#batchupdatefindings-configure-access) // in the AWS Security Hub User Guide. func (c *Client) BatchUpdateFindings(ctx context.Context, params *BatchUpdateFindingsInput, optFns ...func(*Options)) (*BatchUpdateFindingsOutput, error) { @@ -94,17 +93,17 @@ type BatchUpdateFindingsInput struct { // One or more finding types in the format of namespace/category/classifier that // classify a finding. Valid namespace values are as follows. // - // * Software and + // * Software and // Configuration Checks // - // * TTPs + // * TTPs // - // * Effects + // * Effects // - // * Unusual Behaviors + // * Unusual Behaviors // - // * - // Sensitive Data Identifications + // * Sensitive Data + // Identifications Types []*string // A list of name/value string pairs associated with the finding. These are custom, @@ -114,15 +113,15 @@ type BatchUpdateFindingsInput struct { // Indicates the veracity of a finding. The available values for VerificationState // are as follows. 
// - // * UNKNOWN – The default disposition of a security finding + // * UNKNOWN – The default disposition of a security finding // + // * + // TRUE_POSITIVE – The security finding is confirmed // - // * TRUE_POSITIVE – The security finding is confirmed - // - // * FALSE_POSITIVE – The + // * FALSE_POSITIVE – The // security finding was determined to be a false alarm // - // * BENIGN_POSITIVE – A + // * BENIGN_POSITIVE – A // special case of TRUE_POSITIVE where the finding doesn't pose any threat, is // expected, or both VerificationState types.VerificationState diff --git a/service/securityhub/api_op_EnableSecurityHub.go b/service/securityhub/api_op_EnableSecurityHub.go index cffb675733f..cfbba7cc5fd 100644 --- a/service/securityhub/api_op_EnableSecurityHub.go +++ b/service/securityhub/api_op_EnableSecurityHub.go @@ -16,14 +16,14 @@ import ( // integrated with Security Hub. When you use the EnableSecurityHub operation to // enable Security Hub, you also automatically enable the following standards. // +// * +// CIS AWS Foundations // -// * CIS AWS Foundations +// * AWS Foundational Security Best Practices // -// * AWS Foundational Security Best Practices -// -// You do -// not enable the Payment Card Industry Data Security Standard (PCI DSS) standard. -// To not enable the automatically enabled standards, set EnableDefaultStandards to +// You do not +// enable the Payment Card Industry Data Security Standard (PCI DSS) standard. To +// not enable the automatically enabled standards, set EnableDefaultStandards to // false. After you enable Security Hub, to enable a standard, use the // BatchEnableStandards operation. To disable a standard, use the // BatchDisableStandards operation. To learn more, see Setting Up AWS Security Hub diff --git a/service/securityhub/doc.go b/service/securityhub/doc.go index bc947793a5c..6090c8982c0 100644 --- a/service/securityhub/doc.go +++ b/service/securityhub/doc.go @@ -23,19 +23,19 @@ // invitation was sent from. The following throttling limits apply to using // Security Hub API operations. // -// * BatchEnableStandards - RateLimit of 1 -// request per second, BurstLimit of 1 request per second. +// * BatchEnableStandards - RateLimit of 1 request +// per second, BurstLimit of 1 request per second. // -// * GetFindings - -// RateLimit of 3 requests per second. BurstLimit of 6 requests per second. +// * GetFindings - RateLimit of 3 +// requests per second. BurstLimit of 6 requests per second. // -// * -// UpdateFindings - RateLimit of 1 request per second. BurstLimit of 5 requests per -// second. +// * UpdateFindings - +// RateLimit of 1 request per second. BurstLimit of 5 requests per second. // -// * UpdateStandardsControl - RateLimit of 1 request per second, -// BurstLimit of 5 requests per second. +// * +// UpdateStandardsControl - RateLimit of 1 request per second, BurstLimit of 5 +// requests per second. // -// * All other operations - RateLimit of -// 10 requests per second. BurstLimit of 30 requests per second. +// * All other operations - RateLimit of 10 requests per +// second. BurstLimit of 30 requests per second. 
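// Example (editorial, not part of the generated diff): the enums.go hunks that
// follow rename the generated Security Hub constants from mixed snake_case
// suffixes (for example ComplianceStatusNot_available) to CamelCase
// (ComplianceStatusNotAvailable). A minimal sketch of how calling code references
// the renamed values; the helper below is hypothetical and only illustrates the
// new names, using the value descriptions from the Compliance doc comment.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/securityhub/types"
)

// complianceSummary maps a ComplianceStatus value to a short description,
// using the CamelCase constant names introduced in this change.
func complianceSummary(s types.ComplianceStatus) string {
	switch s {
	case types.ComplianceStatusPassed:
		return "standards check passed for all evaluated resources"
	case types.ComplianceStatusWarning:
		return "information missing or check not supported for this configuration"
	case types.ComplianceStatusFailed:
		return "standards check failed for at least one evaluated resource"
	case types.ComplianceStatusNotAvailable: // previously ComplianceStatusNot_available
		return "check could not be performed"
	default:
		return fmt.Sprintf("unknown status %q", string(s))
	}
}

func main() {
	fmt.Println(complianceSummary(types.ComplianceStatusNotAvailable))
}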
package securityhub diff --git a/service/securityhub/types/enums.go b/service/securityhub/types/enums.go index fad4700f24e..6be26218d74 100644 --- a/service/securityhub/types/enums.go +++ b/service/securityhub/types/enums.go @@ -24,10 +24,10 @@ type ComplianceStatus string // Enum values for ComplianceStatus const ( - ComplianceStatusPassed ComplianceStatus = "PASSED" - ComplianceStatusWarning ComplianceStatus = "WARNING" - ComplianceStatusFailed ComplianceStatus = "FAILED" - ComplianceStatusNot_available ComplianceStatus = "NOT_AVAILABLE" + ComplianceStatusPassed ComplianceStatus = "PASSED" + ComplianceStatusWarning ComplianceStatus = "WARNING" + ComplianceStatusFailed ComplianceStatus = "FAILED" + ComplianceStatusNotAvailable ComplianceStatus = "NOT_AVAILABLE" ) // Values returns all known values for ComplianceStatus. Note that this can be @@ -80,8 +80,8 @@ type IntegrationType string // Enum values for IntegrationType const ( - IntegrationTypeSend_findings_to_security_hub IntegrationType = "SEND_FINDINGS_TO_SECURITY_HUB" - IntegrationTypeReceive_findings_from_security_hub IntegrationType = "RECEIVE_FINDINGS_FROM_SECURITY_HUB" + IntegrationTypeSendFindingsToSecurityHub IntegrationType = "SEND_FINDINGS_TO_SECURITY_HUB" + IntegrationTypeReceiveFindingsFromSecurityHub IntegrationType = "RECEIVE_FINDINGS_FROM_SECURITY_HUB" ) // Values returns all known values for IntegrationType. Note that this can be @@ -98,9 +98,9 @@ type MalwareState string // Enum values for MalwareState const ( - MalwareStateObserved MalwareState = "OBSERVED" - MalwareStateRemoval_failed MalwareState = "REMOVAL_FAILED" - MalwareStateRemoved MalwareState = "REMOVED" + MalwareStateObserved MalwareState = "OBSERVED" + MalwareStateRemovalFailed MalwareState = "REMOVAL_FAILED" + MalwareStateRemoved MalwareState = "REMOVED" ) // Values returns all known values for MalwareState. Note that this can be expanded @@ -118,21 +118,21 @@ type MalwareType string // Enum values for MalwareType const ( - MalwareTypeAdware MalwareType = "ADWARE" - MalwareTypeBlended_threat MalwareType = "BLENDED_THREAT" - MalwareTypeBotnet_agent MalwareType = "BOTNET_AGENT" - MalwareTypeCoin_miner MalwareType = "COIN_MINER" - MalwareTypeExploit_kit MalwareType = "EXPLOIT_KIT" - MalwareTypeKeylogger MalwareType = "KEYLOGGER" - MalwareTypeMacro MalwareType = "MACRO" - MalwareTypePotentially_unwanted MalwareType = "POTENTIALLY_UNWANTED" - MalwareTypeSpyware MalwareType = "SPYWARE" - MalwareTypeRansomware MalwareType = "RANSOMWARE" - MalwareTypeRemote_access MalwareType = "REMOTE_ACCESS" - MalwareTypeRootkit MalwareType = "ROOTKIT" - MalwareTypeTrojan MalwareType = "TROJAN" - MalwareTypeVirus MalwareType = "VIRUS" - MalwareTypeWorm MalwareType = "WORM" + MalwareTypeAdware MalwareType = "ADWARE" + MalwareTypeBlendedThreat MalwareType = "BLENDED_THREAT" + MalwareTypeBotnetAgent MalwareType = "BOTNET_AGENT" + MalwareTypeCoinMiner MalwareType = "COIN_MINER" + MalwareTypeExploitKit MalwareType = "EXPLOIT_KIT" + MalwareTypeKeylogger MalwareType = "KEYLOGGER" + MalwareTypeMacro MalwareType = "MACRO" + MalwareTypePotentiallyUnwanted MalwareType = "POTENTIALLY_UNWANTED" + MalwareTypeSpyware MalwareType = "SPYWARE" + MalwareTypeRansomware MalwareType = "RANSOMWARE" + MalwareTypeRemoteAccess MalwareType = "REMOTE_ACCESS" + MalwareTypeRootkit MalwareType = "ROOTKIT" + MalwareTypeTrojan MalwareType = "TROJAN" + MalwareTypeVirus MalwareType = "VIRUS" + MalwareTypeWorm MalwareType = "WORM" ) // Values returns all known values for MalwareType. 
Note that this can be expanded @@ -162,8 +162,8 @@ type MapFilterComparison string // Enum values for MapFilterComparison const ( - MapFilterComparisonEquals MapFilterComparison = "EQUALS" - MapFilterComparisonNot_equals MapFilterComparison = "NOT_EQUALS" + MapFilterComparisonEquals MapFilterComparison = "EQUALS" + MapFilterComparisonNotEquals MapFilterComparison = "NOT_EQUALS" ) // Values returns all known values for MapFilterComparison. Note that this can be @@ -198,9 +198,9 @@ type Partition string // Enum values for Partition const ( - PartitionAws Partition = "aws" - PartitionAws_cn Partition = "aws-cn" - PartitionAws_us_gov Partition = "aws-us-gov" + PartitionAws Partition = "aws" + PartitionAwsCn Partition = "aws-cn" + PartitionAwsUsGov Partition = "aws-us-gov" ) // Values returns all known values for Partition. Note that this can be expanded in @@ -324,10 +324,10 @@ type StringFilterComparison string // Enum values for StringFilterComparison const ( - StringFilterComparisonEquals StringFilterComparison = "EQUALS" - StringFilterComparisonPrefix StringFilterComparison = "PREFIX" - StringFilterComparisonNot_equals StringFilterComparison = "NOT_EQUALS" - StringFilterComparisonPrefix_not_equals StringFilterComparison = "PREFIX_NOT_EQUALS" + StringFilterComparisonEquals StringFilterComparison = "EQUALS" + StringFilterComparisonPrefix StringFilterComparison = "PREFIX" + StringFilterComparisonNotEquals StringFilterComparison = "NOT_EQUALS" + StringFilterComparisonPrefixNotEquals StringFilterComparison = "PREFIX_NOT_EQUALS" ) // Values returns all known values for StringFilterComparison. Note that this can @@ -346,12 +346,12 @@ type ThreatIntelIndicatorCategory string // Enum values for ThreatIntelIndicatorCategory const ( - ThreatIntelIndicatorCategoryBackdoor ThreatIntelIndicatorCategory = "BACKDOOR" - ThreatIntelIndicatorCategoryCard_stealer ThreatIntelIndicatorCategory = "CARD_STEALER" - ThreatIntelIndicatorCategoryCommand_and_control ThreatIntelIndicatorCategory = "COMMAND_AND_CONTROL" - ThreatIntelIndicatorCategoryDrop_site ThreatIntelIndicatorCategory = "DROP_SITE" - ThreatIntelIndicatorCategoryExploit_site ThreatIntelIndicatorCategory = "EXPLOIT_SITE" - ThreatIntelIndicatorCategoryKeylogger ThreatIntelIndicatorCategory = "KEYLOGGER" + ThreatIntelIndicatorCategoryBackdoor ThreatIntelIndicatorCategory = "BACKDOOR" + ThreatIntelIndicatorCategoryCardStealer ThreatIntelIndicatorCategory = "CARD_STEALER" + ThreatIntelIndicatorCategoryCommandAndControl ThreatIntelIndicatorCategory = "COMMAND_AND_CONTROL" + ThreatIntelIndicatorCategoryDropSite ThreatIntelIndicatorCategory = "DROP_SITE" + ThreatIntelIndicatorCategoryExploitSite ThreatIntelIndicatorCategory = "EXPLOIT_SITE" + ThreatIntelIndicatorCategoryKeylogger ThreatIntelIndicatorCategory = "KEYLOGGER" ) // Values returns all known values for ThreatIntelIndicatorCategory. 
Note that this @@ -372,17 +372,17 @@ type ThreatIntelIndicatorType string // Enum values for ThreatIntelIndicatorType const ( - ThreatIntelIndicatorTypeDomain ThreatIntelIndicatorType = "DOMAIN" - ThreatIntelIndicatorTypeEmail_address ThreatIntelIndicatorType = "EMAIL_ADDRESS" - ThreatIntelIndicatorTypeHash_md5 ThreatIntelIndicatorType = "HASH_MD5" - ThreatIntelIndicatorTypeHash_sha1 ThreatIntelIndicatorType = "HASH_SHA1" - ThreatIntelIndicatorTypeHash_sha256 ThreatIntelIndicatorType = "HASH_SHA256" - ThreatIntelIndicatorTypeHash_sha512 ThreatIntelIndicatorType = "HASH_SHA512" - ThreatIntelIndicatorTypeIpv4_address ThreatIntelIndicatorType = "IPV4_ADDRESS" - ThreatIntelIndicatorTypeIpv6_address ThreatIntelIndicatorType = "IPV6_ADDRESS" - ThreatIntelIndicatorTypeMutex ThreatIntelIndicatorType = "MUTEX" - ThreatIntelIndicatorTypeProcess ThreatIntelIndicatorType = "PROCESS" - ThreatIntelIndicatorTypeUrl ThreatIntelIndicatorType = "URL" + ThreatIntelIndicatorTypeDomain ThreatIntelIndicatorType = "DOMAIN" + ThreatIntelIndicatorTypeEmailAddress ThreatIntelIndicatorType = "EMAIL_ADDRESS" + ThreatIntelIndicatorTypeHashMd5 ThreatIntelIndicatorType = "HASH_MD5" + ThreatIntelIndicatorTypeHashSha1 ThreatIntelIndicatorType = "HASH_SHA1" + ThreatIntelIndicatorTypeHashSha256 ThreatIntelIndicatorType = "HASH_SHA256" + ThreatIntelIndicatorTypeHashSha512 ThreatIntelIndicatorType = "HASH_SHA512" + ThreatIntelIndicatorTypeIpv4Address ThreatIntelIndicatorType = "IPV4_ADDRESS" + ThreatIntelIndicatorTypeIpv6Address ThreatIntelIndicatorType = "IPV6_ADDRESS" + ThreatIntelIndicatorTypeMutex ThreatIntelIndicatorType = "MUTEX" + ThreatIntelIndicatorTypeProcess ThreatIntelIndicatorType = "PROCESS" + ThreatIntelIndicatorTypeUrl ThreatIntelIndicatorType = "URL" ) // Values returns all known values for ThreatIntelIndicatorType. Note that this can @@ -408,10 +408,10 @@ type VerificationState string // Enum values for VerificationState const ( - VerificationStateUnknown VerificationState = "UNKNOWN" - VerificationStateTrue_positive VerificationState = "TRUE_POSITIVE" - VerificationStateFalse_positive VerificationState = "FALSE_POSITIVE" - VerificationStateBenign_positive VerificationState = "BENIGN_POSITIVE" + VerificationStateUnknown VerificationState = "UNKNOWN" + VerificationStateTruePositive VerificationState = "TRUE_POSITIVE" + VerificationStateFalsePositive VerificationState = "FALSE_POSITIVE" + VerificationStateBenignPositive VerificationState = "BENIGN_POSITIVE" ) // Values returns all known values for VerificationState. Note that this can be @@ -430,11 +430,11 @@ type WorkflowState string // Enum values for WorkflowState const ( - WorkflowStateNew WorkflowState = "NEW" - WorkflowStateAssigned WorkflowState = "ASSIGNED" - WorkflowStateIn_progress WorkflowState = "IN_PROGRESS" - WorkflowStateDeferred WorkflowState = "DEFERRED" - WorkflowStateResolved WorkflowState = "RESOLVED" + WorkflowStateNew WorkflowState = "NEW" + WorkflowStateAssigned WorkflowState = "ASSIGNED" + WorkflowStateInProgress WorkflowState = "IN_PROGRESS" + WorkflowStateDeferred WorkflowState = "DEFERRED" + WorkflowStateResolved WorkflowState = "RESOLVED" ) // Values returns all known values for WorkflowState. Note that this can be diff --git a/service/securityhub/types/types.go b/service/securityhub/types/types.go index 7393cde25c9..8f9ad7cdc7e 100644 --- a/service/securityhub/types/types.go +++ b/service/securityhub/types/types.go @@ -231,12 +231,12 @@ type AwsApiGatewayStageDetails struct { // alphanumeric and underscore characters. 
Variable values can contain the // following characters: // - // * Uppercase and lowercase letters + // * Uppercase and lowercase letters // - // * Numbers + // * Numbers // - // - // * Special characters -._~:/?#&=, + // * Special + // characters -._~:/?#&=, Variables map[string]*string // The ARN of the web ACL associated with the stage. @@ -357,12 +357,12 @@ type AwsApiGatewayV2StageDetails struct { // alphanumeric and underscore characters. Variable values can contain the // following characters: // - // * Uppercase and lowercase letters - // - // * Numbers + // * Uppercase and lowercase letters // + // * Numbers // - // * Special characters -._~:/?#&=, + // * Special + // characters -._~:/?#&=, StageVariables map[string]*string } @@ -504,11 +504,11 @@ type AwsCertificateManagerCertificateDetails struct { // Contains information about one of the following: // -// * The initial validation -// of each domain name that occurs as a result of the RequestCertificate request -// +// * The initial validation of +// each domain name that occurs as a result of the RequestCertificate request // -// * The validation of each domain name in the certificate, as it pertains to AWS +// * +// The validation of each domain name in the certificate, as it pertains to AWS // Certificate Manager managed renewal type AwsCertificateManagerCertificateDomainValidationOption struct { @@ -608,15 +608,15 @@ type AwsCloudFrontDistributionCacheBehavior struct { // The protocol that viewers can use to access the files in an origin. You can // specify the following options: // - // * allow-all - Viewers can use HTTP or - // HTTPS. + // * allow-all - Viewers can use HTTP or HTTPS. // - // * redirect-to-https - CloudFront responds to HTTP requests with an - // HTTP status code of 301 (Moved Permanently) and the HTTPS URL. The viewer then - // uses the new URL to resubmit. + // * + // redirect-to-https - CloudFront responds to HTTP requests with an HTTP status + // code of 301 (Moved Permanently) and the HTTPS URL. The viewer then uses the new + // URL to resubmit. // - // * https-only - CloudFront responds to HTTP - // request with an HTTP status code of 403 (Forbidden). + // * https-only - CloudFront responds to HTTP request with an + // HTTP status code of 403 (Forbidden). ViewerProtocolPolicy *string } @@ -633,15 +633,15 @@ type AwsCloudFrontDistributionDefaultCacheBehavior struct { // The protocol that viewers can use to access the files in an origin. You can // specify the following options: // - // * allow-all - Viewers can use HTTP or - // HTTPS. + // * allow-all - Viewers can use HTTP or HTTPS. // - // * redirect-to-https - CloudFront responds to HTTP requests with an - // HTTP status code of 301 (Moved Permanently) and the HTTPS URL. The viewer then - // uses the new URL to resubmit. + // * + // redirect-to-https - CloudFront responds to HTTP requests with an HTTP status + // code of 301 (Moved Permanently) and the HTTPS URL. The viewer then uses the new + // URL to resubmit. // - // * https-only - CloudFront responds to HTTP - // request with an HTTP status code of 403 (Forbidden). + // * https-only - CloudFront responds to HTTP request with an + // HTTP status code of 403 (Forbidden). ViewerProtocolPolicy *string } @@ -866,16 +866,16 @@ type AwsCodeBuildProjectEnvironment struct { // The type of credentials AWS CodeBuild uses to pull images in your build. Valid // values: // - // * CODEBUILD specifies that AWS CodeBuild uses its own credentials. 
- // This requires that you modify your ECR repository policy to trust the AWS - // CodeBuild service principal. + // * CODEBUILD specifies that AWS CodeBuild uses its own credentials. This + // requires that you modify your ECR repository policy to trust the AWS CodeBuild + // service principal. // - // * SERVICE_ROLE specifies that AWS CodeBuild - // uses your build project's service role. + // * SERVICE_ROLE specifies that AWS CodeBuild uses your build + // project's service role. // - // When you use a cross-account or private - // registry image, you must use SERVICE_ROLE credentials. When you use an AWS - // CodeBuild curated image, you must use CODEBUILD credentials. + // When you use a cross-account or private registry image, + // you must use SERVICE_ROLE credentials. When you use an AWS CodeBuild curated + // image, you must use CODEBUILD credentials. ImagePullCredentialsType *string // The credentials for access to a private registry. @@ -924,57 +924,57 @@ type AwsCodeBuildProjectSource struct { // Information about the location of the source code to be built. Valid values // include: // - // * For source code settings that are specified in the source action - // of a pipeline in AWS CodePipeline, location should not be specified. If it is + // * For source code settings that are specified in the source action of + // a pipeline in AWS CodePipeline, location should not be specified. If it is // specified, AWS CodePipeline ignores it. This is because AWS CodePipeline uses // the settings in a pipeline's source action instead of this value. // - // * For - // source code in an AWS CodeCommit repository, the HTTPS clone URL to the - // repository that contains the source code and the build spec file (for example, + // * For source + // code in an AWS CodeCommit repository, the HTTPS clone URL to the repository that + // contains the source code and the build spec file (for example, // https://git-codecommit.region-ID.amazonaws.com/v1/repos/repo-name ). // - // * For + // * For // source code in an S3 input bucket, one of the following. // - // * The path to - // the ZIP file that contains the source code (for example, + // * The path to the ZIP + // file that contains the source code (for example, // bucket-name/path/to/object-name.zip). // - // * The path to the folder that - // contains the source code (for example, - // bucket-name/path/to/source-code/folder/). + // * The path to the folder that contains + // the source code (for example, bucket-name/path/to/source-code/folder/). // - // * For source code in a GitHub + // * For + // source code in a GitHub repository, the HTTPS clone URL to the repository that + // contains the source and the build spec file. + // + // * For source code in a Bitbucket // repository, the HTTPS clone URL to the repository that contains the source and // the build spec file. - // - // * For source code in a Bitbucket repository, the HTTPS - // clone URL to the repository that contains the source and the build spec file. Location *string // The type of repository that contains the source code to be built. Valid values // are: // - // * BITBUCKET - The source code is in a Bitbucket repository. - // - // * - // CODECOMMIT - The source code is in an AWS CodeCommit repository. + // * BITBUCKET - The source code is in a Bitbucket repository. // - // * - // CODEPIPELINE - The source code settings are specified in the source action of a - // pipeline in AWS CodePipeline. + // * CODECOMMIT + // - The source code is in an AWS CodeCommit repository. 
// - // * GITHUB - The source code is in a GitHub - // repository. + // * CODEPIPELINE - The + // source code settings are specified in the source action of a pipeline in AWS + // CodePipeline. // - // * GITHUB_ENTERPRISE - The source code is in a GitHub Enterprise - // repository. + // * GITHUB - The source code is in a GitHub repository. // - // * NO_SOURCE - The project does not have input source code. + // * + // GITHUB_ENTERPRISE - The source code is in a GitHub Enterprise repository. // + // * + // NO_SOURCE - The project does not have input source code. // - // * S3 - The source code is in an S3 input bucket. + // * S3 - The source code + // is in an S3 input bucket. Type *string } @@ -1639,10 +1639,10 @@ type AwsElasticsearchDomainDomainEndpointOptions struct { // The TLS security policy to apply to the HTTPS endpoint of the Elasticsearch // domain. Valid values: // - // * Policy-Min-TLS-1-0-2019-07, which supports TLSv1.0 - // and higher + // * Policy-Min-TLS-1-0-2019-07, which supports TLSv1.0 and + // higher // - // * Policy-Min-TLS-1-2-2019-07, which only supports TLSv1.2 + // * Policy-Min-TLS-1-2-2019-07, which only supports TLSv1.2 TLSSecurityPolicy *string } @@ -2760,15 +2760,15 @@ type AwsRdsDbInstanceAssociatedRole struct { // Describes the state of the association between the IAM role and the DB instance. // The Status property returns one of the following values: // - // * ACTIVE - The IAM + // * ACTIVE - The IAM // role ARN is associated with the DB instance and can be used to access other AWS // services on your behalf. // - // * PENDING - The IAM role ARN is being associated - // with the DB instance. + // * PENDING - The IAM role ARN is being associated with + // the DB instance. // - // * INVALID - The IAM role ARN is associated with the - // DB instance. But the DB instance is unable to assume the IAM role in order to + // * INVALID - The IAM role ARN is associated with the DB + // instance. But the DB instance is unable to assume the IAM role in order to // access other AWS services on your behalf. Status *string } @@ -2869,13 +2869,13 @@ type AwsRdsDbInstanceDetails struct { // accounts is enabled, and otherwise false. IAM database authentication can be // enabled for the following database engines. // - // * For MySQL 5.6, minor version + // * For MySQL 5.6, minor version // 5.6.34 or higher // - // * For MySQL 5.7, minor version 5.7.16 or higher + // * For MySQL 5.7, minor version 5.7.16 or higher // - // * - // Aurora 5.6 or higher + // * Aurora 5.6 + // or higher IAMDatabaseAuthenticationEnabled *bool // Indicates when the DB instance was created. Uses the date-time format specified @@ -3360,20 +3360,19 @@ type AwsRedshiftClusterDetails struct { // The availability status of the cluster for queries. Possible values are the // following: // - // * Available - The cluster is available for queries. + // * Available - The cluster is available for queries. // - // * - // Unavailable - The cluster is not available for queries. + // * Unavailable - + // The cluster is not available for queries. // - // * Maintenance - The - // cluster is intermittently available for queries due to maintenance activities. + // * Maintenance - The cluster is + // intermittently available for queries due to maintenance activities. // + // * Modifying + // -The cluster is intermittently available for queries due to changes that modify + // the cluster. // - // * Modifying -The cluster is intermittently available for queries due to changes - // that modify the cluster. 
- // - // * Failed - The cluster failed and is not available - // for queries. + // * Failed - The cluster failed and is not available for queries. ClusterAvailabilityStatus *string // Indicates when the cluster was created. Uses the date-time format specified in @@ -4232,18 +4231,18 @@ type AwsSecurityFindingFilters struct { // The status of the investigation into a finding. Allowed values are the // following. // - // * NEW - The initial state of a finding, before it is reviewed. - // + // * NEW - The initial state of a finding, before it is reviewed. // - // * NOTIFIED - Indicates that the resource owner has been notified about the + // * + // NOTIFIED - Indicates that the resource owner has been notified about the // security issue. Used when the initial reviewer is not the resource owner, and // needs intervention from the resource owner. // - // * SUPPRESSED - The finding will - // not be reviewed again and will not be acted upon. + // * SUPPRESSED - The finding will not + // be reviewed again and will not be acted upon. // - // * RESOLVED - The finding - // was reviewed and remediated and is now considered resolved. + // * RESOLVED - The finding was + // reviewed and remediated and is now considered resolved. WorkflowStatus []*StringFilter } @@ -4408,21 +4407,20 @@ type Compliance struct { // The result of a standards check. The valid values for Status are as follows. // + // * + // PASSED - Standards check passed for all evaluated resources. // - // * PASSED - Standards check passed for all evaluated resources. - // - // * - // WARNING - Some information is missing or this check is not supported for your - // configuration. + // * WARNING - Some + // information is missing or this check is not supported for your configuration. // - // * FAILED - Standards check failed for at least one - // evaluated resource. + // * + // FAILED - Standards check failed for at least one evaluated resource. // - // * NOT_AVAILABLE - Check could not be performed due - // to a service outage, API error, or because the result of the AWS Config - // evaluation was NOT_APPLICABLE. If the AWS Config evaluation result was - // NOT_APPLICABLE, then after 3 days, Security Hub automatically archives the - // finding. + // * + // NOT_AVAILABLE - Check could not be performed due to a service outage, API error, + // or because the result of the AWS Config evaluation was NOT_APPLICABLE. If the + // AWS Config evaluation result was NOT_APPLICABLE, then after 3 days, Security Hub + // automatically archives the finding. Status ComplianceStatus // For findings generated from controls, a list of reasons behind the value of @@ -4693,25 +4691,25 @@ type Member struct { // The status of the relationship between the member account and its master // account. The status can have one of the following values: // - // * CREATED - - // Indicates that the master account added the member account, but has not yet - // invited the member account. + // * CREATED - Indicates + // that the master account added the member account, but has not yet invited the + // member account. // - // * INVITED - Indicates that the master account - // invited the member account. The member account has not yet responded to the - // invitation. + // * INVITED - Indicates that the master account invited the + // member account. The member account has not yet responded to the invitation. // - // * ASSOCIATED - Indicates that the member account accepted the - // invitation. 
+ // * + // ASSOCIATED - Indicates that the member account accepted the invitation. // - // * REMOVED - Indicates that the master account disassociated the - // member account. + // * + // REMOVED - Indicates that the master account disassociated the member account. // - // * RESIGNED - Indicates that the member account - // disassociated themselves from the master account. + // * + // RESIGNED - Indicates that the member account disassociated themselves from the + // master account. // - // * DELETED - Indicates - // that the master account deleted the member account. + // * DELETED - Indicates that the master account deleted the + // member account. MemberStatus *string // The timestamp for the date and time when the member account was updated. @@ -4966,10 +4964,10 @@ type Product struct { // The types of integration that the product supports. Available values are the // following. // - // * SEND_FINDINGS_TO_SECURITY_HUB - Indicates that the integration + // * SEND_FINDINGS_TO_SECURITY_HUB - Indicates that the integration // sends findings to Security Hub. // - // * RECEIVE_FINDINGS_FROM_SECURITY_HUB - + // * RECEIVE_FINDINGS_FROM_SECURITY_HUB - // Indicates that the integration receives findings from Security Hub. IntegrationTypes []IntegrationType @@ -5179,14 +5177,14 @@ type ResourceDetails struct { // Details about a resource that are not available in a type-specific details // object. Use the Other object in the following cases. // - // * The type-specific - // object does not contain all of the fields that you want to populate. In this - // case, first use the type-specific object to populate those fields. Use the Other + // * The type-specific object + // does not contain all of the fields that you want to populate. In this case, + // first use the type-specific object to populate those fields. Use the Other // object to populate the fields that are missing from the type-specific object. // - // - // * The resource type does not have a corresponding object. This includes - // resources for which the type is Other. + // * + // The resource type does not have a corresponding object. This includes resources + // for which the type is Other. Other map[string]*string } @@ -5211,34 +5209,34 @@ type Severity struct { // The severity value of the finding. The allowed values are the following. // - // * + // * // INFORMATIONAL - No issue was found. // - // * LOW - The issue does not require - // action on its own. - // - // * MEDIUM - The issue must be addressed but not - // urgently. + // * LOW - The issue does not require action + // on its own. // - // * HIGH - The issue must be addressed as a priority. + // * MEDIUM - The issue must be addressed but not urgently. // - // * - // CRITICAL - The issue must be remediated immediately to avoid it escalating. + // * HIGH - + // The issue must be addressed as a priority. // - // If - // you provide Normalized and do not provide Label, then Label is set automatically - // as follows. + // * CRITICAL - The issue must be + // remediated immediately to avoid it escalating. // - // * 0 - INFORMATIONAL + // If you provide Normalized and do + // not provide Label, then Label is set automatically as follows. // - // * 1–39 - LOW + // * 0 - + // INFORMATIONAL // - // * 40–69 - MEDIUM + // * 1–39 - LOW // + // * 40–69 - MEDIUM // // * 70–89 - HIGH // - // * 90–100 - CRITICAL + // * 90–100 - + // CRITICAL Label SeverityLabel // Deprecated. The normalized severity of a finding. 
This attribute is being @@ -5246,16 +5244,16 @@ type Severity struct { // and do not provide Normalized, then Normalized is set automatically as // follows. // - // * INFORMATIONAL - 0 + // * INFORMATIONAL - 0 // - // * LOW - 1 + // * LOW - 1 // - // * MEDIUM - 40 + // * MEDIUM - 40 // - // * HIGH - // - 70 + // * HIGH - 70 // - // * CRITICAL - 90 + // * CRITICAL + // - 90 Normalized *int32 // The native severity from the finding product that generated the finding. @@ -5272,35 +5270,35 @@ type SeverityUpdate struct { // The severity value of the finding. The allowed values are the following. // - // * + // * // INFORMATIONAL - No issue was found. // - // * LOW - The issue does not require - // action on its own. + // * LOW - The issue does not require action + // on its own. // - // * MEDIUM - The issue must be addressed but not - // urgently. + // * MEDIUM - The issue must be addressed but not urgently. // - // * HIGH - The issue must be addressed as a priority. + // * HIGH - + // The issue must be addressed as a priority. // - // * - // CRITICAL - The issue must be remediated immediately to avoid it escalating. + // * CRITICAL - The issue must be + // remediated immediately to avoid it escalating. Label SeverityLabel // The normalized severity for the finding. This attribute is to be deprecated in // favor of Label. If you provide Normalized and do not provide Label, Label is set // automatically as follows. // - // * 0 - INFORMATIONAL + // * 0 - INFORMATIONAL // - // * 1–39 - LOW + // * 1–39 - LOW // - // * - // 40–69 - MEDIUM + // * 40–69 - + // MEDIUM // - // * 70–89 - HIGH + // * 70–89 - HIGH // - // * 90–100 - CRITICAL + // * 90–100 - CRITICAL Normalized *int32 // The native severity as defined by the AWS service or integrated partner product @@ -5457,31 +5455,30 @@ type StringFilter struct { // for values that contain the filter criteria value, use one of the following // comparison operators: // - // * To search for values that exactly match the filter + // * To search for values that exactly match the filter // value, use EQUALS. For example, the filter ResourceType EQUALS // AwsEc2SecurityGroup only matches findings that have a resource type of // AwsEc2SecurityGroup. // - // * To search for values that start with the filter - // value, use PREFIX. For example, the filter ResourceType PREFIX AwsIam matches - // findings that have a resource type that starts with AwsIam. Findings with a - // resource type of AwsIamPolicy, AwsIamRole, or AwsIamUser would all - // match. - // - // EQUALS and PREFIX filters on the same field are joined by OR. A finding - // matches if it matches any one of those filters. To search for values that do not - // contain the filter criteria value, use one of the following comparison - // operators: - // - // * To search for values that do not exactly match the filter - // value, use NOT_EQUALS. For example, the filter ResourceType NOT_EQUALS - // AwsIamPolicy matches findings that have a resource type other than - // AwsIamPolicy. - // - // * To search for values that do not start with the filter - // value, use PREFIX_NOT_EQUALS. For example, the filter ResourceType - // PREFIX_NOT_EQUALS AwsIam matches findings that have a resource type that does - // not start with AwsIam. Findings with a resource type of AwsIamPolicy, + // * To search for values that start with the filter value, + // use PREFIX. For example, the filter ResourceType PREFIX AwsIam matches findings + // that have a resource type that starts with AwsIam. 
Findings with a resource type + // of AwsIamPolicy, AwsIamRole, or AwsIamUser would all match. + // + // EQUALS and PREFIX + // filters on the same field are joined by OR. A finding matches if it matches any + // one of those filters. To search for values that do not contain the filter + // criteria value, use one of the following comparison operators: + // + // * To search for + // values that do not exactly match the filter value, use NOT_EQUALS. For example, + // the filter ResourceType NOT_EQUALS AwsIamPolicy matches findings that have a + // resource type other than AwsIamPolicy. + // + // * To search for values that do not start + // with the filter value, use PREFIX_NOT_EQUALS. For example, the filter + // ResourceType PREFIX_NOT_EQUALS AwsIam matches findings that have a resource type + // that does not start with AwsIam. Findings with a resource type of AwsIamPolicy, // AwsIamRole, or AwsIamUser would all be excluded from the results. // // NOT_EQUALS @@ -5497,15 +5494,15 @@ type StringFilter struct { // or AwsEc2. It then excludes findings that have a resource type of AwsIamPolicy // and findings that have a resource type of AwsEc2NetworkInterface. // - // * + // * // ResourceType PREFIX AwsIam // - // * ResourceType PREFIX AwsEc2 + // * ResourceType PREFIX AwsEc2 // - // * ResourceType + // * ResourceType // NOT_EQUALS AwsIamPolicy // - // * ResourceType NOT_EQUALS AwsEc2NetworkInterface + // * ResourceType NOT_EQUALS AwsEc2NetworkInterface Comparison StringFilterComparison // The string filter value. Filter values are case sensitive. For example, the @@ -5598,16 +5595,15 @@ type WafAction struct { // Specifies how you want AWS WAF to respond to requests that match the settings in // a rule. Valid settings include the following: // - // * ALLOW - AWS WAF allows + // * ALLOW - AWS WAF allows // requests // - // * BLOCK - AWS WAF blocks requests + // * BLOCK - AWS WAF blocks requests // - // * COUNT - AWS WAF - // increments a counter of the requests that match all of the conditions in the - // rule. AWS WAF then continues to inspect the web request based on the remaining - // rules in the web ACL. You can't specify COUNT for the default action for a - // WebACL. + // * COUNT - AWS WAF increments a + // counter of the requests that match all of the conditions in the rule. AWS WAF + // then continues to inspect the web request based on the remaining rules in the + // web ACL. You can't specify COUNT for the default action for a WebACL. Type *string } @@ -5632,17 +5628,17 @@ type Workflow struct { // The status of the investigation into the finding. The allowed values are the // following. // - // * NEW - The initial state of a finding, before it is reviewed. - // + // * NEW - The initial state of a finding, before it is reviewed. // - // * NOTIFIED - Indicates that you notified the resource owner about the security + // * + // NOTIFIED - Indicates that you notified the resource owner about the security // issue. Used when the initial reviewer is not the resource owner, and needs // intervention from the resource owner. // - // * SUPPRESSED - The finding will not - // be reviewed again and will not be acted upon. + // * SUPPRESSED - The finding will not be + // reviewed again and will not be acted upon. // - // * RESOLVED - The finding was + // * RESOLVED - The finding was // reviewed and remediated and is now considered resolved. Status WorkflowStatus } @@ -5653,17 +5649,17 @@ type WorkflowUpdate struct { // The status of the investigation into the finding. 
The allowed values are the // following. // - // * NEW - The initial state of a finding, before it is reviewed. + // * NEW - The initial state of a finding, before it is reviewed. // - // - // * NOTIFIED - Indicates that you notified the resource owner about the security + // * + // NOTIFIED - Indicates that you notified the resource owner about the security // issue. Used when the initial reviewer is not the resource owner, and needs // intervention from the resource owner. // - // * RESOLVED - The finding was reviewed - // and remediated and is now considered resolved. + // * RESOLVED - The finding was reviewed and + // remediated and is now considered resolved. // - // * SUPPRESSED - The finding - // will not be reviewed again and will not be acted upon. + // * SUPPRESSED - The finding will not + // be reviewed again and will not be acted upon. Status WorkflowStatus } diff --git a/service/serverlessapplicationrepository/doc.go b/service/serverlessapplicationrepository/doc.go index ae6e9fe0b98..a34f3718a65 100644 --- a/service/serverlessapplicationrepository/doc.go +++ b/service/serverlessapplicationrepository/doc.go @@ -23,7 +23,7 @@ // repository.The AWS Serverless Application Repository Developer Guide contains // more information about the two developer experiences available: // -// * Consuming +// * Consuming // Applications – Browse for applications and view information about them, // including source code and readme files. Also install, configure, and deploy // applications of your choosing. Publishing Applications – Configure and upload diff --git a/service/serverlessapplicationrepository/types/enums.go b/service/serverlessapplicationrepository/types/enums.go index ad57778b11a..9723fa30ccd 100644 --- a/service/serverlessapplicationrepository/types/enums.go +++ b/service/serverlessapplicationrepository/types/enums.go @@ -6,10 +6,10 @@ type Capability string // Enum values for Capability const ( - CapabilityCapability_iam Capability = "CAPABILITY_IAM" - CapabilityCapability_named_iam Capability = "CAPABILITY_NAMED_IAM" - CapabilityCapability_auto_expand Capability = "CAPABILITY_AUTO_EXPAND" - CapabilityCapability_resource_policy Capability = "CAPABILITY_RESOURCE_POLICY" + CapabilityCapabilityIam Capability = "CAPABILITY_IAM" + CapabilityCapabilityNamedIam Capability = "CAPABILITY_NAMED_IAM" + CapabilityCapabilityAutoExpand Capability = "CAPABILITY_AUTO_EXPAND" + CapabilityCapabilityResourcePolicy Capability = "CAPABILITY_RESOURCE_POLICY" ) // Values returns all known values for Capability. Note that this can be expanded diff --git a/service/servicecatalog/api_op_AcceptPortfolioShare.go b/service/servicecatalog/api_op_AcceptPortfolioShare.go index 974ba163e3c..09606c2d1e8 100644 --- a/service/servicecatalog/api_op_AcceptPortfolioShare.go +++ b/service/servicecatalog/api_op_AcceptPortfolioShare.go @@ -36,29 +36,27 @@ type AcceptPortfolioShareInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string // The type of shared portfolios to accept. The default is to accept imported // portfolios. // - // * AWS_ORGANIZATIONS - Accept portfolios shared by the - // management account of your organization. + // * AWS_ORGANIZATIONS - Accept portfolios shared by the management + // account of your organization. // - // * IMPORTED - Accept imported - // portfolios. + // * IMPORTED - Accept imported portfolios. 
// - // * AWS_SERVICECATALOG - Not supported. (Throws - // ResourceNotFoundException.) + // * + // AWS_SERVICECATALOG - Not supported. (Throws ResourceNotFoundException.) // - // For example, aws servicecatalog - // accept-portfolio-share --portfolio-id "port-2qwzkwxt3y5fk" - // --portfolio-share-type AWS_ORGANIZATIONS + // For + // example, aws servicecatalog accept-portfolio-share --portfolio-id + // "port-2qwzkwxt3y5fk" --portfolio-share-type AWS_ORGANIZATIONS PortfolioShareType types.PortfolioShareType } diff --git a/service/servicecatalog/api_op_AssociatePrincipalWithPortfolio.go b/service/servicecatalog/api_op_AssociatePrincipalWithPortfolio.go index 6730d206495..bd667f2d3b9 100644 --- a/service/servicecatalog/api_op_AssociatePrincipalWithPortfolio.go +++ b/service/servicecatalog/api_op_AssociatePrincipalWithPortfolio.go @@ -46,12 +46,11 @@ type AssociatePrincipalWithPortfolioInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string } diff --git a/service/servicecatalog/api_op_AssociateProductWithPortfolio.go b/service/servicecatalog/api_op_AssociateProductWithPortfolio.go index 07ec3c2bbeb..be20f41375b 100644 --- a/service/servicecatalog/api_op_AssociateProductWithPortfolio.go +++ b/service/servicecatalog/api_op_AssociateProductWithPortfolio.go @@ -41,12 +41,11 @@ type AssociateProductWithPortfolioInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string // The identifier of the source portfolio. diff --git a/service/servicecatalog/api_op_AssociateServiceActionWithProvisioningArtifact.go b/service/servicecatalog/api_op_AssociateServiceActionWithProvisioningArtifact.go index 985151fd2a9..035ac00acb3 100644 --- a/service/servicecatalog/api_op_AssociateServiceActionWithProvisioningArtifact.go +++ b/service/servicecatalog/api_op_AssociateServiceActionWithProvisioningArtifact.go @@ -45,12 +45,11 @@ type AssociateServiceActionWithProvisioningArtifactInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string } diff --git a/service/servicecatalog/api_op_BatchAssociateServiceActionWithProvisioningArtifact.go b/service/servicecatalog/api_op_BatchAssociateServiceActionWithProvisioningArtifact.go index 683fb57eef0..c2c97e00247 100644 --- a/service/servicecatalog/api_op_BatchAssociateServiceActionWithProvisioningArtifact.go +++ b/service/servicecatalog/api_op_BatchAssociateServiceActionWithProvisioningArtifact.go @@ -37,12 +37,11 @@ type BatchAssociateServiceActionWithProvisioningArtifactInput struct { // The language code. 
// - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string } diff --git a/service/servicecatalog/api_op_BatchDisassociateServiceActionFromProvisioningArtifact.go b/service/servicecatalog/api_op_BatchDisassociateServiceActionFromProvisioningArtifact.go index fd9bcc0d8e8..a7532d172a2 100644 --- a/service/servicecatalog/api_op_BatchDisassociateServiceActionFromProvisioningArtifact.go +++ b/service/servicecatalog/api_op_BatchDisassociateServiceActionFromProvisioningArtifact.go @@ -38,12 +38,11 @@ type BatchDisassociateServiceActionFromProvisioningArtifactInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string } diff --git a/service/servicecatalog/api_op_CopyProduct.go b/service/servicecatalog/api_op_CopyProduct.go index c243001c3b3..722f10ba6cf 100644 --- a/service/servicecatalog/api_op_CopyProduct.go +++ b/service/servicecatalog/api_op_CopyProduct.go @@ -48,12 +48,11 @@ type CopyProductInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string // The copy options. If the value is CopyTags, the tags from the source product are diff --git a/service/servicecatalog/api_op_CreateConstraint.go b/service/servicecatalog/api_op_CreateConstraint.go index e8a6e5c15fc..5d8a7560598 100644 --- a/service/servicecatalog/api_op_CreateConstraint.go +++ b/service/servicecatalog/api_op_CreateConstraint.go @@ -78,28 +78,27 @@ type CreateConstraintInput struct { // The type of constraint. // - // * LAUNCH + // * LAUNCH // - // * NOTIFICATION + // * NOTIFICATION // - // * - // RESOURCE_UPDATE + // * RESOURCE_UPDATE // - // * STACKSET + // * + // STACKSET // - // * TEMPLATE + // * TEMPLATE // // This member is required. Type *string // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string // The description of the constraint. diff --git a/service/servicecatalog/api_op_CreatePortfolio.go b/service/servicecatalog/api_op_CreatePortfolio.go index a65bd048834..01bf174dae9 100644 --- a/service/servicecatalog/api_op_CreatePortfolio.go +++ b/service/servicecatalog/api_op_CreatePortfolio.go @@ -49,12 +49,11 @@ type CreatePortfolioInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string // The description of the portfolio. diff --git a/service/servicecatalog/api_op_CreatePortfolioShare.go b/service/servicecatalog/api_op_CreatePortfolioShare.go index d149f9ee5e5..429fc4dd689 100644 --- a/service/servicecatalog/api_op_CreatePortfolioShare.go +++ b/service/servicecatalog/api_op_CreatePortfolioShare.go @@ -43,12 +43,11 @@ type CreatePortfolioShareInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string // The AWS account ID. For example, 123456789012. 
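// Example (editorial, not part of the generated diff): a minimal sketch of the
// AcceptLanguage pattern shared by the Service Catalog inputs above, shown with
// CreatePortfolioShare. PortfolioId and AccountId are assumed field names (only
// the AccountId doc comment appears in this hunk); the identifiers reuse the
// placeholder values from the doc comments above.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/servicecatalog"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := servicecatalog.NewFromConfig(cfg)

	// AcceptLanguage selects the localization of returned text:
	// "en" (default), "jp", or "zh", as documented above.
	_, err = client.CreatePortfolioShare(ctx, &servicecatalog.CreatePortfolioShareInput{
		PortfolioId:    aws.String("port-2qwzkwxt3y5fk"), // assumed field; ID reused from the CLI example above
		AccountId:      aws.String("123456789012"),       // assumed field; example ID from the doc comment above
		AcceptLanguage: aws.String("jp"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("portfolio share created")
}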
diff --git a/service/servicecatalog/api_op_CreateProduct.go b/service/servicecatalog/api_op_CreateProduct.go index faff333370b..87519651eb2 100644 --- a/service/servicecatalog/api_op_CreateProduct.go +++ b/service/servicecatalog/api_op_CreateProduct.go @@ -59,12 +59,11 @@ type CreateProductInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string // The description of the product. diff --git a/service/servicecatalog/api_op_CreateProvisionedProductPlan.go b/service/servicecatalog/api_op_CreateProvisionedProductPlan.go index c935c26ff5a..d0c7a3d426f 100644 --- a/service/servicecatalog/api_op_CreateProvisionedProductPlan.go +++ b/service/servicecatalog/api_op_CreateProvisionedProductPlan.go @@ -71,12 +71,11 @@ type CreateProvisionedProductPlanInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string // Passed to CloudFormation. The SNS topic ARNs to which to publish stack-related diff --git a/service/servicecatalog/api_op_CreateProvisioningArtifact.go b/service/servicecatalog/api_op_CreateProvisioningArtifact.go index 44fdc1db1f1..c9d093a45ea 100644 --- a/service/servicecatalog/api_op_CreateProvisioningArtifact.go +++ b/service/servicecatalog/api_op_CreateProvisioningArtifact.go @@ -51,12 +51,11 @@ type CreateProvisioningArtifactInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string } diff --git a/service/servicecatalog/api_op_CreateServiceAction.go b/service/servicecatalog/api_op_CreateServiceAction.go index d5a52369240..32348018e55 100644 --- a/service/servicecatalog/api_op_CreateServiceAction.go +++ b/service/servicecatalog/api_op_CreateServiceAction.go @@ -64,12 +64,11 @@ type CreateServiceActionInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string // The self-service action description. diff --git a/service/servicecatalog/api_op_DeleteConstraint.go b/service/servicecatalog/api_op_DeleteConstraint.go index 902bc1800fb..caadd7fcf88 100644 --- a/service/servicecatalog/api_op_DeleteConstraint.go +++ b/service/servicecatalog/api_op_DeleteConstraint.go @@ -36,12 +36,11 @@ type DeleteConstraintInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string } diff --git a/service/servicecatalog/api_op_DeletePortfolio.go b/service/servicecatalog/api_op_DeletePortfolio.go index 96c14091979..a81a65d937c 100644 --- a/service/servicecatalog/api_op_DeletePortfolio.go +++ b/service/servicecatalog/api_op_DeletePortfolio.go @@ -37,12 +37,11 @@ type DeletePortfolioInput struct { // The language code. 
// - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string } diff --git a/service/servicecatalog/api_op_DeletePortfolioShare.go b/service/servicecatalog/api_op_DeletePortfolioShare.go index a7e0c2dcfb4..ca1c53661fb 100644 --- a/service/servicecatalog/api_op_DeletePortfolioShare.go +++ b/service/servicecatalog/api_op_DeletePortfolioShare.go @@ -40,12 +40,11 @@ type DeletePortfolioShareInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string // The AWS account ID. diff --git a/service/servicecatalog/api_op_DeleteProduct.go b/service/servicecatalog/api_op_DeleteProduct.go index 6c805742f9a..429df6a1bbb 100644 --- a/service/servicecatalog/api_op_DeleteProduct.go +++ b/service/servicecatalog/api_op_DeleteProduct.go @@ -37,12 +37,11 @@ type DeleteProductInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string } diff --git a/service/servicecatalog/api_op_DeleteProvisionedProductPlan.go b/service/servicecatalog/api_op_DeleteProvisionedProductPlan.go index 4d86c0d9a4b..7991209f50a 100644 --- a/service/servicecatalog/api_op_DeleteProvisionedProductPlan.go +++ b/service/servicecatalog/api_op_DeleteProvisionedProductPlan.go @@ -35,12 +35,11 @@ type DeleteProvisionedProductPlanInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string // If set to true, AWS Service Catalog stops managing the specified provisioned diff --git a/service/servicecatalog/api_op_DeleteProvisioningArtifact.go b/service/servicecatalog/api_op_DeleteProvisioningArtifact.go index 4ff49714d31..e463e12222f 100644 --- a/service/servicecatalog/api_op_DeleteProvisioningArtifact.go +++ b/service/servicecatalog/api_op_DeleteProvisioningArtifact.go @@ -44,12 +44,11 @@ type DeleteProvisioningArtifactInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string } diff --git a/service/servicecatalog/api_op_DeleteServiceAction.go b/service/servicecatalog/api_op_DeleteServiceAction.go index bad6121dd65..cea726a0f25 100644 --- a/service/servicecatalog/api_op_DeleteServiceAction.go +++ b/service/servicecatalog/api_op_DeleteServiceAction.go @@ -35,12 +35,11 @@ type DeleteServiceActionInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string } diff --git a/service/servicecatalog/api_op_DescribeConstraint.go b/service/servicecatalog/api_op_DescribeConstraint.go index 70004a76c28..98c51de5146 100644 --- a/service/servicecatalog/api_op_DescribeConstraint.go +++ b/service/servicecatalog/api_op_DescribeConstraint.go @@ -36,12 +36,11 @@ type DescribeConstraintInput struct { // The language code. 
// - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string } diff --git a/service/servicecatalog/api_op_DescribeCopyProductStatus.go b/service/servicecatalog/api_op_DescribeCopyProductStatus.go index 9d22e25b132..ba46e800754 100644 --- a/service/servicecatalog/api_op_DescribeCopyProductStatus.go +++ b/service/servicecatalog/api_op_DescribeCopyProductStatus.go @@ -36,12 +36,11 @@ type DescribeCopyProductStatusInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string } diff --git a/service/servicecatalog/api_op_DescribePortfolio.go b/service/servicecatalog/api_op_DescribePortfolio.go index 911c6fdb0d1..ceede146b1d 100644 --- a/service/servicecatalog/api_op_DescribePortfolio.go +++ b/service/servicecatalog/api_op_DescribePortfolio.go @@ -37,12 +37,11 @@ type DescribePortfolioInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string } diff --git a/service/servicecatalog/api_op_DescribeProduct.go b/service/servicecatalog/api_op_DescribeProduct.go index b528947f70f..4ffcba14433 100644 --- a/service/servicecatalog/api_op_DescribeProduct.go +++ b/service/servicecatalog/api_op_DescribeProduct.go @@ -31,12 +31,11 @@ type DescribeProductInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string // The product identifier. diff --git a/service/servicecatalog/api_op_DescribeProductAsAdmin.go b/service/servicecatalog/api_op_DescribeProductAsAdmin.go index 3947a39e038..5af5e8b2780 100644 --- a/service/servicecatalog/api_op_DescribeProductAsAdmin.go +++ b/service/servicecatalog/api_op_DescribeProductAsAdmin.go @@ -32,12 +32,11 @@ type DescribeProductAsAdminInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string // The product identifier. diff --git a/service/servicecatalog/api_op_DescribeProductView.go b/service/servicecatalog/api_op_DescribeProductView.go index fc945dc7bde..635cc5b532e 100644 --- a/service/servicecatalog/api_op_DescribeProductView.go +++ b/service/servicecatalog/api_op_DescribeProductView.go @@ -36,12 +36,11 @@ type DescribeProductViewInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string } diff --git a/service/servicecatalog/api_op_DescribeProvisionedProduct.go b/service/servicecatalog/api_op_DescribeProvisionedProduct.go index 61b24fca96b..0a7410a0b78 100644 --- a/service/servicecatalog/api_op_DescribeProvisionedProduct.go +++ b/service/servicecatalog/api_op_DescribeProvisionedProduct.go @@ -35,12 +35,11 @@ type DescribeProvisionedProductInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string // The provisioned product identifier. 
You must provide the name or ID, but not diff --git a/service/servicecatalog/api_op_DescribeProvisionedProductPlan.go b/service/servicecatalog/api_op_DescribeProvisionedProductPlan.go index ef8d81fdce4..bad646d3b0c 100644 --- a/service/servicecatalog/api_op_DescribeProvisionedProductPlan.go +++ b/service/servicecatalog/api_op_DescribeProvisionedProductPlan.go @@ -36,12 +36,11 @@ type DescribeProvisionedProductPlanInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string // The maximum number of items to return with this call. diff --git a/service/servicecatalog/api_op_DescribeProvisioningArtifact.go b/service/servicecatalog/api_op_DescribeProvisioningArtifact.go index c054b81519a..9bab207d932 100644 --- a/service/servicecatalog/api_op_DescribeProvisioningArtifact.go +++ b/service/servicecatalog/api_op_DescribeProvisioningArtifact.go @@ -32,12 +32,11 @@ type DescribeProvisioningArtifactInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string // The product identifier. diff --git a/service/servicecatalog/api_op_DescribeProvisioningParameters.go b/service/servicecatalog/api_op_DescribeProvisioningParameters.go index d6f5e392885..136dbdfd8db 100644 --- a/service/servicecatalog/api_op_DescribeProvisioningParameters.go +++ b/service/servicecatalog/api_op_DescribeProvisioningParameters.go @@ -38,12 +38,11 @@ type DescribeProvisioningParametersInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string // The path identifier of the product. This value is optional if the product has a diff --git a/service/servicecatalog/api_op_DescribeRecord.go b/service/servicecatalog/api_op_DescribeRecord.go index 3d369dcf01e..9c8a10b8941 100644 --- a/service/servicecatalog/api_op_DescribeRecord.go +++ b/service/servicecatalog/api_op_DescribeRecord.go @@ -44,12 +44,11 @@ type DescribeRecordInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string // The maximum number of items to return with this call. diff --git a/service/servicecatalog/api_op_DescribeServiceAction.go b/service/servicecatalog/api_op_DescribeServiceAction.go index cf1131a3a5d..d76a958c165 100644 --- a/service/servicecatalog/api_op_DescribeServiceAction.go +++ b/service/servicecatalog/api_op_DescribeServiceAction.go @@ -36,12 +36,11 @@ type DescribeServiceActionInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string } diff --git a/service/servicecatalog/api_op_DescribeServiceActionExecutionParameters.go b/service/servicecatalog/api_op_DescribeServiceActionExecutionParameters.go index f70d9c21009..5d5a26bf77a 100644 --- a/service/servicecatalog/api_op_DescribeServiceActionExecutionParameters.go +++ b/service/servicecatalog/api_op_DescribeServiceActionExecutionParameters.go @@ -42,12 +42,11 @@ type DescribeServiceActionExecutionParametersInput struct { // The language code. 
// - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string } diff --git a/service/servicecatalog/api_op_DisassociatePrincipalFromPortfolio.go b/service/servicecatalog/api_op_DisassociatePrincipalFromPortfolio.go index f9f069a40ad..b1ed6120735 100644 --- a/service/servicecatalog/api_op_DisassociatePrincipalFromPortfolio.go +++ b/service/servicecatalog/api_op_DisassociatePrincipalFromPortfolio.go @@ -40,12 +40,11 @@ type DisassociatePrincipalFromPortfolioInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string } diff --git a/service/servicecatalog/api_op_DisassociateProductFromPortfolio.go b/service/servicecatalog/api_op_DisassociateProductFromPortfolio.go index 80e3cb32689..a36ea057f37 100644 --- a/service/servicecatalog/api_op_DisassociateProductFromPortfolio.go +++ b/service/servicecatalog/api_op_DisassociateProductFromPortfolio.go @@ -41,12 +41,11 @@ type DisassociateProductFromPortfolioInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string } diff --git a/service/servicecatalog/api_op_DisassociateServiceActionFromProvisioningArtifact.go b/service/servicecatalog/api_op_DisassociateServiceActionFromProvisioningArtifact.go index 1a38b6e0b31..d19899d00ce 100644 --- a/service/servicecatalog/api_op_DisassociateServiceActionFromProvisioningArtifact.go +++ b/service/servicecatalog/api_op_DisassociateServiceActionFromProvisioningArtifact.go @@ -46,12 +46,11 @@ type DisassociateServiceActionFromProvisioningArtifactInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string } diff --git a/service/servicecatalog/api_op_ExecuteProvisionedProductPlan.go b/service/servicecatalog/api_op_ExecuteProvisionedProductPlan.go index 9470d42a949..5961e82562d 100644 --- a/service/servicecatalog/api_op_ExecuteProvisionedProductPlan.go +++ b/service/servicecatalog/api_op_ExecuteProvisionedProductPlan.go @@ -45,12 +45,11 @@ type ExecuteProvisionedProductPlanInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string } diff --git a/service/servicecatalog/api_op_ExecuteProvisionedProductServiceAction.go b/service/servicecatalog/api_op_ExecuteProvisionedProductServiceAction.go index 5e54b3ab681..70a6a3ad717 100644 --- a/service/servicecatalog/api_op_ExecuteProvisionedProductServiceAction.go +++ b/service/servicecatalog/api_op_ExecuteProvisionedProductServiceAction.go @@ -47,12 +47,11 @@ type ExecuteProvisionedProductServiceActionInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string // A map of all self-service action parameters and their values. 
If a provided diff --git a/service/servicecatalog/api_op_GetProvisionedProductOutputs.go b/service/servicecatalog/api_op_GetProvisionedProductOutputs.go index 2b8adfcc790..ad09b5ce2a6 100644 --- a/service/servicecatalog/api_op_GetProvisionedProductOutputs.go +++ b/service/servicecatalog/api_op_GetProvisionedProductOutputs.go @@ -33,12 +33,11 @@ type GetProvisionedProductOutputsInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string // The list of keys that the API should return with their values. If none are diff --git a/service/servicecatalog/api_op_ListAcceptedPortfolioShares.go b/service/servicecatalog/api_op_ListAcceptedPortfolioShares.go index 16b9cf7e88f..58c10079353 100644 --- a/service/servicecatalog/api_op_ListAcceptedPortfolioShares.go +++ b/service/servicecatalog/api_op_ListAcceptedPortfolioShares.go @@ -31,12 +31,11 @@ type ListAcceptedPortfolioSharesInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string // The maximum number of items to return with this call. @@ -49,13 +48,13 @@ type ListAcceptedPortfolioSharesInput struct { // The type of shared portfolios to list. The default is to list imported // portfolios. // - // * AWS_ORGANIZATIONS - List portfolios shared by the management + // * AWS_ORGANIZATIONS - List portfolios shared by the management // account of your organization // - // * AWS_SERVICECATALOG - List default - // portfolios + // * AWS_SERVICECATALOG - List default portfolios // - // * IMPORTED - List imported portfolios + // * + // IMPORTED - List imported portfolios PortfolioShareType types.PortfolioShareType } diff --git a/service/servicecatalog/api_op_ListBudgetsForResource.go b/service/servicecatalog/api_op_ListBudgetsForResource.go index 9e64ad6f513..13f8397184c 100644 --- a/service/servicecatalog/api_op_ListBudgetsForResource.go +++ b/service/servicecatalog/api_op_ListBudgetsForResource.go @@ -36,12 +36,11 @@ type ListBudgetsForResourceInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string // The maximum number of items to return with this call. diff --git a/service/servicecatalog/api_op_ListConstraintsForPortfolio.go b/service/servicecatalog/api_op_ListConstraintsForPortfolio.go index ea1803665b7..07c51947b01 100644 --- a/service/servicecatalog/api_op_ListConstraintsForPortfolio.go +++ b/service/servicecatalog/api_op_ListConstraintsForPortfolio.go @@ -36,12 +36,11 @@ type ListConstraintsForPortfolioInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string // The maximum number of items to return with this call. diff --git a/service/servicecatalog/api_op_ListLaunchPaths.go b/service/servicecatalog/api_op_ListLaunchPaths.go index d925f7f5b16..cfb273d9b4d 100644 --- a/service/servicecatalog/api_op_ListLaunchPaths.go +++ b/service/servicecatalog/api_op_ListLaunchPaths.go @@ -38,12 +38,11 @@ type ListLaunchPathsInput struct { // The language code. 
// - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string // The maximum number of items to return with this call. diff --git a/service/servicecatalog/api_op_ListOrganizationPortfolioAccess.go b/service/servicecatalog/api_op_ListOrganizationPortfolioAccess.go index 73be865385b..e09075729ac 100644 --- a/service/servicecatalog/api_op_ListOrganizationPortfolioAccess.go +++ b/service/servicecatalog/api_op_ListOrganizationPortfolioAccess.go @@ -34,15 +34,15 @@ type ListOrganizationPortfolioAccessInput struct { // The organization node type that will be returned in the output. // - // * - // ORGANIZATION - Organization that has access to the portfolio. + // * ORGANIZATION + // - Organization that has access to the portfolio. // - // * - // ORGANIZATIONAL_UNIT - Organizational unit that has access to the portfolio - // within your organization. + // * ORGANIZATIONAL_UNIT - + // Organizational unit that has access to the portfolio within your + // organization. // - // * ACCOUNT - Account that has access to the - // portfolio within your organization. + // * ACCOUNT - Account that has access to the portfolio within your + // organization. // // This member is required. OrganizationNodeType types.OrganizationNodeType @@ -54,12 +54,11 @@ type ListOrganizationPortfolioAccessInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string // The maximum number of items to return with this call. diff --git a/service/servicecatalog/api_op_ListPortfolioAccess.go b/service/servicecatalog/api_op_ListPortfolioAccess.go index 68d26a5a068..e874c94d95a 100644 --- a/service/servicecatalog/api_op_ListPortfolioAccess.go +++ b/service/servicecatalog/api_op_ListPortfolioAccess.go @@ -38,12 +38,11 @@ type ListPortfolioAccessInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string // The ID of an organization node the portfolio is shared with. All children of diff --git a/service/servicecatalog/api_op_ListPortfolios.go b/service/servicecatalog/api_op_ListPortfolios.go index 15b6ac846ee..de6bfb6718c 100644 --- a/service/servicecatalog/api_op_ListPortfolios.go +++ b/service/servicecatalog/api_op_ListPortfolios.go @@ -31,12 +31,11 @@ type ListPortfoliosInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string // The maximum number of items to return with this call. diff --git a/service/servicecatalog/api_op_ListPortfoliosForProduct.go b/service/servicecatalog/api_op_ListPortfoliosForProduct.go index 6f4e50a1d82..3b298043c1a 100644 --- a/service/servicecatalog/api_op_ListPortfoliosForProduct.go +++ b/service/servicecatalog/api_op_ListPortfoliosForProduct.go @@ -36,12 +36,11 @@ type ListPortfoliosForProductInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string // The maximum number of items to return with this call. 
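For context on the reflowed AcceptLanguage and PortfolioShareType documentation in the servicecatalog operations above, the sketch below shows how a caller might exercise those fields against the regenerated client. This is a minimal illustration, not part of the patch: the module paths, the config.LoadDefaultConfig client construction, and the PortfolioDetails output fields are assumptions based on the usual aws-sdk-go-v2 layout, while PortfolioShareTypeAwsOrganizations is the constant as renamed later in this patch (formerly PortfolioShareTypeAws_organizations).

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/servicecatalog"
	"github.com/aws/aws-sdk-go-v2/service/servicecatalog/types"
)

func main() {
	ctx := context.Background()

	// Assumed: standard shared-config loading; any other client construction works too.
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := servicecatalog.NewFromConfig(cfg)

	// AcceptLanguage and PortfolioShareType are the documented input fields whose
	// comments are reflowed in this patch; the enum constant uses the new
	// idiomatic Go name introduced in types/enums.go.
	out, err := client.ListAcceptedPortfolioShares(ctx, &servicecatalog.ListAcceptedPortfolioSharesInput{
		AcceptLanguage:     aws.String("en"),
		PortfolioShareType: types.PortfolioShareTypeAwsOrganizations,
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, detail := range out.PortfolioDetails {
		// Print the shared portfolios returned for the organization share type.
		fmt.Println(aws.ToString(detail.Id), aws.ToString(detail.DisplayName))
	}
}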
diff --git a/service/servicecatalog/api_op_ListPrincipalsForPortfolio.go b/service/servicecatalog/api_op_ListPrincipalsForPortfolio.go index 765076fa6ef..867cbe94d9f 100644 --- a/service/servicecatalog/api_op_ListPrincipalsForPortfolio.go +++ b/service/servicecatalog/api_op_ListPrincipalsForPortfolio.go @@ -36,12 +36,11 @@ type ListPrincipalsForPortfolioInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string // The maximum number of items to return with this call. diff --git a/service/servicecatalog/api_op_ListProvisionedProductPlans.go b/service/servicecatalog/api_op_ListProvisionedProductPlans.go index 769800fad1d..2793b1fa7fa 100644 --- a/service/servicecatalog/api_op_ListProvisionedProductPlans.go +++ b/service/servicecatalog/api_op_ListProvisionedProductPlans.go @@ -32,12 +32,11 @@ type ListProvisionedProductPlansInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string // The access level to use to obtain results. The default is User. diff --git a/service/servicecatalog/api_op_ListProvisioningArtifacts.go b/service/servicecatalog/api_op_ListProvisioningArtifacts.go index 7b4c4dd2798..9a381c82b4d 100644 --- a/service/servicecatalog/api_op_ListProvisioningArtifacts.go +++ b/service/servicecatalog/api_op_ListProvisioningArtifacts.go @@ -37,12 +37,11 @@ type ListProvisioningArtifactsInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string } diff --git a/service/servicecatalog/api_op_ListProvisioningArtifactsForServiceAction.go b/service/servicecatalog/api_op_ListProvisioningArtifactsForServiceAction.go index 0376adf52b4..c8ee30e94b4 100644 --- a/service/servicecatalog/api_op_ListProvisioningArtifactsForServiceAction.go +++ b/service/servicecatalog/api_op_ListProvisioningArtifactsForServiceAction.go @@ -37,12 +37,11 @@ type ListProvisioningArtifactsForServiceActionInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string // The maximum number of items to return with this call. diff --git a/service/servicecatalog/api_op_ListRecordHistory.go b/service/servicecatalog/api_op_ListRecordHistory.go index b2b8c36f4d5..7d661ad2f82 100644 --- a/service/servicecatalog/api_op_ListRecordHistory.go +++ b/service/servicecatalog/api_op_ListRecordHistory.go @@ -31,12 +31,11 @@ type ListRecordHistoryInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string // The access level to use to obtain results. The default is User. diff --git a/service/servicecatalog/api_op_ListResourcesForTagOption.go b/service/servicecatalog/api_op_ListResourcesForTagOption.go index 9e8c28b9412..92a2d8ab4da 100644 --- a/service/servicecatalog/api_op_ListResourcesForTagOption.go +++ b/service/servicecatalog/api_op_ListResourcesForTagOption.go @@ -43,9 +43,9 @@ type ListResourcesForTagOptionInput struct { // The resource type. 
// - // * Portfolio + // * Portfolio // - // * Product + // * Product ResourceType *string } diff --git a/service/servicecatalog/api_op_ListServiceActions.go b/service/servicecatalog/api_op_ListServiceActions.go index 7911a00f3d4..ca612bfb39b 100644 --- a/service/servicecatalog/api_op_ListServiceActions.go +++ b/service/servicecatalog/api_op_ListServiceActions.go @@ -31,12 +31,11 @@ type ListServiceActionsInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string // The maximum number of items to return with this call. diff --git a/service/servicecatalog/api_op_ListServiceActionsForProvisioningArtifact.go b/service/servicecatalog/api_op_ListServiceActionsForProvisioningArtifact.go index 546f6e266b9..19b17d8a159 100644 --- a/service/servicecatalog/api_op_ListServiceActionsForProvisioningArtifact.go +++ b/service/servicecatalog/api_op_ListServiceActionsForProvisioningArtifact.go @@ -42,12 +42,11 @@ type ListServiceActionsForProvisioningArtifactInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string // The maximum number of items to return with this call. diff --git a/service/servicecatalog/api_op_ListStackInstancesForProvisionedProduct.go b/service/servicecatalog/api_op_ListStackInstancesForProvisionedProduct.go index 8be8177875c..83f720457d5 100644 --- a/service/servicecatalog/api_op_ListStackInstancesForProvisionedProduct.go +++ b/service/servicecatalog/api_op_ListStackInstancesForProvisionedProduct.go @@ -38,12 +38,11 @@ type ListStackInstancesForProvisionedProductInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string // The maximum number of items to return with this call. diff --git a/service/servicecatalog/api_op_ProvisionProduct.go b/service/servicecatalog/api_op_ProvisionProduct.go index d01b0d237e5..dc58bdb5e16 100644 --- a/service/servicecatalog/api_op_ProvisionProduct.go +++ b/service/servicecatalog/api_op_ProvisionProduct.go @@ -49,12 +49,11 @@ type ProvisionProductInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string // Passed to CloudFormation. The SNS topic ARNs to which to publish stack-related diff --git a/service/servicecatalog/api_op_RejectPortfolioShare.go b/service/servicecatalog/api_op_RejectPortfolioShare.go index bacaadaf525..7515ff97bdb 100644 --- a/service/servicecatalog/api_op_RejectPortfolioShare.go +++ b/service/servicecatalog/api_op_RejectPortfolioShare.go @@ -36,29 +36,27 @@ type RejectPortfolioShareInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string // The type of shared portfolios to reject. The default is to reject imported // portfolios. // - // * AWS_ORGANIZATIONS - Reject portfolios shared by the - // management account of your organization. + // * AWS_ORGANIZATIONS - Reject portfolios shared by the management + // account of your organization. 
// - // * IMPORTED - Reject imported - // portfolios. + // * IMPORTED - Reject imported portfolios. // - // * AWS_SERVICECATALOG - Not supported. (Throws - // ResourceNotFoundException.) + // * + // AWS_SERVICECATALOG - Not supported. (Throws ResourceNotFoundException.) // - // For example, aws servicecatalog - // reject-portfolio-share --portfolio-id "port-2qwzkwxt3y5fk" - // --portfolio-share-type AWS_ORGANIZATIONS + // For + // example, aws servicecatalog reject-portfolio-share --portfolio-id + // "port-2qwzkwxt3y5fk" --portfolio-share-type AWS_ORGANIZATIONS PortfolioShareType types.PortfolioShareType } diff --git a/service/servicecatalog/api_op_ScanProvisionedProducts.go b/service/servicecatalog/api_op_ScanProvisionedProducts.go index ae19f5b94eb..70cdafaecd8 100644 --- a/service/servicecatalog/api_op_ScanProvisionedProducts.go +++ b/service/servicecatalog/api_op_ScanProvisionedProducts.go @@ -32,12 +32,11 @@ type ScanProvisionedProductsInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string // The access level to use to obtain results. The default is User. diff --git a/service/servicecatalog/api_op_SearchProducts.go b/service/servicecatalog/api_op_SearchProducts.go index 4daa42536bf..f5464ed1f7c 100644 --- a/service/servicecatalog/api_op_SearchProducts.go +++ b/service/servicecatalog/api_op_SearchProducts.go @@ -31,12 +31,11 @@ type SearchProductsInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string // The search filters. If no search filters are specified, the output includes all diff --git a/service/servicecatalog/api_op_SearchProductsAsAdmin.go b/service/servicecatalog/api_op_SearchProductsAsAdmin.go index 4a66349c24e..c8d2f57b7ba 100644 --- a/service/servicecatalog/api_op_SearchProductsAsAdmin.go +++ b/service/servicecatalog/api_op_SearchProductsAsAdmin.go @@ -31,12 +31,11 @@ type SearchProductsAsAdminInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string // The search filters. If no search filters are specified, the output includes all diff --git a/service/servicecatalog/api_op_SearchProvisionedProducts.go b/service/servicecatalog/api_op_SearchProvisionedProducts.go index a29471fe7b4..b7b4258d8ca 100644 --- a/service/servicecatalog/api_op_SearchProvisionedProducts.go +++ b/service/servicecatalog/api_op_SearchProvisionedProducts.go @@ -32,12 +32,11 @@ type SearchProvisionedProductsInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string // The access level to use to obtain results. The default is User. diff --git a/service/servicecatalog/api_op_TerminateProvisionedProduct.go b/service/servicecatalog/api_op_TerminateProvisionedProduct.go index 6539275145c..d5e4651f67d 100644 --- a/service/servicecatalog/api_op_TerminateProvisionedProduct.go +++ b/service/servicecatalog/api_op_TerminateProvisionedProduct.go @@ -42,12 +42,11 @@ type TerminateProvisionedProductInput struct { // The language code. 
// - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string // If set to true, AWS Service Catalog stops managing the specified provisioned diff --git a/service/servicecatalog/api_op_UpdateConstraint.go b/service/servicecatalog/api_op_UpdateConstraint.go index 4f8579bfc17..cea2d9db80e 100644 --- a/service/servicecatalog/api_op_UpdateConstraint.go +++ b/service/servicecatalog/api_op_UpdateConstraint.go @@ -36,12 +36,11 @@ type UpdateConstraintInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string // The updated description of the constraint. diff --git a/service/servicecatalog/api_op_UpdatePortfolio.go b/service/servicecatalog/api_op_UpdatePortfolio.go index 3393bd771d1..59533519ebb 100644 --- a/service/servicecatalog/api_op_UpdatePortfolio.go +++ b/service/servicecatalog/api_op_UpdatePortfolio.go @@ -37,12 +37,11 @@ type UpdatePortfolioInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string // The tags to add. diff --git a/service/servicecatalog/api_op_UpdateProduct.go b/service/servicecatalog/api_op_UpdateProduct.go index 068a2142de2..bf8d47c1d35 100644 --- a/service/servicecatalog/api_op_UpdateProduct.go +++ b/service/servicecatalog/api_op_UpdateProduct.go @@ -36,12 +36,11 @@ type UpdateProductInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string // The tags to add to the product. diff --git a/service/servicecatalog/api_op_UpdateProvisionedProduct.go b/service/servicecatalog/api_op_UpdateProvisionedProduct.go index 39dafd9eb56..2090806beb3 100644 --- a/service/servicecatalog/api_op_UpdateProvisionedProduct.go +++ b/service/servicecatalog/api_op_UpdateProvisionedProduct.go @@ -41,12 +41,11 @@ type UpdateProvisionedProductInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string // The path identifier. This value is optional if the product has a default path, diff --git a/service/servicecatalog/api_op_UpdateProvisionedProductProperties.go b/service/servicecatalog/api_op_UpdateProvisionedProductProperties.go index 8aa8da99cc6..8ac5edd86b0 100644 --- a/service/servicecatalog/api_op_UpdateProvisionedProductProperties.go +++ b/service/servicecatalog/api_op_UpdateProvisionedProductProperties.go @@ -68,12 +68,11 @@ type UpdateProvisionedProductPropertiesInput struct { // The language code. 
// - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string } diff --git a/service/servicecatalog/api_op_UpdateProvisioningArtifact.go b/service/servicecatalog/api_op_UpdateProvisioningArtifact.go index b9df5122aed..23c32f83ffc 100644 --- a/service/servicecatalog/api_op_UpdateProvisioningArtifact.go +++ b/service/servicecatalog/api_op_UpdateProvisioningArtifact.go @@ -43,12 +43,11 @@ type UpdateProvisioningArtifactInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string // Indicates whether the product version is active. Inactive provisioning artifacts diff --git a/service/servicecatalog/api_op_UpdateServiceAction.go b/service/servicecatalog/api_op_UpdateServiceAction.go index d1cc68d23c1..8229241aa0b 100644 --- a/service/servicecatalog/api_op_UpdateServiceAction.go +++ b/service/servicecatalog/api_op_UpdateServiceAction.go @@ -36,12 +36,11 @@ type UpdateServiceActionInput struct { // The language code. // - // * en - English (default) + // * en - English (default) // - // * jp - Japanese + // * jp - Japanese // - // * zh - // - Chinese + // * zh - Chinese AcceptLanguage *string // A map that defines the self-service action. diff --git a/service/servicecatalog/types/enums.go b/service/servicecatalog/types/enums.go index 2008b7e076d..c27e363cae6 100644 --- a/service/servicecatalog/types/enums.go +++ b/service/servicecatalog/types/enums.go @@ -26,9 +26,9 @@ type AccessStatus string // Enum values for AccessStatus const ( - AccessStatusEnabled AccessStatus = "ENABLED" - AccessStatusUnder_change AccessStatus = "UNDER_CHANGE" - AccessStatusDisabled AccessStatus = "DISABLED" + AccessStatusEnabled AccessStatus = "ENABLED" + AccessStatusUnderChange AccessStatus = "UNDER_CHANGE" + AccessStatusDisabled AccessStatus = "DISABLED" ) // Values returns all known values for AccessStatus. Note that this can be expanded @@ -82,9 +82,9 @@ type CopyProductStatus string // Enum values for CopyProductStatus const ( - CopyProductStatusSucceeded CopyProductStatus = "SUCCEEDED" - CopyProductStatusIn_progress CopyProductStatus = "IN_PROGRESS" - CopyProductStatusFailed CopyProductStatus = "FAILED" + CopyProductStatusSucceeded CopyProductStatus = "SUCCEEDED" + CopyProductStatusInProgress CopyProductStatus = "IN_PROGRESS" + CopyProductStatusFailed CopyProductStatus = "FAILED" ) // Values returns all known values for CopyProductStatus. Note that this can be @@ -120,9 +120,9 @@ type OrganizationNodeType string // Enum values for OrganizationNodeType const ( - OrganizationNodeTypeOrganization OrganizationNodeType = "ORGANIZATION" - OrganizationNodeTypeOrganizational_unit OrganizationNodeType = "ORGANIZATIONAL_UNIT" - OrganizationNodeTypeAccount OrganizationNodeType = "ACCOUNT" + OrganizationNodeTypeOrganization OrganizationNodeType = "ORGANIZATION" + OrganizationNodeTypeOrganizationalUnit OrganizationNodeType = "ORGANIZATIONAL_UNIT" + OrganizationNodeTypeAccount OrganizationNodeType = "ACCOUNT" ) // Values returns all known values for OrganizationNodeType. 
Note that this can be @@ -140,9 +140,9 @@ type PortfolioShareType string // Enum values for PortfolioShareType const ( - PortfolioShareTypeImported PortfolioShareType = "IMPORTED" - PortfolioShareTypeAws_servicecatalog PortfolioShareType = "AWS_SERVICECATALOG" - PortfolioShareTypeAws_organizations PortfolioShareType = "AWS_ORGANIZATIONS" + PortfolioShareTypeImported PortfolioShareType = "IMPORTED" + PortfolioShareTypeAwsServicecatalog PortfolioShareType = "AWS_SERVICECATALOG" + PortfolioShareTypeAwsOrganizations PortfolioShareType = "AWS_ORGANIZATIONS" ) // Values returns all known values for PortfolioShareType. Note that this can be @@ -192,8 +192,8 @@ type ProductType string // Enum values for ProductType const ( - ProductTypeCloud_formation_template ProductType = "CLOUD_FORMATION_TEMPLATE" - ProductTypeMarketplace ProductType = "MARKETPLACE" + ProductTypeCloudFormationTemplate ProductType = "CLOUD_FORMATION_TEMPLATE" + ProductTypeMarketplace ProductType = "MARKETPLACE" ) // Values returns all known values for ProductType. Note that this can be expanded @@ -270,12 +270,12 @@ type ProvisionedProductPlanStatus string // Enum values for ProvisionedProductPlanStatus const ( - ProvisionedProductPlanStatusCreate_in_progress ProvisionedProductPlanStatus = "CREATE_IN_PROGRESS" - ProvisionedProductPlanStatusCreate_success ProvisionedProductPlanStatus = "CREATE_SUCCESS" - ProvisionedProductPlanStatusCreate_failed ProvisionedProductPlanStatus = "CREATE_FAILED" - ProvisionedProductPlanStatusExecute_in_progress ProvisionedProductPlanStatus = "EXECUTE_IN_PROGRESS" - ProvisionedProductPlanStatusExecute_success ProvisionedProductPlanStatus = "EXECUTE_SUCCESS" - ProvisionedProductPlanStatusExecute_failed ProvisionedProductPlanStatus = "EXECUTE_FAILED" + ProvisionedProductPlanStatusCreateInProgress ProvisionedProductPlanStatus = "CREATE_IN_PROGRESS" + ProvisionedProductPlanStatusCreateSuccess ProvisionedProductPlanStatus = "CREATE_SUCCESS" + ProvisionedProductPlanStatusCreateFailed ProvisionedProductPlanStatus = "CREATE_FAILED" + ProvisionedProductPlanStatusExecuteInProgress ProvisionedProductPlanStatus = "EXECUTE_IN_PROGRESS" + ProvisionedProductPlanStatusExecuteSuccess ProvisionedProductPlanStatus = "EXECUTE_SUCCESS" + ProvisionedProductPlanStatusExecuteFailed ProvisionedProductPlanStatus = "EXECUTE_FAILED" ) // Values returns all known values for ProvisionedProductPlanStatus. Note that this @@ -312,11 +312,11 @@ type ProvisionedProductStatus string // Enum values for ProvisionedProductStatus const ( - ProvisionedProductStatusAvailable ProvisionedProductStatus = "AVAILABLE" - ProvisionedProductStatusUnder_change ProvisionedProductStatus = "UNDER_CHANGE" - ProvisionedProductStatusTainted ProvisionedProductStatus = "TAINTED" - ProvisionedProductStatusError ProvisionedProductStatus = "ERROR" - ProvisionedProductStatusPlan_in_progress ProvisionedProductStatus = "PLAN_IN_PROGRESS" + ProvisionedProductStatusAvailable ProvisionedProductStatus = "AVAILABLE" + ProvisionedProductStatusUnderChange ProvisionedProductStatus = "UNDER_CHANGE" + ProvisionedProductStatusTainted ProvisionedProductStatus = "TAINTED" + ProvisionedProductStatusError ProvisionedProductStatus = "ERROR" + ProvisionedProductStatusPlanInProgress ProvisionedProductStatus = "PLAN_IN_PROGRESS" ) // Values returns all known values for ProvisionedProductStatus. 
Note that this can @@ -388,9 +388,9 @@ type ProvisioningArtifactType string // Enum values for ProvisioningArtifactType const ( - ProvisioningArtifactTypeCloud_formation_template ProvisioningArtifactType = "CLOUD_FORMATION_TEMPLATE" - ProvisioningArtifactTypeMarketplace_ami ProvisioningArtifactType = "MARKETPLACE_AMI" - ProvisioningArtifactTypeMarketplace_car ProvisioningArtifactType = "MARKETPLACE_CAR" + ProvisioningArtifactTypeCloudFormationTemplate ProvisioningArtifactType = "CLOUD_FORMATION_TEMPLATE" + ProvisioningArtifactTypeMarketplaceAmi ProvisioningArtifactType = "MARKETPLACE_AMI" + ProvisioningArtifactTypeMarketplaceCar ProvisioningArtifactType = "MARKETPLACE_CAR" ) // Values returns all known values for ProvisioningArtifactType. Note that this can @@ -408,11 +408,11 @@ type RecordStatus string // Enum values for RecordStatus const ( - RecordStatusCreated RecordStatus = "CREATED" - RecordStatusIn_progress RecordStatus = "IN_PROGRESS" - RecordStatusIn_progress_in_error RecordStatus = "IN_PROGRESS_IN_ERROR" - RecordStatusSucceeded RecordStatus = "SUCCEEDED" - RecordStatusFailed RecordStatus = "FAILED" + RecordStatusCreated RecordStatus = "CREATED" + RecordStatusInProgress RecordStatus = "IN_PROGRESS" + RecordStatusInProgressInError RecordStatus = "IN_PROGRESS_IN_ERROR" + RecordStatusSucceeded RecordStatus = "SUCCEEDED" + RecordStatusFailed RecordStatus = "FAILED" ) // Values returns all known values for RecordStatus. Note that this can be expanded @@ -561,11 +561,11 @@ type ShareStatus string // Enum values for ShareStatus const ( - ShareStatusNot_started ShareStatus = "NOT_STARTED" - ShareStatusIn_progress ShareStatus = "IN_PROGRESS" - ShareStatusCompleted ShareStatus = "COMPLETED" - ShareStatusCompleted_with_errors ShareStatus = "COMPLETED_WITH_ERRORS" - ShareStatusError ShareStatus = "ERROR" + ShareStatusNotStarted ShareStatus = "NOT_STARTED" + ShareStatusInProgress ShareStatus = "IN_PROGRESS" + ShareStatusCompleted ShareStatus = "COMPLETED" + ShareStatusCompletedWithErrors ShareStatus = "COMPLETED_WITH_ERRORS" + ShareStatusError ShareStatus = "ERROR" ) // Values returns all known values for ShareStatus. Note that this can be expanded diff --git a/service/servicecatalog/types/types.go b/service/servicecatalog/types/types.go index 49c9138f38a..7545b08d066 100644 --- a/service/servicecatalog/types/types.go +++ b/service/servicecatalog/types/types.go @@ -11,13 +11,13 @@ type AccessLevelFilter struct { // The access level. // - // * Account - Filter results based on the account. + // * Account - Filter results based on the account. // - // * - // Role - Filter results based on the federated role of the specified user. + // * Role - + // Filter results based on the federated role of the specified user. // - // * - // User - Filter results based on the specified user. + // * User - + // Filter results based on the specified user. Key AccessLevelFilterKey // The user to which the access level applies. The only supported value is Self. @@ -60,14 +60,13 @@ type ConstraintDetail struct { // The type of constraint. // - // * LAUNCH + // * LAUNCH // - // * NOTIFICATION + // * NOTIFICATION // - // * STACKSET + // * STACKSET // - // * - // TEMPLATE + // * TEMPLATE Type *string } @@ -79,14 +78,13 @@ type ConstraintSummary struct { // The type of constraint. 
// - // * LAUNCH + // * LAUNCH // - // * NOTIFICATION + // * NOTIFICATION // - // * STACKSET + // * STACKSET // - // * - // TEMPLATE + // * TEMPLATE Type *string } @@ -155,10 +153,10 @@ type ListRecordHistorySearchFilter struct { // The filter key. // - // * product - Filter results based on the specified product + // * product - Filter results based on the specified product // identifier. // - // * provisionedproduct - Filter results based on the provisioned + // * provisionedproduct - Filter results based on the provisioned // product identifier. Key *string @@ -253,13 +251,13 @@ type ProductViewDetail struct { // The status of the product. // - // * AVAILABLE - The product is ready for use. + // * AVAILABLE - The product is ready for use. // + // * + // CREATING - Product creation has started; the product is not ready for use. // - // * CREATING - Product creation has started; the product is not ready for use. - // - // - // * FAILED - An action failed. + // * + // FAILED - An action failed. Status Status } @@ -326,15 +324,14 @@ type ProvisionedProductAttribute struct { // The record identifier of the last request performed on this provisioned product // of the following types: // - // * ProvisionedProduct + // * ProvisionedProduct // - // * - // UpdateProvisionedProduct + // * UpdateProvisionedProduct // - // * ExecuteProvisionedProductPlan + // * + // ExecuteProvisionedProductPlan // - // * - // TerminateProvisionedProduct + // * TerminateProvisionedProduct LastProvisioningRecordId *string // The record identifier of the last request performed on this provisioned product. @@ -343,14 +340,14 @@ type ProvisionedProductAttribute struct { // The record identifier of the last successful request performed on this // provisioned product of the following types: // - // * ProvisionedProduct + // * ProvisionedProduct // - // * + // * // UpdateProvisionedProduct // - // * ExecuteProvisionedProductPlan + // * ExecuteProvisionedProductPlan // - // * + // * // TerminateProvisionedProduct LastSuccessfulProvisioningRecordId *string @@ -375,28 +372,28 @@ type ProvisionedProductAttribute struct { // The current status of the provisioned product. // - // * AVAILABLE - Stable state, + // * AVAILABLE - Stable state, // ready to perform any operation. The most recent operation succeeded and // completed. // - // * UNDER_CHANGE - Transitive state. Operations performed might - // not have valid results. Wait for an AVAILABLE status before performing + // * UNDER_CHANGE - Transitive state. Operations performed might not + // have valid results. Wait for an AVAILABLE status before performing // operations. // - // * TAINTED - Stable state, ready to perform any operation. The - // stack has completed the requested operation but is not exactly what was - // requested. For example, a request to update to a new version failed and the - // stack rolled back to the current version. + // * TAINTED - Stable state, ready to perform any operation. The stack + // has completed the requested operation but is not exactly what was requested. For + // example, a request to update to a new version failed and the stack rolled back + // to the current version. // - // * ERROR - An unexpected error - // occurred. The provisioned product exists but the stack is not running. For - // example, CloudFormation received a parameter value that was not valid and could - // not launch the stack. + // * ERROR - An unexpected error occurred. The provisioned + // product exists but the stack is not running. 
For example, CloudFormation + // received a parameter value that was not valid and could not launch the stack. // - // * PLAN_IN_PROGRESS - Transitive state. The plan - // operations were performed to provision a new product, but resources have not yet - // been created. After reviewing the list of resources to be created, execute the - // plan. Wait for an AVAILABLE status before performing operations. + // * + // PLAN_IN_PROGRESS - Transitive state. The plan operations were performed to + // provision a new product, but resources have not yet been created. After + // reviewing the list of resources to be created, execute the plan. Wait for an + // AVAILABLE status before performing operations. Status ProvisionedProductStatus // The current status message of the provisioned product. @@ -436,15 +433,14 @@ type ProvisionedProductDetail struct { // The record identifier of the last request performed on this provisioned product // of the following types: // - // * ProvisionedProduct + // * ProvisionedProduct // - // * - // UpdateProvisionedProduct + // * UpdateProvisionedProduct // - // * ExecuteProvisionedProductPlan + // * + // ExecuteProvisionedProductPlan // - // * - // TerminateProvisionedProduct + // * TerminateProvisionedProduct LastProvisioningRecordId *string // The record identifier of the last request performed on this provisioned product. @@ -453,14 +449,14 @@ type ProvisionedProductDetail struct { // The record identifier of the last successful request performed on this // provisioned product of the following types: // - // * ProvisionedProduct + // * ProvisionedProduct // - // * + // * // UpdateProvisionedProduct // - // * ExecuteProvisionedProductPlan + // * ExecuteProvisionedProductPlan // - // * + // * // TerminateProvisionedProduct LastSuccessfulProvisioningRecordId *string @@ -478,28 +474,28 @@ type ProvisionedProductDetail struct { // The current status of the provisioned product. // - // * AVAILABLE - Stable state, + // * AVAILABLE - Stable state, // ready to perform any operation. The most recent operation succeeded and // completed. // - // * UNDER_CHANGE - Transitive state. Operations performed might - // not have valid results. Wait for an AVAILABLE status before performing + // * UNDER_CHANGE - Transitive state. Operations performed might not + // have valid results. Wait for an AVAILABLE status before performing // operations. // - // * TAINTED - Stable state, ready to perform any operation. The - // stack has completed the requested operation but is not exactly what was - // requested. For example, a request to update to a new version failed and the - // stack rolled back to the current version. + // * TAINTED - Stable state, ready to perform any operation. The stack + // has completed the requested operation but is not exactly what was requested. For + // example, a request to update to a new version failed and the stack rolled back + // to the current version. // - // * ERROR - An unexpected error - // occurred. The provisioned product exists but the stack is not running. For - // example, CloudFormation received a parameter value that was not valid and could - // not launch the stack. + // * ERROR - An unexpected error occurred. The provisioned + // product exists but the stack is not running. For example, CloudFormation + // received a parameter value that was not valid and could not launch the stack. // - // * PLAN_IN_PROGRESS - Transitive state. The plan - // operations were performed to provision a new product, but resources have not yet - // been created. 
After reviewing the list of resources to be created, execute the - // plan. Wait for an AVAILABLE status before performing operations. + // * + // PLAN_IN_PROGRESS - Transitive state. The plan operations were performed to + // provision a new product, but resources have not yet been created. After + // reviewing the list of resources to be created, execute the plan. Wait for an + // AVAILABLE status before performing operations. Status ProvisionedProductStatus // The current status message of the provisioned product. @@ -631,12 +627,12 @@ type ProvisioningArtifactDetail struct { // The type of provisioning artifact. // - // * CLOUD_FORMATION_TEMPLATE - AWS + // * CLOUD_FORMATION_TEMPLATE - AWS // CloudFormation template // - // * MARKETPLACE_AMI - AWS Marketplace AMI + // * MARKETPLACE_AMI - AWS Marketplace AMI // - // * + // * // MARKETPLACE_CAR - AWS Marketplace Clusters and AWS Resources Type ProvisioningArtifactType } @@ -721,12 +717,12 @@ type ProvisioningArtifactProperties struct { // The type of provisioning artifact. // - // * CLOUD_FORMATION_TEMPLATE - AWS + // * CLOUD_FORMATION_TEMPLATE - AWS // CloudFormation template // - // * MARKETPLACE_AMI - AWS Marketplace AMI + // * MARKETPLACE_AMI - AWS Marketplace AMI // - // * + // * // MARKETPLACE_CAR - AWS Marketplace Clusters and AWS Resources Type ProvisioningArtifactType } @@ -876,31 +872,31 @@ type RecordDetail struct { // The record type. // - // * PROVISION_PRODUCT + // * PROVISION_PRODUCT // - // * UPDATE_PROVISIONED_PRODUCT + // * UPDATE_PROVISIONED_PRODUCT // - // - // * TERMINATE_PROVISIONED_PRODUCT + // * + // TERMINATE_PROVISIONED_PRODUCT RecordType *string // The status of the provisioned product. // - // * CREATED - The request was created - // but the operation has not started. + // * CREATED - The request was created but + // the operation has not started. // - // * IN_PROGRESS - The requested operation - // is in progress. + // * IN_PROGRESS - The requested operation is in + // progress. // - // * IN_PROGRESS_IN_ERROR - The provisioned product is under - // change but the requested operation failed and some remediation is occurring. For - // example, a rollback. + // * IN_PROGRESS_IN_ERROR - The provisioned product is under change but + // the requested operation failed and some remediation is occurring. For example, a + // rollback. // - // * SUCCEEDED - The requested operation has successfully - // completed. + // * SUCCEEDED - The requested operation has successfully completed. // - // * FAILED - The requested operation has unsuccessfully completed. - // Investigate using the error messages returned. + // * + // FAILED - The requested operation has unsuccessfully completed. Investigate using + // the error messages returned. Status RecordStatus // The time when the record was last updated. @@ -1104,20 +1100,20 @@ type StackInstance struct { // The status of the stack instance, in terms of its synchronization with its // associated stack set. // - // * INOPERABLE: A DeleteStackInstances operation has - // failed and left the stack in an unstable state. Stacks in this state are - // excluded from further UpdateStackSet operations. You might need to perform a + // * INOPERABLE: A DeleteStackInstances operation has failed + // and left the stack in an unstable state. Stacks in this state are excluded from + // further UpdateStackSet operations. You might need to perform a // DeleteStackInstances operation, with RetainStacks set to true, to delete the // stack instance, and then delete the stack manually. 
// - // * OUTDATED: The stack - // isn't currently up to date with the stack set because either the associated - // stack failed during a CreateStackSet or UpdateStackSet operation, or the stack - // was part of a CreateStackSet or UpdateStackSet operation that failed or was - // stopped before the stack was created or updated. + // * OUTDATED: The stack isn't + // currently up to date with the stack set because either the associated stack + // failed during a CreateStackSet or UpdateStackSet operation, or the stack was + // part of a CreateStackSet or UpdateStackSet operation that failed or was stopped + // before the stack was created or updated. // - // * CURRENT: The stack is - // currently up to date with the stack set. + // * CURRENT: The stack is currently up + // to date with the stack set. StackInstanceStatus StackInstanceStatus } diff --git a/service/servicediscovery/api_op_CreateService.go b/service/servicediscovery/api_op_CreateService.go index 24d2361fcdc..2f30641f071 100644 --- a/service/servicediscovery/api_op_CreateService.go +++ b/service/servicediscovery/api_op_CreateService.go @@ -12,27 +12,27 @@ import ( smithyhttp "github.com/awslabs/smithy-go/transport/http" ) -// Creates a service, which defines the configuration for the following entities: +// Creates a service, which defines the configuration for the following +// entities: // +// * For public and private DNS namespaces, one of the following +// combinations of DNS records in Amazon Route 53: // -// * For public and private DNS namespaces, one of the following combinations of -// DNS records in Amazon Route 53: +// * A // -// * A +// * AAAA // -// * AAAA +// * A and AAAA // -// * A and -// AAAA +// * +// SRV // -// * SRV +// * CNAME // -// * CNAME +// * Optionally, a health check // -// * Optionally, a health check -// -// After -// you create the service, you can submit a RegisterInstance +// After you create the service, you +// can submit a RegisterInstance // (https://docs.aws.amazon.com/cloud-map/latest/api/API_RegisterInstance.html) // request, and AWS Cloud Map uses the values in the configuration to create the // specified entities. For the current quota on the number of instances that you @@ -62,14 +62,14 @@ type CreateServiceInput struct { // that requires a specific SRV format, such as HAProxy (http://www.haproxy.org/), // specify the following for Name: // - // * Start the name with an underscore (_), - // such as _exampleservice + // * Start the name with an underscore (_), such + // as _exampleservice // - // * End the name with ._protocol, such as ._tcp + // * End the name with ._protocol, such as ._tcp // - // When - // you register an instance, AWS Cloud Map creates an SRV record and assigns a name - // to the record by concatenating the service name and the namespace name, for + // When you + // register an instance, AWS Cloud Map creates an SRV record and assigns a name to + // the record by concatenating the service name and the namespace name, for // example: _exampleservice._tcp.example.com // // This member is required. diff --git a/service/servicediscovery/api_op_RegisterInstance.go b/service/servicediscovery/api_op_RegisterInstance.go index 865ccd71a97..4be1507a3a8 100644 --- a/service/servicediscovery/api_op_RegisterInstance.go +++ b/service/servicediscovery/api_op_RegisterInstance.go @@ -15,15 +15,15 @@ import ( // based on the settings in a specified service. 
When you submit a RegisterInstance // request, the following occurs: // -// * For each DNS record that you define in the +// * For each DNS record that you define in the // service that is specified by ServiceId, a record is created or updated in the // hosted zone that is associated with the corresponding namespace. // -// * If the +// * If the // service includes HealthCheckConfig, a health check is created based on the // settings in the health check configuration. // -// * The health check, if any, is +// * The health check, if any, is // associated with each of the new or updated records. // // One RegisterInstance @@ -33,20 +33,19 @@ import ( // AWS Cloud Map receives a DNS query for the specified DNS name, it returns the // applicable value: // -// * If the health check is healthy: returns all the -// records +// * If the health check is healthy: returns all the records // -// * If the health check is unhealthy: returns the applicable value -// for the last healthy instance +// * +// If the health check is unhealthy: returns the applicable value for the last +// healthy instance // -// * If you didn't specify a health check -// configuration: returns all the records +// * If you didn't specify a health check configuration: returns +// all the records // -// For the current quota on the number of -// instances that you can register using the same namespace and using the same -// service, see AWS Cloud Map Limits -// (https://docs.aws.amazon.com/cloud-map/latest/dg/cloud-map-limits.html) in the -// AWS Cloud Map Developer Guide. +// For the current quota on the number of instances that you can +// register using the same namespace and using the same service, see AWS Cloud Map +// Limits (https://docs.aws.amazon.com/cloud-map/latest/dg/cloud-map-limits.html) +// in the AWS Cloud Map Developer Guide. func (c *Client) RegisterInstance(ctx context.Context, params *RegisterInstanceInput, optFns ...func(*Options)) (*RegisterInstanceOutput, error) { if params == nil { params = &RegisterInstanceInput{} @@ -67,73 +66,72 @@ type RegisterInstanceInput struct { // A string map that contains the following information for the service that you // specify in ServiceId: // - // * The attributes that apply to the records that are + // * The attributes that apply to the records that are // defined in the service. // - // * For each attribute, the applicable - // value. + // * For each attribute, the applicable value. // - // Supported attribute keys include the following: AWS_ALIAS_DNS_NAME If - // you want AWS Cloud Map to create an Amazon Route 53 alias record that routes - // traffic to an Elastic Load Balancing load balancer, specify the DNS name that is - // associated with the load balancer. For information about how to get the DNS - // name, see "DNSName" in the topic AliasTarget + // Supported + // attribute keys include the following: AWS_ALIAS_DNS_NAME If you want AWS Cloud + // Map to create an Amazon Route 53 alias record that routes traffic to an Elastic + // Load Balancing load balancer, specify the DNS name that is associated with the + // load balancer. For information about how to get the DNS name, see "DNSName" in + // the topic AliasTarget // (https://docs.aws.amazon.com/Route53/latest/APIReference/API_AliasTarget.html) // in the Route 53 API Reference. Note the following: // - // * The configuration for - // the service that is specified by ServiceId must include settings for an A - // record, an AAAA record, or both. 
- // - // * In the service that is specified by - // ServiceId, the value of RoutingPolicy must be WEIGHTED. - // - // * If the service - // that is specified by ServiceId includes HealthCheckConfig settings, AWS Cloud - // Map will create the Route 53 health check, but it won't associate the health - // check with the alias record. - // - // * Auto naming currently doesn't support - // creating alias records that route traffic to AWS resources other than Elastic - // Load Balancing load balancers. - // - // * If you specify a value for - // AWS_ALIAS_DNS_NAME, don't specify values for any of the AWS_INSTANCE - // attributes. - // - // AWS_EC2_INSTANCE_ID HTTP namespaces only. The Amazon EC2 instance - // ID for the instance. If the AWS_EC2_INSTANCE_ID attribute is specified, then the - // only other attribute that can be specified is AWS_INIT_HEALTH_STATUS. When the - // AWS_EC2_INSTANCE_ID attribute is specified, then the AWS_INSTANCE_IPV4 attribute - // will be filled out with the primary private IPv4 address. AWS_INIT_HEALTH_STATUS - // If the service configuration includes HealthCheckCustomConfig, you can - // optionally use AWS_INIT_HEALTH_STATUS to specify the initial status of the - // custom health check, HEALTHY or UNHEALTHY. If you don't specify a value for - // AWS_INIT_HEALTH_STATUS, the initial status is HEALTHY. AWS_INSTANCE_CNAME If the - // service configuration includes a CNAME record, the domain name that you want - // Route 53 to return in response to DNS queries, for example, example.com. This - // value is required if the service specified by ServiceId includes settings for an - // CNAME record. AWS_INSTANCE_IPV4 If the service configuration includes an A - // record, the IPv4 address that you want Route 53 to return in response to DNS - // queries, for example, 192.0.2.44. This value is required if the service - // specified by ServiceId includes settings for an A record. If the service - // includes settings for an SRV record, you must specify a value for - // AWS_INSTANCE_IPV4, AWS_INSTANCE_IPV6, or both. AWS_INSTANCE_IPV6 If the service - // configuration includes an AAAA record, the IPv6 address that you want Route 53 - // to return in response to DNS queries, for example, - // 2001:0db8:85a3:0000:0000:abcd:0001:2345. This value is required if the service - // specified by ServiceId includes settings for an AAAA record. If the service - // includes settings for an SRV record, you must specify a value for - // AWS_INSTANCE_IPV4, AWS_INSTANCE_IPV6, or both. AWS_INSTANCE_PORT If the service - // includes an SRV record, the value that you want Route 53 to return for the port. - // If the service includes HealthCheckConfig, the port on the endpoint that you - // want Route 53 to send requests to. This value is required if you specified - // settings for an SRV record or a Route 53 health check when you created the - // service. Custom attributes You can add up to 30 custom attributes. For each - // key-value pair, the maximum length of the attribute name is 255 characters, and - // the maximum length of the attribute value is 1,024 characters. The total size of - // all provided attributes (sum of all keys and values) must not exceed 5,000 - // characters. + // * The configuration for the + // service that is specified by ServiceId must include settings for an A record, an + // AAAA record, or both. + // + // * In the service that is specified by ServiceId, the + // value of RoutingPolicy must be WEIGHTED. 
+ // + // * If the service that is specified by + // ServiceId includes HealthCheckConfig settings, AWS Cloud Map will create the + // Route 53 health check, but it won't associate the health check with the alias + // record. + // + // * Auto naming currently doesn't support creating alias records that + // route traffic to AWS resources other than Elastic Load Balancing load + // balancers. + // + // * If you specify a value for AWS_ALIAS_DNS_NAME, don't specify + // values for any of the AWS_INSTANCE attributes. + // + // AWS_EC2_INSTANCE_ID HTTP + // namespaces only. The Amazon EC2 instance ID for the instance. If the + // AWS_EC2_INSTANCE_ID attribute is specified, then the only other attribute that + // can be specified is AWS_INIT_HEALTH_STATUS. When the AWS_EC2_INSTANCE_ID + // attribute is specified, then the AWS_INSTANCE_IPV4 attribute will be filled out + // with the primary private IPv4 address. AWS_INIT_HEALTH_STATUS If the service + // configuration includes HealthCheckCustomConfig, you can optionally use + // AWS_INIT_HEALTH_STATUS to specify the initial status of the custom health check, + // HEALTHY or UNHEALTHY. If you don't specify a value for AWS_INIT_HEALTH_STATUS, + // the initial status is HEALTHY. AWS_INSTANCE_CNAME If the service configuration + // includes a CNAME record, the domain name that you want Route 53 to return in + // response to DNS queries, for example, example.com. This value is required if the + // service specified by ServiceId includes settings for an CNAME record. + // AWS_INSTANCE_IPV4 If the service configuration includes an A record, the IPv4 + // address that you want Route 53 to return in response to DNS queries, for + // example, 192.0.2.44. This value is required if the service specified by + // ServiceId includes settings for an A record. If the service includes settings + // for an SRV record, you must specify a value for AWS_INSTANCE_IPV4, + // AWS_INSTANCE_IPV6, or both. AWS_INSTANCE_IPV6 If the service configuration + // includes an AAAA record, the IPv6 address that you want Route 53 to return in + // response to DNS queries, for example, 2001:0db8:85a3:0000:0000:abcd:0001:2345. + // This value is required if the service specified by ServiceId includes settings + // for an AAAA record. If the service includes settings for an SRV record, you must + // specify a value for AWS_INSTANCE_IPV4, AWS_INSTANCE_IPV6, or both. + // AWS_INSTANCE_PORT If the service includes an SRV record, the value that you want + // Route 53 to return for the port. If the service includes HealthCheckConfig, the + // port on the endpoint that you want Route 53 to send requests to. This value is + // required if you specified settings for an SRV record or a Route 53 health check + // when you created the service. Custom attributes You can add up to 30 custom + // attributes. For each key-value pair, the maximum length of the attribute name is + // 255 characters, and the maximum length of the attribute value is 1,024 + // characters. The total size of all provided attributes (sum of all keys and + // values) must not exceed 5,000 characters. // // This member is required. Attributes map[string]*string @@ -141,24 +139,23 @@ type RegisterInstanceInput struct { // An identifier that you want to associate with the instance. Note the // following: // - // * If the service that is specified by ServiceId includes - // settings for an SRV record, the value of InstanceId is automatically included as - // part of the value for the SRV record. 
For more information, see DnsRecord > Type + // * If the service that is specified by ServiceId includes settings + // for an SRV record, the value of InstanceId is automatically included as part of + // the value for the SRV record. For more information, see DnsRecord > Type // (https://docs.aws.amazon.com/cloud-map/latest/api/API_DnsRecord.html#cloudmap-Type-DnsRecord-Type). // + // * + // You can use this value to update an existing instance. // - // * You can use this value to update an existing instance. - // - // * To register a - // new instance, you must specify a value that is unique among instances that you + // * To register a new + // instance, you must specify a value that is unique among instances that you // register by using the same service. // - // * If you specify an existing InstanceId - // and ServiceId, AWS Cloud Map updates the existing DNS records, if any. If - // there's also an existing health check, AWS Cloud Map deletes the old health - // check and creates a new one. The health check isn't deleted immediately, so it - // will still appear for a while if you submit a ListHealthChecks request, for - // example. + // * If you specify an existing InstanceId and + // ServiceId, AWS Cloud Map updates the existing DNS records, if any. If there's + // also an existing health check, AWS Cloud Map deletes the old health check and + // creates a new one. The health check isn't deleted immediately, so it will still + // appear for a while if you submit a ListHealthChecks request, for example. // // This member is required. InstanceId *string diff --git a/service/servicediscovery/api_op_UpdateService.go b/service/servicediscovery/api_op_UpdateService.go index 9c1eee15cf9..f4c60dfd33b 100644 --- a/service/servicediscovery/api_op_UpdateService.go +++ b/service/servicediscovery/api_op_UpdateService.go @@ -13,27 +13,27 @@ import ( // Submits a request to perform the following operations: // -// * Update the TTL -// setting for existing DnsRecords configurations +// * Update the TTL setting +// for existing DnsRecords configurations // -// * Add, update, or delete +// * Add, update, or delete // HealthCheckConfig for a specified service You can't add, update, or delete a // HealthCheckCustomConfig configuration. // // For public and private DNS namespaces, // note the following: // -// * If you omit any existing DnsRecords or -// HealthCheckConfig configurations from an UpdateService request, the -// configurations are deleted from the service. +// * If you omit any existing DnsRecords or HealthCheckConfig +// configurations from an UpdateService request, the configurations are deleted +// from the service. // -// * If you omit an existing -// HealthCheckCustomConfig configuration from an UpdateService request, the -// configuration is not deleted from the service. +// * If you omit an existing HealthCheckCustomConfig +// configuration from an UpdateService request, the configuration is not deleted +// from the service. // -// When you update settings for a -// service, AWS Cloud Map also updates the corresponding settings in all the -// records and health checks that were created by using the specified service. +// When you update settings for a service, AWS Cloud Map also +// updates the corresponding settings in all the records and health checks that +// were created by using the specified service. 
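The UpdateService doc comment above warns that any DnsRecords or HealthCheckConfig configuration omitted from the request is deleted, so callers should resend the complete DNS configuration on every update. Below is a minimal sketch of that pattern against this generated client; the A record and TTL are illustrative only, and the ServiceChange/DnsConfigChange field shapes are assumed from the Cloud Map API rather than confirmed against this exact commit.

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/servicediscovery"
	"github.com/aws/aws-sdk-go-v2/service/servicediscovery/types"
)

// updateServiceTTL changes the TTL of a service's A record. The whole DnsRecords
// list is resent because configurations omitted from UpdateService are deleted,
// as noted in the doc comment above.
func updateServiceTTL(ctx context.Context, client *servicediscovery.Client, serviceID string, ttl int64) (string, error) {
	out, err := client.UpdateService(ctx, &servicediscovery.UpdateServiceInput{
		Id: aws.String(serviceID),
		Service: &types.ServiceChange{
			DnsConfig: &types.DnsConfigChange{
				DnsRecords: []*types.DnsRecord{
					{Type: types.RecordTypeA, TTL: aws.Int64(ttl)},
				},
			},
		},
	})
	if err != nil {
		return "", err
	}
	if out.OperationId == nil {
		return "", fmt.Errorf("UpdateService returned no operation id")
	}
	// The update is asynchronous; the returned operation ID can be polled
	// with GetOperation to confirm that the change finished.
	return *out.OperationId, nil
}

The returned operation ID can then be handed to a GetOperation poller such as the one sketched later in this patch, after the Operation type.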
func (c *Client) UpdateService(ctx context.Context, params *UpdateServiceInput, optFns ...func(*Options)) (*UpdateServiceOutput, error) { if params == nil { params = &UpdateServiceInput{} diff --git a/service/servicediscovery/types/enums.go b/service/servicediscovery/types/enums.go index b1a41adc53f..4cc090aeb87 100644 --- a/service/servicediscovery/types/enums.go +++ b/service/servicediscovery/types/enums.go @@ -120,9 +120,9 @@ type NamespaceType string // Enum values for NamespaceType const ( - NamespaceTypeDns_public NamespaceType = "DNS_PUBLIC" - NamespaceTypeDns_private NamespaceType = "DNS_PRIVATE" - NamespaceTypeHttp NamespaceType = "HTTP" + NamespaceTypeDnsPublic NamespaceType = "DNS_PUBLIC" + NamespaceTypeDnsPrivate NamespaceType = "DNS_PRIVATE" + NamespaceTypeHttp NamespaceType = "HTTP" ) // Values returns all known values for NamespaceType. Note that this can be @@ -140,11 +140,11 @@ type OperationFilterName string // Enum values for OperationFilterName const ( - OperationFilterNameNamespace_id OperationFilterName = "NAMESPACE_ID" - OperationFilterNameService_id OperationFilterName = "SERVICE_ID" - OperationFilterNameStatus OperationFilterName = "STATUS" - OperationFilterNameType OperationFilterName = "TYPE" - OperationFilterNameUpdate_date OperationFilterName = "UPDATE_DATE" + OperationFilterNameNamespaceId OperationFilterName = "NAMESPACE_ID" + OperationFilterNameServiceId OperationFilterName = "SERVICE_ID" + OperationFilterNameStatus OperationFilterName = "STATUS" + OperationFilterNameType OperationFilterName = "TYPE" + OperationFilterNameUpdateDate OperationFilterName = "UPDATE_DATE" ) // Values returns all known values for OperationFilterName. Note that this can be @@ -206,11 +206,11 @@ type OperationType string // Enum values for OperationType const ( - OperationTypeCreate_namespace OperationType = "CREATE_NAMESPACE" - OperationTypeDelete_namespace OperationType = "DELETE_NAMESPACE" - OperationTypeUpdate_service OperationType = "UPDATE_SERVICE" - OperationTypeRegister_instance OperationType = "REGISTER_INSTANCE" - OperationTypeDeregister_instance OperationType = "DEREGISTER_INSTANCE" + OperationTypeCreateNamespace OperationType = "CREATE_NAMESPACE" + OperationTypeDeleteNamespace OperationType = "DELETE_NAMESPACE" + OperationTypeUpdateService OperationType = "UPDATE_SERVICE" + OperationTypeRegisterInstance OperationType = "REGISTER_INSTANCE" + OperationTypeDeregisterInstance OperationType = "DEREGISTER_INSTANCE" ) // Values returns all known values for OperationType. Note that this can be @@ -270,7 +270,7 @@ type ServiceFilterName string // Enum values for ServiceFilterName const ( - ServiceFilterNameNamespace_id ServiceFilterName = "NAMESPACE_ID" + ServiceFilterNameNamespaceId ServiceFilterName = "NAMESPACE_ID" ) // Values returns all known values for ServiceFilterName. Note that this can be diff --git a/service/servicediscovery/types/types.go b/service/servicediscovery/types/types.go index 23cdfdfa908..98afb010f51 100644 --- a/service/servicediscovery/types/types.go +++ b/service/servicediscovery/types/types.go @@ -91,21 +91,21 @@ type DnsRecord struct { // returns in response to DNS queries. You can specify values for Type in the // following combinations: // - // * A + // * A // - // * AAAA + // * AAAA // - // * A and AAAA + // * A and AAAA // - // * SRV + // * SRV // - // * - // CNAME + // * CNAME // - // If you want AWS Cloud Map to create a Route 53 alias record when you - // register an instance, specify A or AAAA for Type. 
You specify other settings, - // such as the IP address for A and AAAA records, when you register an instance. - // For more information, see RegisterInstance + // If you want + // AWS Cloud Map to create a Route 53 alias record when you register an instance, + // specify A or AAAA for Type. You specify other settings, such as the IP address + // for A and AAAA records, when you register an instance. For more information, see + // RegisterInstance // (https://docs.aws.amazon.com/cloud-map/latest/api/API_RegisterInstance.html). // The following values are supported: A Route 53 returns the IP address of the // resource in IPv4 format, such as 192.0.2.44. AAAA Route 53 returns the IP @@ -113,59 +113,58 @@ type DnsRecord struct { // 2001:0db8:85a3:0000:0000:abcd:0001:2345. CNAME Route 53 returns the domain name // of the resource, such as www.example.com. Note the following: // - // * You specify - // the domain name that you want to route traffic to when you register an instance. - // For more information, see Attributes + // * You specify the + // domain name that you want to route traffic to when you register an instance. For + // more information, see Attributes // (https://docs.aws.amazon.com/cloud-map/latest/api/API_RegisterInstance.html#cloudmap-RegisterInstance-request-Attributes) // in the topic RegisterInstance // (https://docs.aws.amazon.com/cloud-map/latest/api/API_RegisterInstance.html). // + // * + // You must specify WEIGHTED for the value of RoutingPolicy. // - // * You must specify WEIGHTED for the value of RoutingPolicy. + // * You can't specify + // both CNAME for Type and settings for HealthCheckConfig. If you do, the request + // will fail with an InvalidInput error. // - // * You can't - // specify both CNAME for Type and settings for HealthCheckConfig. If you do, the - // request will fail with an InvalidInput error. + // SRV Route 53 returns the value for an SRV + // record. The value for an SRV record uses the following values: priority weight + // port service-hostname Note the following about the values: // - // SRV Route 53 returns the value - // for an SRV record. The value for an SRV record uses the following values: - // priority weight port service-hostname Note the following about the values: + // * The values of + // priority and weight are both set to 1 and can't be changed. // - // - // * The values of priority and weight are both set to 1 and can't be changed. - // - // - // * The value of port comes from the value that you specify for the - // AWS_INSTANCE_PORT attribute when you submit a RegisterInstance + // * The value of port + // comes from the value that you specify for the AWS_INSTANCE_PORT attribute when + // you submit a RegisterInstance // (https://docs.aws.amazon.com/cloud-map/latest/api/API_RegisterInstance.html) // request. // - // * The value of service-hostname is a concatenation of the - // following values: + // * The value of service-hostname is a concatenation of the following + // values: // - // * The value that you specify for InstanceId when you - // register an instance. + // * The value that you specify for InstanceId when you register an + // instance. // - // * The name of the service. + // * The name of the service. // - // * The name of - // the namespace. + // * The name of the namespace. 
// - // For example, if the value of InstanceId is test, the name of - // the service is backend, and the name of the namespace is example.com, the value - // of service-hostname is: test.backend.example.com + // For + // example, if the value of InstanceId is test, the name of the service is backend, + // and the name of the namespace is example.com, the value of service-hostname is: + // test.backend.example.com // - // If you specify settings for an - // SRV record, note the following: + // If you specify settings for an SRV record, note the + // following: // - // * If you specify values for - // AWS_INSTANCE_IPV4, AWS_INSTANCE_IPV6, or both in the RegisterInstance request, - // AWS Cloud Map automatically creates A and/or AAAA records that have the same - // name as the value of service-hostname in the SRV record. You can ignore these - // records. + // * If you specify values for AWS_INSTANCE_IPV4, AWS_INSTANCE_IPV6, or + // both in the RegisterInstance request, AWS Cloud Map automatically creates A + // and/or AAAA records that have the same name as the value of service-hostname in + // the SRV record. You can ignore these records. // - // * If you're using a system that requires a specific SRV format, - // such as HAProxy, see the Name + // * If you're using a system that + // requires a specific SRV format, such as HAProxy, see the Name // (https://docs.aws.amazon.com/cloud-map/latest/api/API_CreateService.html#cloudmap-CreateService-request-Name) // element in the documentation about CreateService for information about how to // specify the correct name format. @@ -201,14 +200,14 @@ type DnsRecord struct { // AWS_ALIAS_DNS_NAME attribute, AWS Cloud Map creates a Route 53 alias record. // Note the following: // -// * Route 53 automatically sets EvaluateTargetHealth to -// true for alias records. When EvaluateTargetHealth is true, the alias record -// inherits the health of the referenced AWS resource. such as an ELB load -// balancer. For more information, see EvaluateTargetHealth +// * Route 53 automatically sets EvaluateTargetHealth to true +// for alias records. When EvaluateTargetHealth is true, the alias record inherits +// the health of the referenced AWS resource. such as an ELB load balancer. For +// more information, see EvaluateTargetHealth // (https://docs.aws.amazon.com/Route53/latest/APIReference/API_AliasTarget.html#Route53-Type-AliasTarget-EvaluateTargetHealth). // -// -// * If you include HealthCheckConfig and then use the service to register an +// * +// If you include HealthCheckConfig and then use the service to register an // instance that creates an alias record, Route 53 doesn't create the health // check. // @@ -222,21 +221,21 @@ type HealthCheckConfig struct { // after you create a health check. You can create the following types of health // checks: // - // * HTTP: Route 53 tries to establish a TCP connection. If - // successful, Route 53 submits an HTTP request and waits for an HTTP status code - // of 200 or greater and less than 400. + // * HTTP: Route 53 tries to establish a TCP connection. If successful, + // Route 53 submits an HTTP request and waits for an HTTP status code of 200 or + // greater and less than 400. // - // * HTTPS: Route 53 tries to establish a - // TCP connection. If successful, Route 53 submits an HTTPS request and waits for - // an HTTP status code of 200 or greater and less than 400. If you specify HTTPS - // for the value of Type, the endpoint must support TLS v1.0 or later. 
+ // * HTTPS: Route 53 tries to establish a TCP + // connection. If successful, Route 53 submits an HTTPS request and waits for an + // HTTP status code of 200 or greater and less than 400. If you specify HTTPS for + // the value of Type, the endpoint must support TLS v1.0 or later. // - // * TCP: - // Route 53 tries to establish a TCP connection. If you specify TCP for Type, don't - // specify a value for ResourcePath. + // * TCP: Route 53 + // tries to establish a TCP connection. If you specify TCP for Type, don't specify + // a value for ResourcePath. // - // For more information, see How Route 53 - // Determines Whether an Endpoint Is Healthy + // For more information, see How Route 53 Determines + // Whether an Endpoint Is Healthy // (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-determining-health-of-endpoints.html) // in the Route 53 Developer Guide. // @@ -265,14 +264,14 @@ type HealthCheckConfig struct { // to evaluate the health of your resources, is useful in the following // circumstances: // -// * You can't use a health check that is defined by +// * You can't use a health check that is defined by // HealthCheckConfig because the resource isn't available over the internet. For // example, you can use a custom health check when the instance is in an Amazon // VPC. (To check the health of resources in a VPC, the health checker must also be // in the VPC.) // -// * You want to use a third-party health checker regardless of -// where your resources are. +// * You want to use a third-party health checker regardless of where +// your resources are. // // If you specify a health check configuration, you can // specify either HealthCheckCustomConfig or HealthCheckConfig but not both. To @@ -282,34 +281,33 @@ type HealthCheckConfig struct { // most recent UpdateInstanceCustomHealthStatus request. Here's how custom health // checks work: // -// * You create a service and specify a value for -// FailureThreshold. The failure threshold indicates the number of 30-second -// intervals you want AWS Cloud Map to wait between the time that your application -// sends an UpdateInstanceCustomHealthStatus +// * You create a service and specify a value for FailureThreshold. +// The failure threshold indicates the number of 30-second intervals you want AWS +// Cloud Map to wait between the time that your application sends an +// UpdateInstanceCustomHealthStatus // (https://docs.aws.amazon.com/cloud-map/latest/api/API_UpdateInstanceCustomHealthStatus.html) // request and the time that AWS Cloud Map stops routing internet traffic to the // corresponding resource. // -// * You register an instance. +// * You register an instance. // -// * You configure a +// * You configure a // third-party health checker to monitor the resource that is associated with the -// new instance. AWS Cloud Map doesn't check the health of the resource directly. -// +// new instance. AWS Cloud Map doesn't check the health of the resource +// directly. // -// * The third-party health-checker determines that the resource is unhealthy and -// notifies your application. +// * The third-party health-checker determines that the resource is +// unhealthy and notifies your application. // -// * Your application submits an +// * Your application submits an // UpdateInstanceCustomHealthStatus request. // -// * AWS Cloud Map waits for +// * AWS Cloud Map waits for // (FailureThreshold x 30) seconds. 
// -// * If another -// UpdateInstanceCustomHealthStatus request doesn't arrive during that time to -// change the status back to healthy, AWS Cloud Map stops routing traffic to the -// resource. +// * If another UpdateInstanceCustomHealthStatus +// request doesn't arrive during that time to change the status back to healthy, +// AWS Cloud Map stops routing traffic to the resource. type HealthCheckCustomConfig struct { // This parameter has been deprecated and is always set to 1. AWS Cloud Map waits @@ -362,23 +360,23 @@ type Instance struct { // An identifier that you want to associate with the instance. Note the // following: // - // * If the service that is specified by ServiceId includes - // settings for an SRV record, the value of InstanceId is automatically included as - // part of the value for the SRV record. For more information, see DnsRecord > Type + // * If the service that is specified by ServiceId includes settings + // for an SRV record, the value of InstanceId is automatically included as part of + // the value for the SRV record. For more information, see DnsRecord > Type // (https://docs.aws.amazon.com/cloud-map/latest/api/API_DnsRecord.html#cloudmap-Type-DnsRecord-Type). // + // * + // You can use this value to update an existing instance. // - // * You can use this value to update an existing instance. - // - // * To register a - // new instance, you must specify a value that is unique among instances that you + // * To register a new + // instance, you must specify a value that is unique among instances that you // register by using the same service. // - // * If you specify an existing InstanceId - // and ServiceId, AWS Cloud Map updates the existing DNS records. If there's also - // an existing health check, AWS Cloud Map deletes the old health check and creates - // a new one. The health check isn't deleted immediately, so it will still appear - // for a while if you submit a ListHealthChecks request, for example. + // * If you specify an existing InstanceId and + // ServiceId, AWS Cloud Map updates the existing DNS records. If there's also an + // existing health check, AWS Cloud Map deletes the old health check and creates a + // new one. The health check isn't deleted immediately, so it will still appear for + // a while if you submit a ListHealthChecks request, for example. // // This member is required. Id *string @@ -386,55 +384,55 @@ type Instance struct { // A string map that contains the following information for the service that you // specify in ServiceId: // - // * The attributes that apply to the records that are + // * The attributes that apply to the records that are // defined in the service. // - // * For each attribute, the applicable - // value. + // * For each attribute, the applicable value. // - // Supported attribute keys include the following: AWS_ALIAS_DNS_NAME If - // you want AWS Cloud Map to create a Route 53 alias record that routes traffic to - // an Elastic Load Balancing load balancer, specify the DNS name that is associated - // with the load balancer. For information about how to get the DNS name, see - // "DNSName" in the topic AliasTarget + // Supported + // attribute keys include the following: AWS_ALIAS_DNS_NAME If you want AWS Cloud + // Map to create a Route 53 alias record that routes traffic to an Elastic Load + // Balancing load balancer, specify the DNS name that is associated with the load + // balancer. 
For information about how to get the DNS name, see "DNSName" in the + // topic AliasTarget // (https://docs.aws.amazon.com/Route53/latest/APIReference/API_AliasTarget.html). // Note the following: // - // * The configuration for the service that is specified - // by ServiceId must include settings for an A record, an AAAA record, or both. + // * The configuration for the service that is specified by + // ServiceId must include settings for an A record, an AAAA record, or both. // + // * In + // the service that is specified by ServiceId, the value of RoutingPolicy must be + // WEIGHTED. // - // * In the service that is specified by ServiceId, the value of RoutingPolicy must - // be WEIGHTED. - // - // * If the service that is specified by ServiceId includes + // * If the service that is specified by ServiceId includes // HealthCheckConfig settings, AWS Cloud Map will create the health check, but it // won't associate the health check with the alias record. // - // * Auto naming - // currently doesn't support creating alias records that route traffic to AWS - // resources other than ELB load balancers. - // - // * If you specify a value for - // AWS_ALIAS_DNS_NAME, don't specify values for any of the AWS_INSTANCE - // attributes. - // - // AWS_EC2_INSTANCE_ID HTTP namespaces only. The Amazon EC2 instance - // ID for the instance. The AWS_INSTANCE_IPV4 attribute contains the primary - // private IPv4 address. AWS_INSTANCE_CNAME If the service configuration includes a - // CNAME record, the domain name that you want Route 53 to return in response to - // DNS queries, for example, example.com. This value is required if the service - // specified by ServiceId includes settings for an CNAME record. AWS_INSTANCE_IPV4 - // If the service configuration includes an A record, the IPv4 address that you - // want Route 53 to return in response to DNS queries, for example, 192.0.2.44. - // This value is required if the service specified by ServiceId includes settings - // for an A record. If the service includes settings for an SRV record, you must - // specify a value for AWS_INSTANCE_IPV4, AWS_INSTANCE_IPV6, or both. - // AWS_INSTANCE_IPV6 If the service configuration includes an AAAA record, the IPv6 - // address that you want Route 53 to return in response to DNS queries, for - // example, 2001:0db8:85a3:0000:0000:abcd:0001:2345. This value is required if the - // service specified by ServiceId includes settings for an AAAA record. If the - // service includes settings for an SRV record, you must specify a value for + // * Auto naming currently + // doesn't support creating alias records that route traffic to AWS resources other + // than ELB load balancers. + // + // * If you specify a value for AWS_ALIAS_DNS_NAME, don't + // specify values for any of the AWS_INSTANCE attributes. + // + // AWS_EC2_INSTANCE_ID HTTP + // namespaces only. The Amazon EC2 instance ID for the instance. The + // AWS_INSTANCE_IPV4 attribute contains the primary private IPv4 address. + // AWS_INSTANCE_CNAME If the service configuration includes a CNAME record, the + // domain name that you want Route 53 to return in response to DNS queries, for + // example, example.com. This value is required if the service specified by + // ServiceId includes settings for an CNAME record. AWS_INSTANCE_IPV4 If the + // service configuration includes an A record, the IPv4 address that you want Route + // 53 to return in response to DNS queries, for example, 192.0.2.44. 
This value is + // required if the service specified by ServiceId includes settings for an A + // record. If the service includes settings for an SRV record, you must specify a + // value for AWS_INSTANCE_IPV4, AWS_INSTANCE_IPV6, or both. AWS_INSTANCE_IPV6 If + // the service configuration includes an AAAA record, the IPv6 address that you + // want Route 53 to return in response to DNS queries, for example, + // 2001:0db8:85a3:0000:0000:abcd:0001:2345. This value is required if the service + // specified by ServiceId includes settings for an AAAA record. If the service + // includes settings for an SRV record, you must specify a value for // AWS_INSTANCE_IPV4, AWS_INSTANCE_IPV6, or both. AWS_INSTANCE_PORT If the service // includes an SRV record, the value that you want Route 53 to return for the port. // If the service includes HealthCheckConfig, the port on the endpoint that you @@ -458,39 +456,39 @@ type InstanceSummary struct { // A string map that contains the following information: // - // * The attributes that - // are associate with the instance. + // * The attributes that are + // associate with the instance. // - // * For each attribute, the applicable + // * For each attribute, the applicable // value. // // Supported attribute keys include the following: // - // * - // AWS_ALIAS_DNS_NAME: For an alias record that routes traffic to an Elastic Load - // Balancing load balancer, the DNS name that is associated with the load - // balancer. - // - // * AWS_EC2_INSTANCE_ID: (HTTP namespaces only) The Amazon EC2 - // instance ID for the instance. When the AWS_EC2_INSTANCE_ID attribute is - // specified, then the AWS_INSTANCE_IPV4 attribute contains the primary private - // IPv4 address. + // * AWS_ALIAS_DNS_NAME: + // For an alias record that routes traffic to an Elastic Load Balancing load + // balancer, the DNS name that is associated with the load balancer. // - // * AWS_INSTANCE_CNAME: For a CNAME record, the domain name - // that Route 53 returns in response to DNS queries, for example, example.com. + // * + // AWS_EC2_INSTANCE_ID: (HTTP namespaces only) The Amazon EC2 instance ID for the + // instance. When the AWS_EC2_INSTANCE_ID attribute is specified, then the + // AWS_INSTANCE_IPV4 attribute contains the primary private IPv4 address. // + // * + // AWS_INSTANCE_CNAME: For a CNAME record, the domain name that Route 53 returns in + // response to DNS queries, for example, example.com. // - // * AWS_INSTANCE_IPV4: For an A record, the IPv4 address that Route 53 returns in - // response to DNS queries, for example, 192.0.2.44. + // * AWS_INSTANCE_IPV4: For an + // A record, the IPv4 address that Route 53 returns in response to DNS queries, for + // example, 192.0.2.44. // - // * AWS_INSTANCE_IPV6: For - // an AAAA record, the IPv6 address that Route 53 returns in response to DNS - // queries, for example, 2001:0db8:85a3:0000:0000:abcd:0001:2345. + // * AWS_INSTANCE_IPV6: For an AAAA record, the IPv6 address + // that Route 53 returns in response to DNS queries, for example, + // 2001:0db8:85a3:0000:0000:abcd:0001:2345. // - // * - // AWS_INSTANCE_PORT: For an SRV record, the value that Route 53 returns for the - // port. In addition, if the service includes HealthCheckConfig, the port on the - // endpoint that Route 53 sends requests to. + // * AWS_INSTANCE_PORT: For an SRV + // record, the value that Route 53 returns for the port. In addition, if the + // service includes HealthCheckConfig, the port on the endpoint that Route 53 sends + // requests to. 
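The attribute keys documented above come back as a map[string]*string, so reading an endpoint out of a ListInstances response means guarding every lookup against nil. A short sketch follows, assuming the ListInstances call shape in this package; the attribute keys are exactly the ones listed above.

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/servicediscovery"
)

// deref safely dereferences an optional *string, returning "" for nil.
func deref(p *string) string {
	if p == nil {
		return ""
	}
	return *p
}

// printEndpoints lists the instances registered with a service and reads the
// endpoint address and port out of the attribute map described above.
func printEndpoints(ctx context.Context, client *servicediscovery.Client, serviceID string) error {
	out, err := client.ListInstances(ctx, &servicediscovery.ListInstancesInput{
		ServiceId: aws.String(serviceID),
	})
	if err != nil {
		return err
	}
	for _, inst := range out.Instances {
		// AWS_INSTANCE_IPV4 and AWS_INSTANCE_PORT are only present when the
		// service defines the corresponding A / SRV or health-checked records.
		fmt.Printf("%s -> %s:%s\n",
			deref(inst.Id),
			deref(inst.Attributes["AWS_INSTANCE_IPV4"]),
			deref(inst.Attributes["AWS_INSTANCE_PORT"]))
	}
	return nil
}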
Attributes map[string]*string // The ID for an instance that you created by using a specified service. @@ -533,14 +531,14 @@ type Namespace struct { // The type of the namespace. The methods for discovering instances depends on the // value that you specify: // - // * HTTP: Instances can be discovered only + // * HTTP: Instances can be discovered only // programmatically, using the AWS Cloud Map DiscoverInstances API. // - // * - // DNS_PUBLIC: Instances can be discovered using public DNS queries and using the + // * DNS_PUBLIC: + // Instances can be discovered using public DNS queries and using the // DiscoverInstances API. // - // * DNS_PRIVATE: Instances can be discovered using DNS + // * DNS_PRIVATE: Instances can be discovered using DNS // queries in VPCs and using the DiscoverInstances API. Type NamespaceType } @@ -563,15 +561,15 @@ type NamespaceFilter struct { // The operator that you want to use to determine whether ListNamespaces returns a // namespace. Valid values for condition include: // - // * EQ: When you specify EQ - // for the condition, you can choose to list only public namespaces or private + // * EQ: When you specify EQ for + // the condition, you can choose to list only public namespaces or private // namespaces, but not both. EQ is the default condition and can be omitted. // - // * - // IN: When you specify IN for the condition, you can choose to list public - // namespaces, private namespaces, or both. + // * IN: + // When you specify IN for the condition, you can choose to list public namespaces, + // private namespaces, or both. // - // * BETWEEN: Not applicable + // * BETWEEN: Not applicable Condition FilterCondition } @@ -629,21 +627,21 @@ type Operation struct { // The code associated with ErrorMessage. Values for ErrorCode include the // following: // - // * ACCESS_DENIED + // * ACCESS_DENIED // - // * CANNOT_CREATE_HOSTED_ZONE + // * CANNOT_CREATE_HOSTED_ZONE // - // * - // EXPIRED_TOKEN + // * EXPIRED_TOKEN // - // * HOSTED_ZONE_NOT_FOUND + // * + // HOSTED_ZONE_NOT_FOUND // - // * INTERNAL_FAILURE + // * INTERNAL_FAILURE // - // * - // INVALID_CHANGE_BATCH + // * INVALID_CHANGE_BATCH // - // * THROTTLED_REQUEST + // * + // THROTTLED_REQUEST ErrorCode *string // If the value of Status is FAIL, the reason that the operation failed. @@ -654,29 +652,28 @@ type Operation struct { // The status of the operation. Values include the following: // - // * SUBMITTED: - // This is the initial state immediately after you submit a request. + // * SUBMITTED: This is + // the initial state immediately after you submit a request. // - // * - // PENDING: AWS Cloud Map is performing the operation. + // * PENDING: AWS Cloud + // Map is performing the operation. // - // * SUCCESS: The - // operation succeeded. + // * SUCCESS: The operation succeeded. // - // * FAIL: The operation failed. For the failure reason, - // see ErrorMessage. + // * FAIL: + // The operation failed. For the failure reason, see ErrorMessage. Status OperationStatus // The name of the target entity that is associated with the operation: // - // * + // * // NAMESPACE: The namespace ID is returned in the ResourceId property. // - // * - // SERVICE: The service ID is returned in the ResourceId property. + // * SERVICE: + // The service ID is returned in the ResourceId property. // - // * INSTANCE: - // The instance ID is returned in the ResourceId property. + // * INSTANCE: The instance + // ID is returned in the ResourceId property. 
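Operations such as RegisterInstance, DeregisterInstance, and UpdateService complete asynchronously and report one of the status values listed above. The following hedged sketch polls GetOperation until the operation settles; the OperationStatus constant names are assumed to follow the same generated naming convention shown in the enums.go hunk earlier in this patch.

package example

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/servicediscovery"
	"github.com/aws/aws-sdk-go-v2/service/servicediscovery/types"
)

// waitForOperation polls GetOperation until the operation leaves the
// SUBMITTED/PENDING states described above, returning nil on SUCCESS and an
// error carrying ErrorMessage on FAIL.
func waitForOperation(ctx context.Context, client *servicediscovery.Client, operationID string) error {
	for {
		out, err := client.GetOperation(ctx, &servicediscovery.GetOperationInput{
			OperationId: aws.String(operationID),
		})
		if err != nil {
			return err
		}
		if op := out.Operation; op != nil {
			switch op.Status {
			case types.OperationStatusSuccess:
				return nil
			case types.OperationStatusFail:
				msg := ""
				if op.ErrorMessage != nil {
					msg = *op.ErrorMessage
				}
				// ErrorCode narrows the failure to one of the values listed above.
				return fmt.Errorf("operation %s failed: %s", operationID, msg)
			}
		}
		// Still SUBMITTED or PENDING; back off briefly before asking again.
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(5 * time.Second):
		}
	}
}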
Targets map[string]*string // The name of the operation that is associated with the specified ID. @@ -694,40 +691,40 @@ type OperationFilter struct { // Specify the operations that you want to get: // - // * NAMESPACE_ID: Gets - // operations related to specified namespaces. + // * NAMESPACE_ID: Gets operations + // related to specified namespaces. // - // * SERVICE_ID: Gets operations - // related to specified services. + // * SERVICE_ID: Gets operations related to + // specified services. // - // * STATUS: Gets operations based on the - // status of the operations: SUBMITTED, PENDING, SUCCEED, or FAIL. + // * STATUS: Gets operations based on the status of the + // operations: SUBMITTED, PENDING, SUCCEED, or FAIL. // - // * TYPE: - // Gets specified types of operation. + // * TYPE: Gets specified types + // of operation. // - // * UPDATE_DATE: Gets operations that - // changed status during a specified date/time range. + // * UPDATE_DATE: Gets operations that changed status during a + // specified date/time range. // // This member is required. Name OperationFilterName // Specify values that are applicable to the value that you specify for Name: // + // * + // NAMESPACE_ID: Specify one namespace ID. // - // * NAMESPACE_ID: Specify one namespace ID. - // - // * SERVICE_ID: Specify one service + // * SERVICE_ID: Specify one service // ID. // - // * STATUS: Specify one or more statuses: SUBMITTED, PENDING, SUCCEED, or + // * STATUS: Specify one or more statuses: SUBMITTED, PENDING, SUCCEED, or // FAIL. // - // * TYPE: Specify one or more of the following types: CREATE_NAMESPACE, + // * TYPE: Specify one or more of the following types: CREATE_NAMESPACE, // DELETE_NAMESPACE, UPDATE_SERVICE, REGISTER_INSTANCE, or DEREGISTER_INSTANCE. // - // - // * UPDATE_DATE: Specify a start date and an end date in Unix date/time format and + // * + // UPDATE_DATE: Specify a start date and an end date in Unix date/time format and // Coordinated Universal Time (UTC). The start date must be the first value. // // This member is required. @@ -736,19 +733,19 @@ type OperationFilter struct { // The operator that you want to use to determine whether an operation matches the // specified value. Valid values for condition include: // - // * EQ: When you specify - // EQ for the condition, you can specify only one value. EQ is supported for + // * EQ: When you specify EQ + // for the condition, you can specify only one value. EQ is supported for // NAMESPACE_ID, SERVICE_ID, STATUS, and TYPE. EQ is the default condition and can // be omitted. // - // * IN: When you specify IN for the condition, you can specify a - // list of one or more values. IN is supported for STATUS and TYPE. An operation - // must match one of the specified values to be returned in the response. + // * IN: When you specify IN for the condition, you can specify a list + // of one or more values. IN is supported for STATUS and TYPE. An operation must + // match one of the specified values to be returned in the response. // - // * - // BETWEEN: Specify a start date and an end date in Unix date/time format and - // Coordinated Universal Time (UTC). The start date must be the first value. - // BETWEEN is supported for UPDATE_DATE. + // * BETWEEN: + // Specify a start date and an end date in Unix date/time format and Coordinated + // Universal Time (UTC). The start date must be the first value. BETWEEN is + // supported for UPDATE_DATE. Condition FilterCondition } @@ -763,17 +760,16 @@ type OperationSummary struct { // The status of the operation. 
Values include the following: // - // * SUBMITTED: - // This is the initial state immediately after you submit a request. + // * SUBMITTED: This is + // the initial state immediately after you submit a request. // - // * - // PENDING: AWS Cloud Map is performing the operation. + // * PENDING: AWS Cloud + // Map is performing the operation. // - // * SUCCESS: The - // operation succeeded. + // * SUCCESS: The operation succeeded. // - // * FAIL: The operation failed. For the failure reason, - // see ErrorMessage. + // * FAIL: + // The operation failed. For the failure reason, see ErrorMessage. Status OperationStatus } @@ -867,14 +863,14 @@ type ServiceChange struct { // AWS_ALIAS_DNS_NAME attribute, AWS Cloud Map creates a Route 53 alias record. // Note the following: // - // * Route 53 automatically sets EvaluateTargetHealth to - // true for alias records. When EvaluateTargetHealth is true, the alias record - // inherits the health of the referenced AWS resource. such as an ELB load - // balancer. For more information, see EvaluateTargetHealth + // * Route 53 automatically sets EvaluateTargetHealth to true + // for alias records. When EvaluateTargetHealth is true, the alias record inherits + // the health of the referenced AWS resource. such as an ELB load balancer. For + // more information, see EvaluateTargetHealth // (https://docs.aws.amazon.com/Route53/latest/APIReference/API_AliasTarget.html#Route53-Type-AliasTarget-EvaluateTargetHealth). // - // - // * If you include HealthCheckConfig and then use the service to register an + // * + // If you include HealthCheckConfig and then use the service to register an // instance that creates an alias record, Route 53 doesn't create the health // check. // @@ -902,15 +898,15 @@ type ServiceFilter struct { // The operator that you want to use to determine whether a service is returned by // ListServices. Valid values for Condition include the following: // - // * EQ: When - // you specify EQ, specify one namespace ID for Values. EQ is the default condition - // and can be omitted. + // * EQ: When you + // specify EQ, specify one namespace ID for Values. EQ is the default condition and + // can be omitted. // - // * IN: When you specify IN, specify a list of the IDs - // for the namespaces that you want ListServices to return a list of services - // for. + // * IN: When you specify IN, specify a list of the IDs for the + // namespaces that you want ListServices to return a list of services for. // - // * BETWEEN: Not applicable. + // * + // BETWEEN: Not applicable. Condition FilterCondition } @@ -958,14 +954,14 @@ type ServiceSummary struct { // AWS_ALIAS_DNS_NAME attribute, AWS Cloud Map creates a Route 53 alias record. // Note the following: // - // * Route 53 automatically sets EvaluateTargetHealth to - // true for alias records. When EvaluateTargetHealth is true, the alias record - // inherits the health of the referenced AWS resource. such as an ELB load - // balancer. For more information, see EvaluateTargetHealth + // * Route 53 automatically sets EvaluateTargetHealth to true + // for alias records. When EvaluateTargetHealth is true, the alias record inherits + // the health of the referenced AWS resource. such as an ELB load balancer. For + // more information, see EvaluateTargetHealth // (https://docs.aws.amazon.com/Route53/latest/APIReference/API_AliasTarget.html#Route53-Type-AliasTarget-EvaluateTargetHealth). 
// - // - // * If you include HealthCheckConfig and then use the service to register an + // * + // If you include HealthCheckConfig and then use the service to register an // instance that creates an alias record, Route 53 doesn't create the health // check. // @@ -979,14 +975,14 @@ type ServiceSummary struct { // to evaluate the health of your resources, is useful in the following // circumstances: // - // * You can't use a health check that is defined by + // * You can't use a health check that is defined by // HealthCheckConfig because the resource isn't available over the internet. For // example, you can use a custom health check when the instance is in an Amazon // VPC. (To check the health of resources in a VPC, the health checker must also be // in the VPC.) // - // * You want to use a third-party health checker regardless of - // where your resources are. + // * You want to use a third-party health checker regardless of where + // your resources are. // // If you specify a health check configuration, you can // specify either HealthCheckCustomConfig or HealthCheckConfig but not both. To @@ -996,34 +992,33 @@ type ServiceSummary struct { // most recent UpdateInstanceCustomHealthStatus request. Here's how custom health // checks work: // - // * You create a service and specify a value for - // FailureThreshold. The failure threshold indicates the number of 30-second - // intervals you want AWS Cloud Map to wait between the time that your application - // sends an UpdateInstanceCustomHealthStatus + // * You create a service and specify a value for FailureThreshold. + // The failure threshold indicates the number of 30-second intervals you want AWS + // Cloud Map to wait between the time that your application sends an + // UpdateInstanceCustomHealthStatus // (https://docs.aws.amazon.com/cloud-map/latest/api/API_UpdateInstanceCustomHealthStatus.html) // request and the time that AWS Cloud Map stops routing internet traffic to the // corresponding resource. // - // * You register an instance. + // * You register an instance. // - // * You configure a + // * You configure a // third-party health checker to monitor the resource that is associated with the - // new instance. AWS Cloud Map doesn't check the health of the resource directly. - // + // new instance. AWS Cloud Map doesn't check the health of the resource + // directly. // - // * The third-party health-checker determines that the resource is unhealthy and - // notifies your application. + // * The third-party health-checker determines that the resource is + // unhealthy and notifies your application. // - // * Your application submits an + // * Your application submits an // UpdateInstanceCustomHealthStatus request. // - // * AWS Cloud Map waits for + // * AWS Cloud Map waits for // (FailureThreshold x 30) seconds. // - // * If another - // UpdateInstanceCustomHealthStatus request doesn't arrive during that time to - // change the status back to healthy, AWS Cloud Map stops routing traffic to the - // resource. + // * If another UpdateInstanceCustomHealthStatus + // request doesn't arrive during that time to change the status back to healthy, + // AWS Cloud Map stops routing traffic to the resource. HealthCheckCustomConfig *HealthCheckCustomConfig // The ID that AWS Cloud Map assigned to the service when you created it. 
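For services that use HealthCheckCustomConfig as described above, the application itself reports health by calling UpdateInstanceCustomHealthStatus; Cloud Map then keeps routing traffic for roughly FailureThreshold x 30 seconds after the last status change before it stops. A minimal sketch follows, assuming the CustomHealthStatus constant names follow the generated enum naming used elsewhere in this patch.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/servicediscovery"
	"github.com/aws/aws-sdk-go-v2/service/servicediscovery/types"
)

// reportUnhealthy flips the custom health status of a registered instance to
// UNHEALTHY. A later call with CustomHealthStatusHealthy restores routing,
// provided it arrives before the FailureThreshold window elapses.
func reportUnhealthy(ctx context.Context, client *servicediscovery.Client, serviceID, instanceID string) error {
	_, err := client.UpdateInstanceCustomHealthStatus(ctx, &servicediscovery.UpdateInstanceCustomHealthStatusInput{
		ServiceId:  aws.String(serviceID),
		InstanceId: aws.String(instanceID),
		Status:     types.CustomHealthStatusUnhealthy,
	})
	return err
}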
diff --git a/service/servicequotas/api_op_DisassociateServiceQuotaTemplate.go b/service/servicequotas/api_op_DisassociateServiceQuotaTemplate.go index d531b18ec8b..7222f7c3e9a 100644 --- a/service/servicequotas/api_op_DisassociateServiceQuotaTemplate.go +++ b/service/servicequotas/api_op_DisassociateServiceQuotaTemplate.go @@ -15,11 +15,11 @@ import ( // quota template does not apply the quota increase requests from the template. // Related operations // -// * To enable the quota template, call +// * To enable the quota template, call // AssociateServiceQuotaTemplate. // -// * To delete a specific service quota from -// the template, use DeleteServiceQuotaIncreaseRequestFromTemplate. +// * To delete a specific service quota from the +// template, use DeleteServiceQuotaIncreaseRequestFromTemplate. func (c *Client) DisassociateServiceQuotaTemplate(ctx context.Context, params *DisassociateServiceQuotaTemplateInput, optFns ...func(*Options)) (*DisassociateServiceQuotaTemplateOutput, error) { if params == nil { params = &DisassociateServiceQuotaTemplateInput{} diff --git a/service/servicequotas/types/enums.go b/service/servicequotas/types/enums.go index 4822374db02..5e70047383d 100644 --- a/service/servicequotas/types/enums.go +++ b/service/servicequotas/types/enums.go @@ -6,10 +6,10 @@ type ErrorCode string // Enum values for ErrorCode const ( - ErrorCodeDependency_access_denied_error ErrorCode = "DEPENDENCY_ACCESS_DENIED_ERROR" - ErrorCodeDependency_throttling_error ErrorCode = "DEPENDENCY_THROTTLING_ERROR" - ErrorCodeDependency_service_error ErrorCode = "DEPENDENCY_SERVICE_ERROR" - ErrorCodeService_quota_not_available_error ErrorCode = "SERVICE_QUOTA_NOT_AVAILABLE_ERROR" + ErrorCodeDependencyAccessDeniedError ErrorCode = "DEPENDENCY_ACCESS_DENIED_ERROR" + ErrorCodeDependencyThrottlingError ErrorCode = "DEPENDENCY_THROTTLING_ERROR" + ErrorCodeDependencyServiceError ErrorCode = "DEPENDENCY_SERVICE_ERROR" + ErrorCodeServiceQuotaNotAvailableError ErrorCode = "SERVICE_QUOTA_NOT_AVAILABLE_ERROR" ) // Values returns all known values for ErrorCode. Note that this can be expanded in @@ -56,11 +56,11 @@ type RequestStatus string // Enum values for RequestStatus const ( - RequestStatusPending RequestStatus = "PENDING" - RequestStatusCase_opened RequestStatus = "CASE_OPENED" - RequestStatusApproved RequestStatus = "APPROVED" - RequestStatusDenied RequestStatus = "DENIED" - RequestStatusCase_closed RequestStatus = "CASE_CLOSED" + RequestStatusPending RequestStatus = "PENDING" + RequestStatusCaseOpened RequestStatus = "CASE_OPENED" + RequestStatusApproved RequestStatus = "APPROVED" + RequestStatusDenied RequestStatus = "DENIED" + RequestStatusCaseClosed RequestStatus = "CASE_CLOSED" ) // Values returns all known values for RequestStatus. Note that this can be diff --git a/service/ses/api_op_CloneReceiptRuleSet.go b/service/ses/api_op_CloneReceiptRuleSet.go index cbde9b5e36d..2e6ae73a460 100644 --- a/service/ses/api_op_CloneReceiptRuleSet.go +++ b/service/ses/api_op_CloneReceiptRuleSet.go @@ -44,13 +44,13 @@ type CloneReceiptRuleSetInput struct { // The name of the rule set to create. The name must: // - // * This value can only + // * This value can only // contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes // (-). // - // * Start and end with a letter or number. + // * Start and end with a letter or number. // - // * Contain less than 64 + // * Contain less than 64 // characters. // // This member is required. 
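The rule set name constraints repeated by CloneReceiptRuleSet (and by CreateReceiptRuleSet in the next hunk) can be checked client-side before the call. The small sketch below encodes one reading of those rules as a regular expression; it is not part of the SDK, and the service remains the source of truth, so this only catches obviously malformed names before a request is sent.

package example

import "regexp"

// ruleSetNameRE encodes the constraints listed above: ASCII letters, digits,
// underscores, and dashes only; the first and last characters must be a letter
// or digit; and the whole name must contain fewer than 64 characters.
var ruleSetNameRE = regexp.MustCompile(`^[A-Za-z0-9](?:[A-Za-z0-9_-]{0,61}[A-Za-z0-9])?$`)

// validRuleSetName reports whether name can be passed as RuleSetName to
// CloneReceiptRuleSet or CreateReceiptRuleSet without tripping the naming rules.
func validRuleSetName(name string) bool {
	return ruleSetNameRE.MatchString(name)
}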
diff --git a/service/ses/api_op_CreateReceiptRuleSet.go b/service/ses/api_op_CreateReceiptRuleSet.go index 5e09ec1da5d..f604946bb1e 100644 --- a/service/ses/api_op_CreateReceiptRuleSet.go +++ b/service/ses/api_op_CreateReceiptRuleSet.go @@ -37,13 +37,13 @@ type CreateReceiptRuleSetInput struct { // The name of the rule set to create. The name must: // - // * This value can only + // * This value can only // contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes // (-). // - // * Start and end with a letter or number. + // * Start and end with a letter or number. // - // * Contain less than 64 + // * Contain less than 64 // characters. // // This member is required. diff --git a/service/ses/api_op_GetIdentityDkimAttributes.go b/service/ses/api_op_GetIdentityDkimAttributes.go index 48396830b43..40130967cd2 100644 --- a/service/ses/api_op_GetIdentityDkimAttributes.go +++ b/service/ses/api_op_GetIdentityDkimAttributes.go @@ -17,14 +17,14 @@ import ( // tokens have been published. This operation takes a list of identities as input // and returns the following information for each: // -// * Whether Easy DKIM signing -// is enabled or disabled. +// * Whether Easy DKIM signing is +// enabled or disabled. // -// * A set of DKIM tokens that represent the identity. -// If the identity is an email address, the tokens represent the domain of that +// * A set of DKIM tokens that represent the identity. If the +// identity is an email address, the tokens represent the domain of that // address. // -// * Whether Amazon SES has successfully verified the DKIM tokens +// * Whether Amazon SES has successfully verified the DKIM tokens // published in the domain's DNS. This information is only returned for domain name // identities, not for email addresses. // diff --git a/service/ses/api_op_SendBulkTemplatedEmail.go b/service/ses/api_op_SendBulkTemplatedEmail.go index 710708ed4aa..275bd80f214 100644 --- a/service/ses/api_op_SendBulkTemplatedEmail.go +++ b/service/ses/api_op_SendBulkTemplatedEmail.go @@ -15,38 +15,38 @@ import ( // using an email template. In order to send email using the SendBulkTemplatedEmail // operation, your call to the API must meet the following requirements: // -// * The +// * The // call must refer to an existing email template. You can create email templates // using the CreateTemplate operation. // -// * The message must be sent from a -// verified email address or domain. +// * The message must be sent from a verified +// email address or domain. // -// * If your account is still in the Amazon -// SES sandbox, you may only send to verified addresses or domains, or to email -// addresses associated with the Amazon SES Mailbox Simulator. For more -// information, see Verifying Email Addresses and Domains +// * If your account is still in the Amazon SES sandbox, +// you may only send to verified addresses or domains, or to email addresses +// associated with the Amazon SES Mailbox Simulator. For more information, see +// Verifying Email Addresses and Domains // (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-addresses-and-domains.html) // in the Amazon SES Developer Guide. // -// * The maximum message size is 10 MB. +// * The maximum message size is 10 MB. // -// -// * Each Destination parameter must include at least one recipient email address. -// The recipient address can be a To: address, a CC: address, or a BCC: address. 
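The rule set name constraints quoted in these hunks (ASCII letters, digits, underscores, or dashes; starts and ends with a letter or digit; fewer than 64 characters) are easy to check client-side before calling CloneReceiptRuleSet or CreateReceiptRuleSet. A standard-library sketch; the regular expression is an illustration of the documented rules, not something the SDK enforces:

```go
package main

import (
	"fmt"
	"regexp"
)

// ruleSetName encodes the documented constraints: ASCII letters, digits,
// underscores, or dashes only, starting and ending with a letter or digit.
var ruleSetName = regexp.MustCompile(`^[A-Za-z0-9]([A-Za-z0-9_-]*[A-Za-z0-9])?$`)

// validRuleSetName also enforces the "fewer than 64 characters" rule.
func validRuleSetName(name string) bool {
	return len(name) < 64 && ruleSetName.MatchString(name)
}

func main() {
	fmt.Println(validRuleSetName("my-rule-set_01"))     // true
	fmt.Println(validRuleSetName("-starts-with-dash")) // false
}
```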
If -// a recipient email address is invalid (that is, it is not in the format +// * Each +// Destination parameter must include at least one recipient email address. The +// recipient address can be a To: address, a CC: address, or a BCC: address. If a +// recipient email address is invalid (that is, it is not in the format // UserName@[SubDomain.]Domain.TopLevelDomain), the entire message will be // rejected, even if the message contains other recipients that are valid. // -// * -// The message may not include more than 50 recipients, across the To:, CC: and -// BCC: fields. If you need to send an email message to a larger audience, you can +// * The +// message may not include more than 50 recipients, across the To:, CC: and BCC: +// fields. If you need to send an email message to a larger audience, you can // divide your recipient list into groups of 50 or fewer, and then call the // SendBulkTemplatedEmail operation several times to send the message to each // group. // -// * The number of destinations you can contact in a single call to the -// API may be limited by your account's maximum sending rate. +// * The number of destinations you can contact in a single call to the API +// may be limited by your account's maximum sending rate. func (c *Client) SendBulkTemplatedEmail(ctx context.Context, params *SendBulkTemplatedEmailInput, optFns ...func(*Options)) (*SendBulkTemplatedEmailOutput, error) { if params == nil { params = &SendBulkTemplatedEmailInput{} diff --git a/service/ses/api_op_SendEmail.go b/service/ses/api_op_SendEmail.go index e825a2d418f..f67a36820eb 100644 --- a/service/ses/api_op_SendEmail.go +++ b/service/ses/api_op_SendEmail.go @@ -15,11 +15,11 @@ import ( // send email using the SendEmail operation, your message must meet the following // requirements: // -// * The message must be sent from a verified email address or +// * The message must be sent from a verified email address or // domain. If you attempt to send email using a non-verified address or domain, the // operation will result in an "Email address not verified" error. // -// * If your +// * If your // account is still in the Amazon SES sandbox, you may only send to verified // addresses or domains, or to email addresses associated with the Amazon SES // Mailbox Simulator. For more information, see Verifying Email Addresses and @@ -27,18 +27,18 @@ import ( // (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-addresses-and-domains.html) // in the Amazon SES Developer Guide. // -// * The maximum message size is 10 MB. +// * The maximum message size is 10 MB. // -// -// * The message must include at least one recipient email address. The recipient -// address can be a To: address, a CC: address, or a BCC: address. If a recipient -// email address is invalid (that is, it is not in the format +// * The +// message must include at least one recipient email address. The recipient address +// can be a To: address, a CC: address, or a BCC: address. If a recipient email +// address is invalid (that is, it is not in the format // UserName@[SubDomain.]Domain.TopLevelDomain), the entire message will be // rejected, even if the message contains other recipients that are valid. // -// * -// The message may not include more than 50 recipients, across the To:, CC: and -// BCC: fields. If you need to send an email message to a larger audience, you can +// * The +// message may not include more than 50 recipients, across the To:, CC: and BCC: +// fields. 
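The 50-recipient limit described above means a larger audience has to be split into groups before calling SendEmail or SendBulkTemplatedEmail. A standard-library sketch of the batching step only; the actual send loop and error handling are omitted:

```go
package main

import "fmt"

// chunkRecipients splits a recipient list into groups of at most size
// addresses, so each group can be passed to a separate SendEmail or
// SendBulkTemplatedEmail call.
func chunkRecipients(recipients []string, size int) [][]string {
	var groups [][]string
	for len(recipients) > 0 {
		n := size
		if len(recipients) < n {
			n = len(recipients)
		}
		groups = append(groups, recipients[:n])
		recipients = recipients[n:]
	}
	return groups
}

func main() {
	// 120 illustrative addresses -> groups of 50, 50, and 20.
	var all []string
	for i := 0; i < 120; i++ {
		all = append(all, fmt.Sprintf("user%d@example.com", i))
	}
	for _, g := range chunkRecipients(all, 50) {
		fmt.Println(len(g))
	}
}
```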
If you need to send an email message to a larger audience, you can // divide your recipient list into groups of 50 or fewer, and then call the // SendEmail operation several times to send the message to each group. // diff --git a/service/ses/api_op_SendRawEmail.go b/service/ses/api_op_SendRawEmail.go index 0a234ad5831..f859c096cd5 100644 --- a/service/ses/api_op_SendRawEmail.go +++ b/service/ses/api_op_SendRawEmail.go @@ -19,39 +19,39 @@ import ( // also use this operation to send messages that include attachments. The // SendRawEmail operation has the following requirements: // -// * You can only send +// * You can only send // email from verified email addresses or domains // (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-addresses-and-domains.html). // If you try to send email from an address that isn't verified, the operation // results in an "Email address not verified" error. // -// * If your account is -// still in the Amazon SES sandbox +// * If your account is still in +// the Amazon SES sandbox // (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/request-production-access.html), // you can only send email to other verified addresses in your account, or to // addresses that are associated with the Amazon SES mailbox simulator // (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/mailbox-simulator.html). // +// * +// The maximum message size, including attachments, is 10 MB. // -// * The maximum message size, including attachments, is 10 MB. -// -// * Each message -// has to include at least one recipient address. A recipient address includes any +// * Each message has +// to include at least one recipient address. A recipient address includes any // address on the To:, CC:, or BCC: lines. // -// * If you send a single message to -// more than one recipient address, and one of the recipient addresses isn't in a -// valid format (that is, it's not in the format +// * If you send a single message to more +// than one recipient address, and one of the recipient addresses isn't in a valid +// format (that is, it's not in the format // UserName@[SubDomain.]Domain.TopLevelDomain), Amazon SES rejects the entire // message, even if the other addresses are valid. // -// * Each message can include -// up to 50 recipient addresses across the To:, CC:, or BCC: lines. If you need to +// * Each message can include up +// to 50 recipient addresses across the To:, CC:, or BCC: lines. If you need to // send a single message to more than 50 recipients, you have to split the list of // recipient addresses into groups of less than 50 recipients, and send separate // messages to each group. // -// * Amazon SES allows you to specify 8-bit +// * Amazon SES allows you to specify 8-bit // Content-Transfer-Encoding for MIME message parts. However, if Amazon SES has to // modify the contents of your message (for example, if you use open and click // tracking), 8-bit content isn't preserved. For this reason, we highly recommend @@ -63,40 +63,39 @@ import ( // Additionally, keep the following // considerations in mind when using the SendRawEmail operation: // -// * Although -// you can customize the message headers when using the SendRawEmail operation, -// Amazon SES will automatically apply its own Message-ID and Date headers; if you -// passed these headers when creating the message, they will be overwritten by the -// values that Amazon SES provides. 
+// * Although you +// can customize the message headers when using the SendRawEmail operation, Amazon +// SES will automatically apply its own Message-ID and Date headers; if you passed +// these headers when creating the message, they will be overwritten by the values +// that Amazon SES provides. // -// * If you are using sending authorization -// to send on behalf of another user, SendRawEmail enables you to specify the -// cross-account identity for the email's Source, From, and Return-Path parameters -// in one of two ways: you can pass optional parameters SourceArn, FromArn, and/or -// ReturnPathArn to the API, or you can include the following X-headers in the -// header of your raw email: +// * If you are using sending authorization to send on +// behalf of another user, SendRawEmail enables you to specify the cross-account +// identity for the email's Source, From, and Return-Path parameters in one of two +// ways: you can pass optional parameters SourceArn, FromArn, and/or ReturnPathArn +// to the API, or you can include the following X-headers in the header of your raw +// email: // -// * X-SES-SOURCE-ARN +// * X-SES-SOURCE-ARN // -// * -// X-SES-FROM-ARN +// * X-SES-FROM-ARN // -// * X-SES-RETURN-PATH-ARN +// * X-SES-RETURN-PATH-ARN // -// Don't include these -// X-headers in the DKIM signature. Amazon SES removes these before it sends the -// email. If you only specify the SourceIdentityArn parameter, Amazon SES sets the -// From and Return-Path addresses to the same identity that you specified. For more -// information about sending authorization, see the Using Sending Authorization -// with Amazon SES +// Don't +// include these X-headers in the DKIM signature. Amazon SES removes these before +// it sends the email. If you only specify the SourceIdentityArn parameter, Amazon +// SES sets the From and Return-Path addresses to the same identity that you +// specified. For more information about sending authorization, see the Using +// Sending Authorization with Amazon SES // (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html) // in the Amazon SES Developer Guide. // -// * For every message that you send, the -// total number of recipients (including each recipient in the To:, CC: and BCC: -// fields) is counted against the maximum number of emails you can send in a -// 24-hour period (your sending quota). For more information about sending quotas -// in Amazon SES, see Managing Your Amazon SES Sending Limits +// * For every message that you send, the total +// number of recipients (including each recipient in the To:, CC: and BCC: fields) +// is counted against the maximum number of emails you can send in a 24-hour period +// (your sending quota). For more information about sending quotas in Amazon SES, +// see Managing Your Amazon SES Sending Limits // (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/manage-sending-limits.html) // in the Amazon SES Developer Guide. func (c *Client) SendRawEmail(ctx context.Context, params *SendRawEmailInput, optFns ...func(*Options)) (*SendRawEmailOutput, error) { @@ -121,31 +120,31 @@ type SendRawEmailInput struct { // The raw email message itself. The message has to meet the following criteria: // + // * + // The message has to contain a header and a body, separated by a blank line. // - // * The message has to contain a header and a body, separated by a blank line. - // + // * + // All of the required header fields must be present in the message. 
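For the sending-authorization case above, the cross-account identity can be carried in the X-SES-SOURCE-ARN, X-SES-FROM-ARN, and X-SES-RETURN-PATH-ARN headers of the raw message. A sketch that assembles such a message with the standard library; the ARN and addresses are placeholders, and in practice the three headers may reference different identities. As the documentation notes, Amazon SES strips these headers before sending, so they must not be covered by a DKIM signature:

```go
package main

import (
	"fmt"
	"strings"
)

// buildRawMessage assembles a simple raw message carrying the
// sending-authorization X-headers described above.
func buildRawMessage(identityArn, from, to, subject, body string) string {
	headers := []string{
		"X-SES-SOURCE-ARN: " + identityArn,
		"X-SES-FROM-ARN: " + identityArn,
		"X-SES-RETURN-PATH-ARN: " + identityArn,
		"From: " + from,
		"To: " + to,
		"Subject: " + subject,
		"MIME-Version: 1.0",
		"Content-Type: text/plain; charset=UTF-8",
	}
	return strings.Join(headers, "\r\n") + "\r\n\r\n" + body + "\r\n"
}

func main() {
	msg := buildRawMessage(
		"arn:aws:ses:us-east-1:123456789012:identity/example.com", // placeholder ARN
		"sender@example.com", "recipient@example.com",
		"Cross-account test", "Sent via SendRawEmail with sending authorization.")
	fmt.Println(msg)
}
```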
// - // * All of the required header fields must be present in the message. + // * Each part + // of a multipart MIME message must be formatted properly. // - // * Each - // part of a multipart MIME message must be formatted properly. - // - // * Attachments - // must be of a content type that Amazon SES supports. For a list on unsupported - // content types, see Unsupported Attachment Types + // * Attachments must be + // of a content type that Amazon SES supports. For a list on unsupported content + // types, see Unsupported Attachment Types // (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/mime-types.html) in the // Amazon SES Developer Guide. // - // * The entire message must be base64-encoded. - // + // * The entire message must be base64-encoded. // - // * If any of the MIME parts in your message contain content that is outside of - // the 7-bit ASCII character range, we highly recommend that you encode that - // content. For more information, see Sending Raw Email + // * If + // any of the MIME parts in your message contain content that is outside of the + // 7-bit ASCII character range, we highly recommend that you encode that content. + // For more information, see Sending Raw Email // (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-email-raw.html) in // the Amazon SES Developer Guide. // - // * Per RFC 5321 + // * Per RFC 5321 // (https://tools.ietf.org/html/rfc5321#section-4.5.3.1.6), the maximum length of // each line of text, including the , must not exceed 1,000 characters. // diff --git a/service/ses/api_op_SendTemplatedEmail.go b/service/ses/api_op_SendTemplatedEmail.go index c8a28a8e9f5..d904ffba3da 100644 --- a/service/ses/api_op_SendTemplatedEmail.go +++ b/service/ses/api_op_SendTemplatedEmail.go @@ -15,30 +15,30 @@ import ( // sending. In order to send email using the SendTemplatedEmail operation, your // call to the API must meet the following requirements: // -// * The call must refer -// to an existing email template. You can create email templates using the +// * The call must refer to +// an existing email template. You can create email templates using the // CreateTemplate operation. // -// * The message must be sent from a verified email +// * The message must be sent from a verified email // address or domain. // -// * If your account is still in the Amazon SES sandbox, -// you may only send to verified addresses or domains, or to email addresses -// associated with the Amazon SES Mailbox Simulator. For more information, see -// Verifying Email Addresses and Domains +// * If your account is still in the Amazon SES sandbox, you +// may only send to verified addresses or domains, or to email addresses associated +// with the Amazon SES Mailbox Simulator. For more information, see Verifying Email +// Addresses and Domains // (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-addresses-and-domains.html) // in the Amazon SES Developer Guide. // -// * The maximum message size is 10 MB. +// * The maximum message size is 10 MB. // -// -// * Calls to the SendTemplatedEmail operation may only include one Destination +// * +// Calls to the SendTemplatedEmail operation may only include one Destination // parameter. A destination is a set of recipients who will receive the same // version of the email. The Destination parameter can include up to 50 recipients, // across the To:, CC: and BCC: fields. // -// * The Destination parameter must -// include at least one recipient email address. 
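Two of the raw-message criteria above are mechanical: no line may exceed 1,000 characters including its CRLF, and the message must be base64-encoded. A standard-library sketch of both steps; whether your client layer performs the encoding for you is outside the scope of this example:

```go
package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

// checkLineLengths verifies the RFC 5321 limit quoted above: no line of the
// raw message, including its terminating CRLF, may exceed 1,000 characters.
func checkLineLengths(raw string) error {
	for i, line := range strings.Split(raw, "\r\n") {
		if len(line)+2 > 1000 { // +2 accounts for the CRLF terminator
			return fmt.Errorf("line %d exceeds the 1,000-character limit", i+1)
		}
	}
	return nil
}

func main() {
	raw := "Subject: hello\r\n\r\nshort body\r\n"
	if err := checkLineLengths(raw); err != nil {
		fmt.Println(err)
		return
	}
	// The criteria above call for the entire message to be base64-encoded.
	fmt.Println(base64.StdEncoding.EncodeToString([]byte(raw)))
}
```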
The recipient address can be a To: +// * The Destination parameter must include +// at least one recipient email address. The recipient address can be a To: // address, a CC: address, or a BCC: address. If a recipient email address is // invalid (that is, it is not in the format // UserName@[SubDomain.]Domain.TopLevelDomain), the entire message will be diff --git a/service/ses/api_op_VerifyDomainDkim.go b/service/ses/api_op_VerifyDomainDkim.go index e3bf6e88208..9a9a50b37ef 100644 --- a/service/ses/api_op_VerifyDomainDkim.go +++ b/service/ses/api_op_VerifyDomainDkim.go @@ -27,12 +27,12 @@ import ( // email that you send from the domain is authenticated using DKIM. To create the // CNAME records for DKIM authentication, use the following values: // -// * Name: +// * Name: // token._domainkey.example.com // -// * Type: CNAME +// * Type: CNAME // -// * Value: +// * Value: // token.dkim.amazonses.com // // In the preceding example, replace token with one of diff --git a/service/ses/types/enums.go b/service/ses/types/enums.go index 5a85f26ef74..109fdaff017 100644 --- a/service/ses/types/enums.go +++ b/service/ses/types/enums.go @@ -92,10 +92,10 @@ type ConfigurationSetAttribute string // Enum values for ConfigurationSetAttribute const ( - ConfigurationSetAttributeEvent_destinations ConfigurationSetAttribute = "eventDestinations" - ConfigurationSetAttributeTracking_options ConfigurationSetAttribute = "trackingOptions" - ConfigurationSetAttributeDelivery_options ConfigurationSetAttribute = "deliveryOptions" - ConfigurationSetAttributeReputation_options ConfigurationSetAttribute = "reputationOptions" + ConfigurationSetAttributeEventDestinations ConfigurationSetAttribute = "eventDestinations" + ConfigurationSetAttributeTrackingOptions ConfigurationSetAttribute = "trackingOptions" + ConfigurationSetAttributeDeliveryOptions ConfigurationSetAttribute = "deliveryOptions" + ConfigurationSetAttributeReputationOptions ConfigurationSetAttribute = "reputationOptions" ) // Values returns all known values for ConfigurationSetAttribute. Note that this @@ -136,9 +136,9 @@ type DimensionValueSource string // Enum values for DimensionValueSource const ( - DimensionValueSourceMessage_tag DimensionValueSource = "messageTag" - DimensionValueSourceEmail_header DimensionValueSource = "emailHeader" - DimensionValueSourceLink_tag DimensionValueSource = "linkTag" + DimensionValueSourceMessageTag DimensionValueSource = "messageTag" + DimensionValueSourceEmailHeader DimensionValueSource = "emailHeader" + DimensionValueSourceLinkTag DimensionValueSource = "linkTag" ) // Values returns all known values for DimensionValueSource. Note that this can be @@ -180,14 +180,14 @@ type EventType string // Enum values for EventType const ( - EventTypeSend EventType = "send" - EventTypeReject EventType = "reject" - EventTypeBounce EventType = "bounce" - EventTypeComplaint EventType = "complaint" - EventTypeDelivery EventType = "delivery" - EventTypeOpen EventType = "open" - EventTypeClick EventType = "click" - EventTypeRendering_failure EventType = "renderingFailure" + EventTypeSend EventType = "send" + EventTypeReject EventType = "reject" + EventTypeBounce EventType = "bounce" + EventTypeComplaint EventType = "complaint" + EventTypeDelivery EventType = "delivery" + EventTypeOpen EventType = "open" + EventTypeClick EventType = "click" + EventTypeRenderingFailure EventType = "renderingFailure" ) // Values returns all known values for EventType. 
Note that this can be expanded in @@ -302,7 +302,7 @@ type StopScope string // Enum values for StopScope const ( - StopScopeRule_set StopScope = "RuleSet" + StopScopeRuleSet StopScope = "RuleSet" ) // Values returns all known values for StopScope. Note that this can be expanded in diff --git a/service/ses/types/errors.go b/service/ses/types/errors.go index b3ccfc259fa..223fdee3bf2 100644 --- a/service/ses/types/errors.go +++ b/service/ses/types/errors.go @@ -524,11 +524,11 @@ func (e *InvalidTemplateException) ErrorFault() smithy.ErrorFault { return smith // redirects is invalid. This error appears most often in the following // situations: // -// * When the tracking domain you specified is not verified in -// Amazon SES. +// * When the tracking domain you specified is not verified in Amazon +// SES. // -// * When the tracking domain you specified is not a valid domain -// or subdomain. +// * When the tracking domain you specified is not a valid domain or +// subdomain. type InvalidTrackingOptionsException struct { Message *string } diff --git a/service/ses/types/types.go b/service/ses/types/types.go index 48ae75809d0..2193b56f4cc 100644 --- a/service/ses/types/types.go +++ b/service/ses/types/types.go @@ -148,54 +148,53 @@ type BulkEmailDestinationStatus struct { // The status of a message sent using the SendBulkTemplatedEmail operation. // Possible values for this parameter include: // - // * Success: Amazon SES accepted - // the message, and will attempt to deliver it to the recipients. + // * Success: Amazon SES accepted the + // message, and will attempt to deliver it to the recipients. // - // * - // MessageRejected: The message was rejected because it contained a virus. + // * MessageRejected: + // The message was rejected because it contained a virus. // - // * + // * // MailFromDomainNotVerified: The sender's email address or domain was not // verified. // - // * ConfigurationSetDoesNotExist: The configuration set you - // specified does not exist. + // * ConfigurationSetDoesNotExist: The configuration set you specified + // does not exist. // - // * TemplateDoesNotExist: The template you - // specified does not exist. + // * TemplateDoesNotExist: The template you specified does not + // exist. // - // * AccountSuspended: Your account has been shut - // down because of issues related to your email sending practices. + // * AccountSuspended: Your account has been shut down because of issues + // related to your email sending practices. // - // * - // AccountThrottled: The number of emails you can send has been reduced because - // your account has exceeded its allocated sending limit. + // * AccountThrottled: The number of + // emails you can send has been reduced because your account has exceeded its + // allocated sending limit. // - // * - // AccountDailyQuotaExceeded: You have reached or exceeded the maximum number of - // emails you can send from your account in a 24-hour period. + // * AccountDailyQuotaExceeded: You have reached or + // exceeded the maximum number of emails you can send from your account in a + // 24-hour period. // - // * - // InvalidSendingPoolName: The configuration set you specified refers to an IP pool - // that does not exist. + // * InvalidSendingPoolName: The configuration set you specified + // refers to an IP pool that does not exist. // - // * AccountSendingPaused: Email sending for the Amazon - // SES account was disabled using the UpdateAccountSendingEnabled operation. 
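The CloudWatch dimension constraints and the renamed DimensionValueSource constants both appear in the hunks above. A sketch that builds a CloudWatchDimensionConfiguration with the renamed constant; the DimensionValueSource field name is assumed from the struct's documented shape, and the dimension name and default value simply follow the stated rules (ASCII letters, digits, underscores, or dashes; fewer than 256 characters):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	sestypes "github.com/aws/aws-sdk-go-v2/service/ses/types"
)

func main() {
	// A CloudWatch dimension for an SES event destination, using the renamed
	// DimensionValueSourceMessageTag constant from this change.
	dim := sestypes.CloudWatchDimensionConfiguration{
		DimensionName:         aws.String("campaign"),
		DefaultDimensionValue: aws.String("none"),
		DimensionValueSource:  sestypes.DimensionValueSourceMessageTag,
	}
	fmt.Printf("%+v\n", dim)
}
```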
- // - // * - // ConfigurationSetSendingPaused: Email sending for this configuration set was - // disabled using the UpdateConfigurationSetSendingEnabled operation. + // * AccountSendingPaused: Email sending + // for the Amazon SES account was disabled using the UpdateAccountSendingEnabled + // operation. // - // * - // InvalidParameterValue: One or more of the parameters you specified when calling - // this operation was invalid. See the error message for additional information. + // * ConfigurationSetSendingPaused: Email sending for this + // configuration set was disabled using the UpdateConfigurationSetSendingEnabled + // operation. // + // * InvalidParameterValue: One or more of the parameters you specified + // when calling this operation was invalid. See the error message for additional + // information. // - // * TransientFailure: Amazon SES was unable to process your request because of a - // temporary issue. + // * TransientFailure: Amazon SES was unable to process your request + // because of a temporary issue. // - // * Failed: Amazon SES was unable to process your request. - // See the error message for additional information. + // * Failed: Amazon SES was unable to process your + // request. See the error message for additional information. Status BulkEmailStatus } @@ -224,10 +223,10 @@ type CloudWatchDimensionConfiguration struct { // do not provide the value of the dimension when you send an email. The default // value must: // - // * This value can only contain ASCII letters (a-z, A-Z), numbers + // * This value can only contain ASCII letters (a-z, A-Z), numbers // (0-9), underscores (_), or dashes (-). // - // * Contain less than 256 characters. + // * Contain less than 256 characters. // // This member is required. DefaultDimensionValue *string @@ -235,11 +234,11 @@ type CloudWatchDimensionConfiguration struct { // The name of an Amazon CloudWatch dimension associated with an email sending // metric. The name must: // - // * This value can only contain ASCII letters (a-z, - // A-Z), numbers (0-9), underscores (_), or dashes (-). + // * This value can only contain ASCII letters (a-z, A-Z), + // numbers (0-9), underscores (_), or dashes (-). // - // * Contain less than - // 256 characters. + // * Contain less than 256 + // characters. // // This member is required. DimensionName *string @@ -266,10 +265,10 @@ type ConfigurationSet struct { // The name of the configuration set. The name must meet the following // requirements: // - // * Contain only letters (a-z, A-Z), numbers (0-9), underscores + // * Contain only letters (a-z, A-Z), numbers (0-9), underscores // (_), or dashes (-). // - // * Contain 64 characters or fewer. + // * Contain 64 characters or fewer. // // This member is required. Name *string @@ -362,11 +361,11 @@ type EventDestination struct { // The name of the event destination. The name must: // - // * This value can only - // contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes - // (-). + // * This value can only contain + // ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). // - // * Contain less than 64 characters. + // * + // Contain less than 64 characters. // // This member is required. Name *string @@ -638,22 +637,22 @@ type MessageTag struct { // The name of the tag. The name must: // - // * This value can only contain ASCII - // letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). 
+ // * This value can only contain ASCII letters + // (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). // - // * - // Contain less than 256 characters. + // * Contain less than + // 256 characters. // // This member is required. Name *string // The value of the tag. The value must: // - // * This value can only contain ASCII + // * This value can only contain ASCII // letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). // - // * - // Contain less than 256 characters. + // * Contain + // less than 256 characters. // // This member is required. Value *string @@ -729,14 +728,13 @@ type ReceiptFilter struct { // The name of the IP address filter. The name must: // - // * This value can only - // contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes - // (-). + // * This value can only contain + // ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). // - // * Start and end with a letter or number. + // * + // Start and end with a letter or number. // - // * Contain less than 64 - // characters. + // * Contain less than 64 characters. // // This member is required. Name *string @@ -776,13 +774,13 @@ type ReceiptRule struct { // The name of the receipt rule. The name must: // - // * This value can only contain + // * This value can only contain // ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). // - // * + // * // Start and end with a letter or number. // - // * Contain less than 64 characters. + // * Contain less than 64 characters. // // This member is required. Name *string @@ -822,14 +820,13 @@ type ReceiptRuleSetMetadata struct { // The name of the receipt rule set. The name must: // - // * This value can only - // contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes - // (-). + // * This value can only contain + // ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). // - // * Start and end with a letter or number. + // * + // Start and end with a letter or number. // - // * Contain less than 64 - // characters. + // * Contain less than 64 characters. Name *string } @@ -930,7 +927,7 @@ type S3Action struct { // saving them to the Amazon S3 bucket. You can use the default master key or a // custom master key you created in AWS KMS as follows: // - // * To use the default + // * To use the default // master key, provide an ARN in the form of // arn:aws:kms:REGION:ACCOUNT-ID-WITHOUT-HYPHENS:alias/aws/ses. For example, if // your AWS account ID is 123456789012 and you want to use the default master key @@ -939,10 +936,10 @@ type S3Action struct { // key, you don't need to perform any extra steps to give Amazon SES permission to // use the key. // - // * To use a custom master key you created in AWS KMS, provide - // the ARN of the master key and ensure that you add a statement to your key's - // policy to give Amazon SES permission to use it. For more information about - // giving permissions, see the Amazon SES Developer Guide + // * To use a custom master key you created in AWS KMS, provide the + // ARN of the master key and ensure that you add a statement to your key's policy + // to give Amazon SES permission to use it. For more information about giving + // permissions, see the Amazon SES Developer Guide // (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-permissions.html). 
// // For diff --git a/service/sesv2/api_op_GetAccount.go b/service/sesv2/api_op_GetAccount.go index 643add9dd2a..68a347c0e6f 100644 --- a/service/sesv2/api_op_GetAccount.go +++ b/service/sesv2/api_op_GetAccount.go @@ -47,17 +47,17 @@ type GetAccountOutput struct { // The reputation status of your Amazon SES account. The status can be one of the // following: // - // * HEALTHY – There are no reputation-related issues that - // currently impact your account. + // * HEALTHY – There are no reputation-related issues that currently + // impact your account. // - // * PROBATION – We've identified potential - // issues with your Amazon SES account. We're placing your account under review - // while you work on correcting these issues. + // * PROBATION – We've identified potential issues with your + // Amazon SES account. We're placing your account under review while you work on + // correcting these issues. // - // * SHUTDOWN – Your account's - // ability to send email is currently paused because of an issue with the email - // sent from your account. When you correct the issue, you can contact us and - // request that your account's ability to send email is resumed. + // * SHUTDOWN – Your account's ability to send email is + // currently paused because of an issue with the email sent from your account. When + // you correct the issue, you can contact us and request that your account's + // ability to send email is resumed. EnforcementStatus *string // Indicates whether or not your account has production access in the current AWS diff --git a/service/sesv2/api_op_PutAccountSuppressionAttributes.go b/service/sesv2/api_op_PutAccountSuppressionAttributes.go index 5d6a3e80223..cd99b5051ed 100644 --- a/service/sesv2/api_op_PutAccountSuppressionAttributes.go +++ b/service/sesv2/api_op_PutAccountSuppressionAttributes.go @@ -34,11 +34,11 @@ type PutAccountSuppressionAttributesInput struct { // added to the suppression list for your account. This list can contain any or all // of the following: // - // * COMPLAINT – Amazon SES adds an email address to the + // * COMPLAINT – Amazon SES adds an email address to the // suppression list for your account when a message sent to that address results in // a complaint. // - // * BOUNCE – Amazon SES adds an email address to the suppression + // * BOUNCE – Amazon SES adds an email address to the suppression // list for your account when a message sent to that address results in a hard // bounce. SuppressedReasons []types.SuppressionListReason diff --git a/service/sesv2/api_op_PutConfigurationSetSuppressionOptions.go b/service/sesv2/api_op_PutConfigurationSetSuppressionOptions.go index 27fa5097ddf..ea8945bf090 100644 --- a/service/sesv2/api_op_PutConfigurationSetSuppressionOptions.go +++ b/service/sesv2/api_op_PutConfigurationSetSuppressionOptions.go @@ -41,13 +41,12 @@ type PutConfigurationSetSuppressionOptionsInput struct { // the suppression list for your account. This list can contain any or all of the // following: // - // * COMPLAINT – Amazon SES adds an email address to the - // suppression list for your account when a message sent to that address results in - // a complaint. + // * COMPLAINT – Amazon SES adds an email address to the suppression + // list for your account when a message sent to that address results in a + // complaint. // - // * BOUNCE – Amazon SES adds an email address to the suppression - // list for your account when a message sent to that address results in a hard - // bounce. 
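The suppression-list reasons documented above (BOUNCE and COMPLAINT) are passed to PutAccountSuppressionAttributes as SuppressedReasons. A sketch of that call; the SuppressionListReasonBounce and SuppressionListReasonComplaint constant names are assumptions based on the rename pattern in this change:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sesv2"
	"github.com/aws/aws-sdk-go-v2/service/sesv2/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := sesv2.NewFromConfig(cfg)

	// Suppress addresses at the account level for both documented reasons:
	// hard bounces and complaints.
	_, err = client.PutAccountSuppressionAttributes(ctx, &sesv2.PutAccountSuppressionAttributesInput{
		SuppressedReasons: []types.SuppressionListReason{
			types.SuppressionListReasonBounce,
			types.SuppressionListReasonComplaint,
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```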
+ // * BOUNCE – Amazon SES adds an email address to the suppression list + // for your account when a message sent to that address results in a hard bounce. SuppressedReasons []types.SuppressionListReason } diff --git a/service/sesv2/api_op_PutEmailIdentityDkimSigningAttributes.go b/service/sesv2/api_op_PutEmailIdentityDkimSigningAttributes.go index 48920072bb1..53e71db2cc7 100644 --- a/service/sesv2/api_op_PutEmailIdentityDkimSigningAttributes.go +++ b/service/sesv2/api_op_PutEmailIdentityDkimSigningAttributes.go @@ -14,20 +14,19 @@ import ( // Used to configure or change the DKIM authentication settings for an email domain // identity. You can use this operation to do any of the following: // -// * Update -// the signing attributes for an identity that uses Bring Your Own DKIM -// (BYODKIM). +// * Update the +// signing attributes for an identity that uses Bring Your Own DKIM (BYODKIM). // -// * Change from using no DKIM authentication to using Easy DKIM. +// * +// Change from using no DKIM authentication to using Easy DKIM. // +// * Change from +// using no DKIM authentication to using BYODKIM. // -// * Change from using no DKIM authentication to using BYODKIM. +// * Change from using Easy DKIM to +// using BYODKIM. // -// * Change from -// using Easy DKIM to using BYODKIM. -// -// * Change from using BYODKIM to using Easy -// DKIM. +// * Change from using BYODKIM to using Easy DKIM. func (c *Client) PutEmailIdentityDkimSigningAttributes(ctx context.Context, params *PutEmailIdentityDkimSigningAttributesInput, optFns ...func(*Options)) (*PutEmailIdentityDkimSigningAttributesOutput, error) { if params == nil { params = &PutEmailIdentityDkimSigningAttributesInput{} @@ -54,11 +53,10 @@ type PutEmailIdentityDkimSigningAttributesInput struct { // The method that you want to use to configure DKIM for the identity. There are // two possible values: // - // * AWS_SES – Configure DKIM for the identity by using - // Easy DKIM - // (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/easy-dkim.html). + // * AWS_SES – Configure DKIM for the identity by using Easy + // DKIM (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/easy-dkim.html). // - // * + // * // EXTERNAL – Configure DKIM for the identity by using Bring Your Own DKIM // (BYODKIM). // @@ -86,23 +84,23 @@ type PutEmailIdentityDkimSigningAttributesOutput struct { // paired with the private key that you specified in the process of creating the // identity. The status can be one of the following: // - // * PENDING – The - // verification process was initiated, but Amazon SES hasn't yet detected the DKIM - // records in the DNS configuration for the domain. + // * PENDING – The verification + // process was initiated, but Amazon SES hasn't yet detected the DKIM records in + // the DNS configuration for the domain. // - // * SUCCESS – The - // verification process completed successfully. + // * SUCCESS – The verification process + // completed successfully. // - // * FAILED – The verification - // process failed. This typically occurs when Amazon SES fails to find the DKIM - // records in the DNS configuration of the domain. + // * FAILED – The verification process failed. This + // typically occurs when Amazon SES fails to find the DKIM records in the DNS + // configuration of the domain. // - // * TEMPORARY_FAILURE – A - // temporary issue is preventing Amazon SES from determining the DKIM - // authentication status of the domain. 
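Switching an identity to Easy DKIM, one of the transitions listed above, is a single PutEmailIdentityDkimSigningAttributes call with the AWS_SES origin. A sketch under the assumption that the input carries EmailIdentity and SigningAttributesOrigin fields; the identity is a placeholder, and the DkimStatus values printed at the end are the ones quoted in the hunk (PENDING, SUCCESS, FAILED, TEMPORARY_FAILURE, NOT_STARTED):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sesv2"
	"github.com/aws/aws-sdk-go-v2/service/sesv2/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := sesv2.NewFromConfig(cfg)

	// Move the domain identity from BYODKIM (or no DKIM) to Easy DKIM by
	// setting the signing origin to AWS_SES.
	out, err := client.PutEmailIdentityDkimSigningAttributes(ctx, &sesv2.PutEmailIdentityDkimSigningAttributesInput{
		EmailIdentity:           aws.String("example.com"), // placeholder identity
		SigningAttributesOrigin: types.DkimSigningAttributesOriginAwsSes,
	})
	if err != nil {
		log.Fatal(err)
	}
	// DkimStatus reports where verification of the DKIM records stands.
	fmt.Println(out.DkimStatus)
}
```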
+ // * TEMPORARY_FAILURE – A temporary issue is + // preventing Amazon SES from determining the DKIM authentication status of the + // domain. // - // * NOT_STARTED – The DKIM verification - // process hasn't been initiated for the domain. + // * NOT_STARTED – The DKIM verification process hasn't been initiated for + // the domain. DkimStatus types.DkimStatus // If you used Easy DKIM diff --git a/service/sesv2/api_op_PutEmailIdentityMailFromAttributes.go b/service/sesv2/api_op_PutEmailIdentityMailFromAttributes.go index 696b29224fb..7fbfbc6bde4 100644 --- a/service/sesv2/api_op_PutEmailIdentityMailFromAttributes.go +++ b/service/sesv2/api_op_PutEmailIdentityMailFromAttributes.go @@ -49,14 +49,14 @@ type PutEmailIdentityMailFromAttributesInput struct { // The custom MAIL FROM domain that you want the verified identity to use. The MAIL // FROM domain must meet the following criteria: // - // * It has to be a subdomain of - // the verified identity. + // * It has to be a subdomain of the + // verified identity. // - // * It can't be used to receive email. + // * It can't be used to receive email. // - // * It can't - // be used in a "From" address if the MAIL FROM domain is a destination for - // feedback forwarding emails. + // * It can't be used in + // a "From" address if the MAIL FROM domain is a destination for feedback + // forwarding emails. MailFromDomain *string } diff --git a/service/sesv2/api_op_SendEmail.go b/service/sesv2/api_op_SendEmail.go index 4ee2267486a..159156ec3a1 100644 --- a/service/sesv2/api_op_SendEmail.go +++ b/service/sesv2/api_op_SendEmail.go @@ -14,19 +14,19 @@ import ( // Sends an email message. You can use the Amazon SES API v2 to send two types of // messages: // -// * Simple – A standard email message. When you create this type of +// * Simple – A standard email message. When you create this type of // message, you specify the sender, the recipient, and the message body, and Amazon // SES assembles the message for you. // -// * Raw – A raw, MIME-formatted email -// message. When you send this type of email, you have to specify all of the -// message headers, as well as the message body. You can use this message type to -// send messages that contain attachments. The message that you specify has to be a +// * Raw – A raw, MIME-formatted email message. +// When you send this type of email, you have to specify all of the message +// headers, as well as the message body. You can use this message type to send +// messages that contain attachments. The message that you specify has to be a // valid MIME message. // -// * Templated – A message that contains personalization -// tags. When you send this type of email, Amazon SES API v2 automatically replaces -// the tags with values that you specify. +// * Templated – A message that contains personalization tags. +// When you send this type of email, Amazon SES API v2 automatically replaces the +// tags with values that you specify. 
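Of the MAIL FROM criteria listed above, the subdomain requirement is the only one that can be checked from the names alone. A standard-library sketch:

```go
package main

import (
	"fmt"
	"strings"
)

// isSubdomainOf reports whether mailFrom is a proper subdomain of identity,
// the first of the MAIL FROM criteria listed above. The other criteria (not
// used to receive mail, not a feedback-forwarding destination) can't be
// checked from the names alone.
func isSubdomainOf(mailFrom, identity string) bool {
	return mailFrom != identity && strings.HasSuffix(mailFrom, "."+identity)
}

func main() {
	fmt.Println(isSubdomainOf("mail.example.com", "example.com")) // true
	fmt.Println(isSubdomainOf("example.com", "example.com"))      // false
	fmt.Println(isSubdomainOf("mailexample.com", "example.com"))  // false
}
```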
func (c *Client) SendEmail(ctx context.Context, params *SendEmailInput, optFns ...func(*Options)) (*SendEmailOutput, error) { if params == nil { params = &SendEmailInput{} diff --git a/service/sesv2/types/enums.go b/service/sesv2/types/enums.go index 97693c379fc..b463bf0f5cc 100644 --- a/service/sesv2/types/enums.go +++ b/service/sesv2/types/enums.go @@ -6,8 +6,8 @@ type BehaviorOnMxFailure string // Enum values for BehaviorOnMxFailure const ( - BehaviorOnMxFailureUse_default_value BehaviorOnMxFailure = "USE_DEFAULT_VALUE" - BehaviorOnMxFailureReject_message BehaviorOnMxFailure = "REJECT_MESSAGE" + BehaviorOnMxFailureUseDefaultValue BehaviorOnMxFailure = "USE_DEFAULT_VALUE" + BehaviorOnMxFailureRejectMessage BehaviorOnMxFailure = "REJECT_MESSAGE" ) // Values returns all known values for BehaviorOnMxFailure. Note that this can be @@ -24,20 +24,20 @@ type BulkEmailStatus string // Enum values for BulkEmailStatus const ( - BulkEmailStatusSuccess BulkEmailStatus = "SUCCESS" - BulkEmailStatusMessage_rejected BulkEmailStatus = "MESSAGE_REJECTED" - BulkEmailStatusMail_from_domain_not_verified BulkEmailStatus = "MAIL_FROM_DOMAIN_NOT_VERIFIED" - BulkEmailStatusConfiguration_set_not_found BulkEmailStatus = "CONFIGURATION_SET_NOT_FOUND" - BulkEmailStatusTemplate_not_found BulkEmailStatus = "TEMPLATE_NOT_FOUND" - BulkEmailStatusAccount_suspended BulkEmailStatus = "ACCOUNT_SUSPENDED" - BulkEmailStatusAccount_throttled BulkEmailStatus = "ACCOUNT_THROTTLED" - BulkEmailStatusAccount_daily_quota_exceeded BulkEmailStatus = "ACCOUNT_DAILY_QUOTA_EXCEEDED" - BulkEmailStatusInvalid_sending_pool_name BulkEmailStatus = "INVALID_SENDING_POOL_NAME" - BulkEmailStatusAccount_sending_paused BulkEmailStatus = "ACCOUNT_SENDING_PAUSED" - BulkEmailStatusConfiguration_set_sending_paused BulkEmailStatus = "CONFIGURATION_SET_SENDING_PAUSED" - BulkEmailStatusInvalid_parameter BulkEmailStatus = "INVALID_PARAMETER" - BulkEmailStatusTransient_failure BulkEmailStatus = "TRANSIENT_FAILURE" - BulkEmailStatusFailed BulkEmailStatus = "FAILED" + BulkEmailStatusSuccess BulkEmailStatus = "SUCCESS" + BulkEmailStatusMessageRejected BulkEmailStatus = "MESSAGE_REJECTED" + BulkEmailStatusMailFromDomainNotVerified BulkEmailStatus = "MAIL_FROM_DOMAIN_NOT_VERIFIED" + BulkEmailStatusConfigurationSetNotFound BulkEmailStatus = "CONFIGURATION_SET_NOT_FOUND" + BulkEmailStatusTemplateNotFound BulkEmailStatus = "TEMPLATE_NOT_FOUND" + BulkEmailStatusAccountSuspended BulkEmailStatus = "ACCOUNT_SUSPENDED" + BulkEmailStatusAccountThrottled BulkEmailStatus = "ACCOUNT_THROTTLED" + BulkEmailStatusAccountDailyQuotaExceeded BulkEmailStatus = "ACCOUNT_DAILY_QUOTA_EXCEEDED" + BulkEmailStatusInvalidSendingPoolName BulkEmailStatus = "INVALID_SENDING_POOL_NAME" + BulkEmailStatusAccountSendingPaused BulkEmailStatus = "ACCOUNT_SENDING_PAUSED" + BulkEmailStatusConfigurationSetSendingPaused BulkEmailStatus = "CONFIGURATION_SET_SENDING_PAUSED" + BulkEmailStatusInvalidParameter BulkEmailStatus = "INVALID_PARAMETER" + BulkEmailStatusTransientFailure BulkEmailStatus = "TRANSIENT_FAILURE" + BulkEmailStatusFailed BulkEmailStatus = "FAILED" ) // Values returns all known values for BulkEmailStatus. 
Note that this can be @@ -102,9 +102,9 @@ type DeliverabilityDashboardAccountStatus string // Enum values for DeliverabilityDashboardAccountStatus const ( - DeliverabilityDashboardAccountStatusActive DeliverabilityDashboardAccountStatus = "ACTIVE" - DeliverabilityDashboardAccountStatusPending_expiration DeliverabilityDashboardAccountStatus = "PENDING_EXPIRATION" - DeliverabilityDashboardAccountStatusDisabled DeliverabilityDashboardAccountStatus = "DISABLED" + DeliverabilityDashboardAccountStatusActive DeliverabilityDashboardAccountStatus = "ACTIVE" + DeliverabilityDashboardAccountStatusPendingExpiration DeliverabilityDashboardAccountStatus = "PENDING_EXPIRATION" + DeliverabilityDashboardAccountStatusDisabled DeliverabilityDashboardAccountStatus = "DISABLED" ) // Values returns all known values for DeliverabilityDashboardAccountStatus. Note @@ -123,8 +123,8 @@ type DeliverabilityTestStatus string // Enum values for DeliverabilityTestStatus const ( - DeliverabilityTestStatusIn_progress DeliverabilityTestStatus = "IN_PROGRESS" - DeliverabilityTestStatusCompleted DeliverabilityTestStatus = "COMPLETED" + DeliverabilityTestStatusInProgress DeliverabilityTestStatus = "IN_PROGRESS" + DeliverabilityTestStatusCompleted DeliverabilityTestStatus = "COMPLETED" ) // Values returns all known values for DeliverabilityTestStatus. Note that this can @@ -141,9 +141,9 @@ type DimensionValueSource string // Enum values for DimensionValueSource const ( - DimensionValueSourceMessage_tag DimensionValueSource = "MESSAGE_TAG" - DimensionValueSourceEmail_header DimensionValueSource = "EMAIL_HEADER" - DimensionValueSourceLink_tag DimensionValueSource = "LINK_TAG" + DimensionValueSourceMessageTag DimensionValueSource = "MESSAGE_TAG" + DimensionValueSourceEmailHeader DimensionValueSource = "EMAIL_HEADER" + DimensionValueSourceLinkTag DimensionValueSource = "LINK_TAG" ) // Values returns all known values for DimensionValueSource. Note that this can be @@ -161,7 +161,7 @@ type DkimSigningAttributesOrigin string // Enum values for DkimSigningAttributesOrigin const ( - DkimSigningAttributesOriginAws_ses DkimSigningAttributesOrigin = "AWS_SES" + DkimSigningAttributesOriginAwsSes DkimSigningAttributesOrigin = "AWS_SES" DkimSigningAttributesOriginExternal DkimSigningAttributesOrigin = "EXTERNAL" ) @@ -179,11 +179,11 @@ type DkimStatus string // Enum values for DkimStatus const ( - DkimStatusPending DkimStatus = "PENDING" - DkimStatusSuccess DkimStatus = "SUCCESS" - DkimStatusFailed DkimStatus = "FAILED" - DkimStatusTemporary_failure DkimStatus = "TEMPORARY_FAILURE" - DkimStatusNot_started DkimStatus = "NOT_STARTED" + DkimStatusPending DkimStatus = "PENDING" + DkimStatusSuccess DkimStatus = "SUCCESS" + DkimStatusFailed DkimStatus = "FAILED" + DkimStatusTemporaryFailure DkimStatus = "TEMPORARY_FAILURE" + DkimStatusNotStarted DkimStatus = "NOT_STARTED" ) // Values returns all known values for DkimStatus. 
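The renamed BulkEmailStatus constants above can be grouped by whether a failed entry is worth retrying. An illustrative sketch based only on the status descriptions quoted in the hunk; the retry policy itself is a judgment call, not part of the SDK:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/sesv2/types"
)

// shouldRetry reports whether a bulk-send entry with the given status is
// worth retrying later. Throttling, daily-quota, and transient failures are
// treated as temporary; everything else is not.
func shouldRetry(s types.BulkEmailStatus) bool {
	switch s {
	case types.BulkEmailStatusAccountThrottled,
		types.BulkEmailStatusAccountDailyQuotaExceeded,
		types.BulkEmailStatusTransientFailure:
		return true
	default:
		return false
	}
}

func main() {
	fmt.Println(shouldRetry(types.BulkEmailStatusTransientFailure)) // true
	fmt.Println(shouldRetry(types.BulkEmailStatusMessageRejected))  // false
}
```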
Note that this can be expanded @@ -203,15 +203,15 @@ type EventType string // Enum values for EventType const ( - EventTypeSend EventType = "SEND" - EventTypeReject EventType = "REJECT" - EventTypeBounce EventType = "BOUNCE" - EventTypeComplaint EventType = "COMPLAINT" - EventTypeDelivery EventType = "DELIVERY" - EventTypeOpen EventType = "OPEN" - EventTypeClick EventType = "CLICK" - EventTypeRendering_failure EventType = "RENDERING_FAILURE" - EventTypeDelivery_delay EventType = "DELIVERY_DELAY" + EventTypeSend EventType = "SEND" + EventTypeReject EventType = "REJECT" + EventTypeBounce EventType = "BOUNCE" + EventTypeComplaint EventType = "COMPLAINT" + EventTypeDelivery EventType = "DELIVERY" + EventTypeOpen EventType = "OPEN" + EventTypeClick EventType = "CLICK" + EventTypeRenderingFailure EventType = "RENDERING_FAILURE" + EventTypeDeliveryDelay EventType = "DELIVERY_DELAY" ) // Values returns all known values for EventType. Note that this can be expanded in @@ -235,9 +235,9 @@ type IdentityType string // Enum values for IdentityType const ( - IdentityTypeEmail_address IdentityType = "EMAIL_ADDRESS" - IdentityTypeDomain IdentityType = "DOMAIN" - IdentityTypeManaged_domain IdentityType = "MANAGED_DOMAIN" + IdentityTypeEmailAddress IdentityType = "EMAIL_ADDRESS" + IdentityTypeDomain IdentityType = "DOMAIN" + IdentityTypeManagedDomain IdentityType = "MANAGED_DOMAIN" ) // Values returns all known values for IdentityType. Note that this can be expanded @@ -255,7 +255,7 @@ type ImportDestinationType string // Enum values for ImportDestinationType const ( - ImportDestinationTypeSuppression_list ImportDestinationType = "SUPPRESSION_LIST" + ImportDestinationTypeSuppressionList ImportDestinationType = "SUPPRESSION_LIST" ) // Values returns all known values for ImportDestinationType. Note that this can be @@ -293,10 +293,10 @@ type MailFromDomainStatus string // Enum values for MailFromDomainStatus const ( - MailFromDomainStatusPending MailFromDomainStatus = "PENDING" - MailFromDomainStatusSuccess MailFromDomainStatus = "SUCCESS" - MailFromDomainStatusFailed MailFromDomainStatus = "FAILED" - MailFromDomainStatusTemporary_failure MailFromDomainStatus = "TEMPORARY_FAILURE" + MailFromDomainStatusPending MailFromDomainStatus = "PENDING" + MailFromDomainStatusSuccess MailFromDomainStatus = "SUCCESS" + MailFromDomainStatusFailed MailFromDomainStatus = "FAILED" + MailFromDomainStatusTemporaryFailure MailFromDomainStatus = "TEMPORARY_FAILURE" ) // Values returns all known values for MailFromDomainStatus. Note that this can be @@ -409,8 +409,8 @@ type WarmupStatus string // Enum values for WarmupStatus const ( - WarmupStatusIn_progress WarmupStatus = "IN_PROGRESS" - WarmupStatusDone WarmupStatus = "DONE" + WarmupStatusInProgress WarmupStatus = "IN_PROGRESS" + WarmupStatusDone WarmupStatus = "DONE" ) // Values returns all known values for WarmupStatus. Note that this can be expanded diff --git a/service/sesv2/types/types.go b/service/sesv2/types/types.go index f5ad4ab1ce9..b3a0dda489c 100644 --- a/service/sesv2/types/types.go +++ b/service/sesv2/types/types.go @@ -20,11 +20,11 @@ type AccountDetails struct { // The type of email your account is sending. The mail type can be one of the // following: // - // * MARKETING – Most of your sending traffic is to keep your - // customers informed of your latest offering. + // * MARKETING – Most of your sending traffic is to keep your customers + // informed of your latest offering. 
// - // * TRANSACTIONAL – Most of your - // sending traffic is to communicate during a transaction with a customer. + // * TRANSACTIONAL – Most of your sending + // traffic is to communicate during a transaction with a customer. MailType MailType // Information about the review of the latest details you submitted. @@ -114,57 +114,56 @@ type BulkEmailEntryResult struct { // The status of a message sent using the SendBulkTemplatedEmail operation. // Possible values for this parameter include: // - // * SUCCESS: Amazon SES accepted - // the message, and will attempt to deliver it to the recipients. + // * SUCCESS: Amazon SES accepted the + // message, and will attempt to deliver it to the recipients. // - // * - // MESSAGE_REJECTED: The message was rejected because it contained a virus. + // * MESSAGE_REJECTED: + // The message was rejected because it contained a virus. // - // * + // * // MAIL_FROM_DOMAIN_NOT_VERIFIED: The sender's email address or domain was not // verified. // - // * CONFIGURATION_SET_DOES_NOT_EXIST: The configuration set you + // * CONFIGURATION_SET_DOES_NOT_EXIST: The configuration set you // specified does not exist. // - // * TEMPLATE_DOES_NOT_EXIST: The template you - // specified does not exist. + // * TEMPLATE_DOES_NOT_EXIST: The template you specified + // does not exist. // - // * ACCOUNT_SUSPENDED: Your account has been shut - // down because of issues related to your email sending practices. + // * ACCOUNT_SUSPENDED: Your account has been shut down because of + // issues related to your email sending practices. // - // * - // ACCOUNT_THROTTLED: The number of emails you can send has been reduced because - // your account has exceeded its allocated sending limit. + // * ACCOUNT_THROTTLED: The number + // of emails you can send has been reduced because your account has exceeded its + // allocated sending limit. // - // * - // ACCOUNT_DAILY_QUOTA_EXCEEDED: You have reached or exceeded the maximum number of - // emails you can send from your account in a 24-hour period. + // * ACCOUNT_DAILY_QUOTA_EXCEEDED: You have reached or + // exceeded the maximum number of emails you can send from your account in a + // 24-hour period. // - // * - // INVALID_SENDING_POOL_NAME: The configuration set you specified refers to an IP - // pool that does not exist. + // * INVALID_SENDING_POOL_NAME: The configuration set you + // specified refers to an IP pool that does not exist. // - // * ACCOUNT_SENDING_PAUSED: Email sending for the - // Amazon SES account was disabled using the UpdateAccountSendingEnabled + // * ACCOUNT_SENDING_PAUSED: + // Email sending for the Amazon SES account was disabled using the + // UpdateAccountSendingEnabled // (https://docs.aws.amazon.com/ses/latest/APIReference/API_UpdateAccountSendingEnabled.html) // operation. // - // * CONFIGURATION_SET_SENDING_PAUSED: Email sending for this + // * CONFIGURATION_SET_SENDING_PAUSED: Email sending for this // configuration set was disabled using the UpdateConfigurationSetSendingEnabled // (https://docs.aws.amazon.com/ses/latest/APIReference/API_UpdateConfigurationSetSendingEnabled.html) // operation. // - // * INVALID_PARAMETER_VALUE: One or more of the parameters you + // * INVALID_PARAMETER_VALUE: One or more of the parameters you // specified when calling this operation was invalid. See the error message for // additional information. // - // * TRANSIENT_FAILURE: Amazon SES was unable to - // process your request because of a temporary issue. 
+ // * TRANSIENT_FAILURE: Amazon SES was unable to process + // your request because of a temporary issue. // - // * FAILED: Amazon SES was - // unable to process your request. See the error message for additional - // information. + // * FAILED: Amazon SES was unable to + // process your request. See the error message for additional information. Status BulkEmailStatus } @@ -188,11 +187,11 @@ type CloudWatchDimensionConfiguration struct { // don't provide the value of the dimension when you send an email. This value has // to meet the following criteria: // - // * It can only contain ASCII letters (a–z, - // A–Z), numbers (0–9), underscores (_), or dashes (-). + // * It can only contain ASCII letters (a–z, A–Z), + // numbers (0–9), underscores (_), or dashes (-). // - // * It can contain no - // more than 256 characters. + // * It can contain no more than + // 256 characters. // // This member is required. DefaultDimensionValue *string @@ -200,11 +199,11 @@ type CloudWatchDimensionConfiguration struct { // The name of an Amazon CloudWatch dimension associated with an email sending // metric. The name has to meet the following criteria: // - // * It can only contain + // * It can only contain // ASCII letters (a–z, A–Z), numbers (0–9), underscores (_), or dashes (-). // - // * - // It can contain no more than 256 characters. + // * It + // can contain no more than 256 characters. // // This member is required. DimensionName *string @@ -293,11 +292,11 @@ type DedicatedIp struct { // The warm-up status of a dedicated IP address. The status can have one of the // following values: // - // * IN_PROGRESS – The IP address isn't ready to use because - // the dedicated IP warm-up process is ongoing. + // * IN_PROGRESS – The IP address isn't ready to use because the + // dedicated IP warm-up process is ongoing. // - // * DONE – The dedicated IP - // warm-up process is complete, and the IP address is ready to use. + // * DONE – The dedicated IP warm-up + // process is complete, and the IP address is ready to use. // // This member is required. WarmupStatus WarmupStatus @@ -379,11 +378,11 @@ type DkimAttributes struct { // A string that indicates how DKIM was configured for the identity. There are two // possible values: // - // * AWS_SES – Indicates that DKIM was configured for the + // * AWS_SES – Indicates that DKIM was configured for the // identity by using Easy DKIM // (https://docs.aws.amazon.com/ses/latest/DeveloperGuide/easy-dkim.html). // - // * + // * // EXTERNAL – Indicates that DKIM was configured for the identity by using Bring // Your Own DKIM (BYODKIM). SigningAttributesOrigin DkimSigningAttributesOrigin @@ -396,23 +395,23 @@ type DkimAttributes struct { // Describes whether or not Amazon SES has successfully located the DKIM records in // the DNS records for the domain. The status can be one of the following: // - // * + // * // PENDING – The verification process was initiated, but Amazon SES hasn't yet // detected the DKIM records in the DNS configuration for the domain. // - // * - // SUCCESS – The verification process completed successfully. + // * SUCCESS – + // The verification process completed successfully. // - // * FAILED – The - // verification process failed. This typically occurs when Amazon SES fails to find - // the DKIM records in the DNS configuration of the domain. + // * FAILED – The verification + // process failed. This typically occurs when Amazon SES fails to find the DKIM + // records in the DNS configuration of the domain. 
// - // * - // TEMPORARY_FAILURE – A temporary issue is preventing Amazon SES from determining - // the DKIM authentication status of the domain. + // * TEMPORARY_FAILURE – A + // temporary issue is preventing Amazon SES from determining the DKIM + // authentication status of the domain. // - // * NOT_STARTED – The DKIM - // verification process hasn't been initiated for the domain. + // * NOT_STARTED – The DKIM verification + // process hasn't been initiated for the domain. Status DkimStatus // If you used Easy DKIM @@ -560,29 +559,29 @@ type EmailContent struct { // The raw email message. The message has to meet the following criteria: // - // * - // The message has to contain a header and a body, separated by one blank line. + // * The + // message has to contain a header and a body, separated by one blank line. // + // * All + // of the required header fields must be present in the message. // - // * All of the required header fields must be present in the message. + // * Each part of a + // multipart MIME message must be formatted properly. // - // * Each - // part of a multipart MIME message must be formatted properly. - // - // * If you - // include attachments, they must be in a file format that the Amazon SES API v2 + // * If you include + // attachments, they must be in a file format that the Amazon SES API v2 // supports. // - // * The entire message must be Base64 encoded. + // * The entire message must be Base64 encoded. // - // * If any of the - // MIME parts in your message contain content that is outside of the 7-bit ASCII + // * If any of the MIME + // parts in your message contain content that is outside of the 7-bit ASCII // character range, you should encode that content to ensure that recipients' email // clients render the message properly. // - // * The length of any single line of - // text in the message can't exceed 1,000 characters. This restriction is defined - // in RFC 5321 (https://tools.ietf.org/html/rfc5321). + // * The length of any single line of text in + // the message can't exceed 1,000 characters. This restriction is defined in RFC + // 5321 (https://tools.ietf.org/html/rfc5321). Raw *RawMessage // The simple email message. The message consists of a subject and a message body. @@ -724,14 +723,13 @@ type IdentityInfo struct { // The email identity type. The identity type can be one of the following: // - // * + // * // EMAIL_ADDRESS – The identity is an email address. // - // * DOMAIN – The identity - // is a domain. + // * DOMAIN – The identity is a + // domain. // - // * MANAGED_DOMAIN – The identity is a domain that is managed by - // AWS. + // * MANAGED_DOMAIN – The identity is a domain that is managed by AWS. IdentityType IdentityType // Indicates whether or not you can send email from the identity. An identity is an @@ -841,20 +839,20 @@ type MailFromAttributes struct { // This member is required. MailFromDomain *string - // The status of the MAIL FROM domain. This status can have the following values: + // The status of the MAIL FROM domain. This status can have the following + // values: // + // * PENDING – Amazon SES hasn't started searching for the MX record + // yet. // - // * PENDING – Amazon SES hasn't started searching for the MX record yet. - // - // * - // SUCCESS – Amazon SES detected the required MX record for the MAIL FROM domain. - // + // * SUCCESS – Amazon SES detected the required MX record for the MAIL FROM + // domain. // - // * FAILED – Amazon SES can't find the required MX record, or the record no longer - // exists. 
+ // * FAILED – Amazon SES can't find the required MX record, or the record + // no longer exists. // - // * TEMPORARY_FAILURE – A temporary issue occurred, which prevented - // Amazon SES from determining the status of the MAIL FROM domain. + // * TEMPORARY_FAILURE – A temporary issue occurred, which + // prevented Amazon SES from determining the status of the MAIL FROM domain. // // This member is required. MailFromDomainStatus MailFromDomainStatus @@ -886,11 +884,10 @@ type MessageTag struct { // The name of the message tag. The message tag name has to meet the following // criteria: // - // * It can only contain ASCII letters (a–z, A–Z), numbers (0–9), + // * It can only contain ASCII letters (a–z, A–Z), numbers (0–9), // underscores (_), or dashes (-). // - // * It can contain no more than 256 - // characters. + // * It can contain no more than 256 characters. // // This member is required. Name *string @@ -898,11 +895,10 @@ type MessageTag struct { // The value of the message tag. The message tag value has to meet the following // criteria: // - // * It can only contain ASCII letters (a–z, A–Z), numbers (0–9), + // * It can only contain ASCII letters (a–z, A–Z), numbers (0–9), // underscores (_), or dashes (-). // - // * It can contain no more than 256 - // characters. + // * It can contain no more than 256 characters. // // This member is required. Value *string @@ -967,29 +963,28 @@ type RawMessage struct { // The raw email message. The message has to meet the following criteria: // - // * - // The message has to contain a header and a body, separated by one blank line. + // * The + // message has to contain a header and a body, separated by one blank line. // + // * All + // of the required header fields must be present in the message. // - // * All of the required header fields must be present in the message. + // * Each part of a + // multipart MIME message must be formatted properly. // - // * Each - // part of a multipart MIME message must be formatted properly. + // * Attachments must be in a + // file format that the Amazon SES supports. // - // * Attachments - // must be in a file format that the Amazon SES supports. + // * The entire message must be Base64 + // encoded. // - // * The entire message - // must be Base64 encoded. + // * If any of the MIME parts in your message contain content that is + // outside of the 7-bit ASCII character range, you should encode that content to + // ensure that recipients' email clients render the message properly. // - // * If any of the MIME parts in your message contain - // content that is outside of the 7-bit ASCII character range, you should encode - // that content to ensure that recipients' email clients render the message - // properly. - // - // * The length of any single line of text in the message can't - // exceed 1,000 characters. This restriction is defined in RFC 5321 - // (https://tools.ietf.org/html/rfc5321). + // * The length + // of any single line of text in the message can't exceed 1,000 characters. This + // restriction is defined in RFC 5321 (https://tools.ietf.org/html/rfc5321). // // This member is required. Data []byte @@ -1036,18 +1031,17 @@ type ReviewDetails struct { // The status of the latest review of your account. The status can be one of the // following: // - // * PENDING – We have received your appeal and are in the process - // of reviewing it. + // * PENDING – We have received your appeal and are in the process of + // reviewing it. 
// - // * GRANTED – Your appeal has been reviewed and your - // production access has been granted. + // * GRANTED – Your appeal has been reviewed and your production + // access has been granted. // - // * DENIED – Your appeal has been - // reviewed and your production access has been denied. + // * DENIED – Your appeal has been reviewed and your + // production access has been denied. // - // * FAILED – An internal - // error occurred and we didn't receive your appeal. You can submit your appeal - // again. + // * FAILED – An internal error occurred and we + // didn't receive your appeal. You can submit your appeal again. Status ReviewStatus } @@ -1157,11 +1151,11 @@ type SuppressionAttributes struct { // added to the suppression list for your account. This list can contain any or all // of the following: // - // * COMPLAINT – Amazon SES adds an email address to the + // * COMPLAINT – Amazon SES adds an email address to the // suppression list for your account when a message sent to that address results in // a complaint. // - // * BOUNCE – Amazon SES adds an email address to the suppression + // * BOUNCE – Amazon SES adds an email address to the suppression // list for your account when a message sent to that address results in a hard // bounce. SuppressedReasons []SuppressionListReason @@ -1170,14 +1164,14 @@ type SuppressionAttributes struct { // An object that contains details about the action of suppression list. type SuppressionListDestination struct { - // The type of action that you want to perform on the address. Acceptable values: - // + // The type of action that you want to perform on the address. Acceptable + // values: // - // * PUT: add the addresses to the suppression list. If the record already exists, - // it will override it with the new value. + // * PUT: add the addresses to the suppression list. If the record already + // exists, it will override it with the new value. // - // * DELETE: remove the addresses from - // the suppression list. + // * DELETE: remove the addresses + // from the suppression list. // // This member is required. SuppressionListImportAction SuppressionListImportAction @@ -1191,13 +1185,12 @@ type SuppressionOptions struct { // the suppression list for your account. This list can contain any or all of the // following: // - // * COMPLAINT – Amazon SES adds an email address to the - // suppression list for your account when a message sent to that address results in - // a complaint. + // * COMPLAINT – Amazon SES adds an email address to the suppression + // list for your account when a message sent to that address results in a + // complaint. // - // * BOUNCE – Amazon SES adds an email address to the suppression - // list for your account when a message sent to that address results in a hard - // bounce. + // * BOUNCE – Amazon SES adds an email address to the suppression list + // for your account when a message sent to that address results in a hard bounce. SuppressedReasons []SuppressionListReason } @@ -1213,22 +1206,22 @@ type SuppressionOptions struct { // or one of the following symbols: _ . : / = + -. The following additional // restrictions apply to tags: // -// * Tag keys and values are case sensitive. -// +// * Tag keys and values are case sensitive. // -// * For each associated resource, each tag key must be unique and it can have only -// one value. +// * For +// each associated resource, each tag key must be unique and it can have only one +// value. 
// -// * The aws: prefix is reserved for use by AWS; you can’t use it -// in any tag keys or values that you define. In addition, you can't edit or remove -// tag keys or values that use this prefix. Tags that use this prefix don’t count +// * The aws: prefix is reserved for use by AWS; you can’t use it in any +// tag keys or values that you define. In addition, you can't edit or remove tag +// keys or values that use this prefix. Tags that use this prefix don’t count // against the limit of 50 tags per resource. // -// * You can associate tags with -// public or shared resources, but the tags are available only for your AWS -// account, not any other accounts that share the resource. In addition, the tags -// are available only for resources that are located in the specified AWS Region -// for your AWS account. +// * You can associate tags with public +// or shared resources, but the tags are available only for your AWS account, not +// any other accounts that share the resource. In addition, the tags are available +// only for resources that are located in the specified AWS Region for your AWS +// account. type Tag struct { // One part of a key-value pair that defines a tag. The maximum length of a tag key diff --git a/service/sfn/api_op_CreateActivity.go b/service/sfn/api_op_CreateActivity.go index 99576e2a27f..bbfe9b773af 100644 --- a/service/sfn/api_op_CreateActivity.go +++ b/service/sfn/api_op_CreateActivity.go @@ -48,21 +48,21 @@ type CreateActivityInput struct { // (https://docs.aws.amazon.com/step-functions/latest/dg/limits.html#service-limits-state-machine-executions) // in the AWS Step Functions Developer Guide. A name must not contain: // - // * white + // * white // space // - // * brackets < > { } [ ] + // * brackets < > { } [ ] // - // * wildcard characters ? * + // * wildcard characters ? * // - // * special - // characters " # % \ ^ | ~ ` $ & , ; : / + // * special characters " + // # % \ ^ | ~ ` $ & , ; : / // - // * control characters (U+0000-001F, - // U+007F-009F) + // * control characters (U+0000-001F, U+007F-009F) // - // To enable logging with CloudWatch Logs, the name should only - // contain 0-9, A-Z, a-z, - and _. + // To + // enable logging with CloudWatch Logs, the name should only contain 0-9, A-Z, a-z, + // - and _. // // This member is required. Name *string diff --git a/service/sfn/api_op_CreateStateMachine.go b/service/sfn/api_op_CreateStateMachine.go index be364471e6b..2b0fa0ed0f6 100644 --- a/service/sfn/api_op_CreateStateMachine.go +++ b/service/sfn/api_op_CreateStateMachine.go @@ -53,21 +53,21 @@ type CreateStateMachineInput struct { // The name of the state machine. A name must not contain: // - // * white space + // * white space // + // * + // brackets < > { } [ ] // - // * brackets < > { } [ ] + // * wildcard characters ? * // - // * wildcard characters ? * + // * special characters " # % \ ^ + // | ~ ` $ & , ; : / // - // * special characters - // " # % \ ^ | ~ ` $ & , ; : / + // * control characters (U+0000-001F, U+007F-009F) // - // * control characters (U+0000-001F, - // U+007F-009F) - // - // To enable logging with CloudWatch Logs, the name should only - // contain 0-9, A-Z, a-z, - and _. + // To enable + // logging with CloudWatch Logs, the name should only contain 0-9, A-Z, a-z, - and + // _. // // This member is required. 
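The Step Functions name constraints repeated in the doc comments above (no white space, brackets, wildcards, the listed special characters, or control characters, with a stricter 0-9, A-Z, a-z, - and _ subset when CloudWatch Logs logging is enabled) are straightforward to check client-side before calling CreateActivity or CreateStateMachine. The following standalone helper is a hypothetical sketch, not part of the generated SDK:

package main

import (
	"fmt"
	"regexp"
)

// forbiddenNameChars mirrors the documented list: white space, brackets
// < > { } [ ], wildcards ? *, the special characters " # % \ ^ | ~ ` $ & , ; : /,
// and control characters U+0000-001F and U+007F-009F.
var forbiddenNameChars = regexp.MustCompile("[\\s<>{}\\[\\]?*\"#%\\\\^|~`$&,;:/\\x00-\\x1f\\x7f-\\x9f]")

// logFriendlyName is the stricter subset recommended when logging with
// CloudWatch Logs is enabled: only 0-9, A-Z, a-z, - and _.
var logFriendlyName = regexp.MustCompile(`^[0-9A-Za-z_-]+$`)

// checkName reports whether a proposed activity or state machine name
// satisfies the documented constraints.
func checkName(name string) error {
	if c := forbiddenNameChars.FindString(name); c != "" {
		return fmt.Errorf("name %q contains forbidden character %q", name, c)
	}
	if !logFriendlyName.MatchString(name) {
		fmt.Printf("note: %q is valid, but not recommended when CloudWatch Logs logging is enabled\n", name)
	}
	return nil
}

func main() {
	fmt.Println(checkName("order-processing_v2")) // <nil>
	fmt.Println(checkName("bad name {1}"))        // error: forbidden character " "
}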
Name *string diff --git a/service/sfn/api_op_DescribeActivity.go b/service/sfn/api_op_DescribeActivity.go index e8dff0df740..e88d930a5a1 100644 --- a/service/sfn/api_op_DescribeActivity.go +++ b/service/sfn/api_op_DescribeActivity.go @@ -50,21 +50,20 @@ type DescribeActivityOutput struct { // The name of the activity. A name must not contain: // - // * white space + // * white space // - // * - // brackets < > { } [ ] + // * brackets < + // > { } [ ] // - // * wildcard characters ? * + // * wildcard characters ? * // - // * special characters " - // # % \ ^ | ~ ` $ & , ; : / + // * special characters " # % \ ^ | ~ ` $ & , + // ; : / // - // * control characters (U+0000-001F, - // U+007F-009F) + // * control characters (U+0000-001F, U+007F-009F) // - // To enable logging with CloudWatch Logs, the name should only - // contain 0-9, A-Z, a-z, - and _. + // To enable logging with + // CloudWatch Logs, the name should only contain 0-9, A-Z, a-z, - and _. // // This member is required. Name *string diff --git a/service/sfn/api_op_DescribeExecution.go b/service/sfn/api_op_DescribeExecution.go index 8892efcbc16..ddf6bae5e85 100644 --- a/service/sfn/api_op_DescribeExecution.go +++ b/service/sfn/api_op_DescribeExecution.go @@ -70,21 +70,20 @@ type DescribeExecutionOutput struct { // The name of the execution. A name must not contain: // - // * white space + // * white space // - // * - // brackets < > { } [ ] + // * brackets < + // > { } [ ] // - // * wildcard characters ? * + // * wildcard characters ? * // - // * special characters " - // # % \ ^ | ~ ` $ & , ; : / + // * special characters " # % \ ^ | ~ ` $ & , + // ; : / // - // * control characters (U+0000-001F, - // U+007F-009F) + // * control characters (U+0000-001F, U+007F-009F) // - // To enable logging with CloudWatch Logs, the name should only - // contain 0-9, A-Z, a-z, - and _. + // To enable logging with + // CloudWatch Logs, the name should only contain 0-9, A-Z, a-z, - and _. Name *string // The JSON output data of the execution. Length constraints apply to the payload diff --git a/service/sfn/api_op_DescribeStateMachine.go b/service/sfn/api_op_DescribeStateMachine.go index 1fa8a8ba3fa..cd3938373cc 100644 --- a/service/sfn/api_op_DescribeStateMachine.go +++ b/service/sfn/api_op_DescribeStateMachine.go @@ -53,21 +53,21 @@ type DescribeStateMachineOutput struct { // The name of the state machine. A name must not contain: // - // * white space + // * white space // + // * + // brackets < > { } [ ] // - // * brackets < > { } [ ] + // * wildcard characters ? * // - // * wildcard characters ? * + // * special characters " # % \ ^ + // | ~ ` $ & , ; : / // - // * special characters - // " # % \ ^ | ~ ` $ & , ; : / + // * control characters (U+0000-001F, U+007F-009F) // - // * control characters (U+0000-001F, - // U+007F-009F) - // - // To enable logging with CloudWatch Logs, the name should only - // contain 0-9, A-Z, a-z, - and _. + // To enable + // logging with CloudWatch Logs, the name should only contain 0-9, A-Z, a-z, - and + // _. // // This member is required. Name *string diff --git a/service/sfn/api_op_StartExecution.go b/service/sfn/api_op_StartExecution.go index 62cc8ca21af..04e51f16e58 100644 --- a/service/sfn/api_op_StartExecution.go +++ b/service/sfn/api_op_StartExecution.go @@ -51,21 +51,21 @@ type StartExecutionInput struct { // (https://docs.aws.amazon.com/step-functions/latest/dg/limits.html#service-limits-state-machine-executions) // in the AWS Step Functions Developer Guide. 
A name must not contain: // - // * white + // * white // space // - // * brackets < > { } [ ] + // * brackets < > { } [ ] // - // * wildcard characters ? * + // * wildcard characters ? * // - // * special - // characters " # % \ ^ | ~ ` $ & , ; : / + // * special characters " + // # % \ ^ | ~ ` $ & , ; : / // - // * control characters (U+0000-001F, - // U+007F-009F) + // * control characters (U+0000-001F, U+007F-009F) // - // To enable logging with CloudWatch Logs, the name should only - // contain 0-9, A-Z, a-z, - and _. + // To + // enable logging with CloudWatch Logs, the name should only contain 0-9, A-Z, a-z, + // - and _. Name *string // Passes the AWS X-Ray trace header. The trace header can also be passed in the diff --git a/service/sfn/types/enums.go b/service/sfn/types/enums.go index 9d661406966..724fb60fa1f 100644 --- a/service/sfn/types/enums.go +++ b/service/sfn/types/enums.go @@ -9,7 +9,7 @@ const ( ExecutionStatusRunning ExecutionStatus = "RUNNING" ExecutionStatusSucceeded ExecutionStatus = "SUCCEEDED" ExecutionStatusFailed ExecutionStatus = "FAILED" - ExecutionStatusTimed_out ExecutionStatus = "TIMED_OUT" + ExecutionStatusTimedOut ExecutionStatus = "TIMED_OUT" ExecutionStatusAborted ExecutionStatus = "ABORTED" ) diff --git a/service/sfn/types/types.go b/service/sfn/types/types.go index 57086b109f2..f13e0b835a6 100644 --- a/service/sfn/types/types.go +++ b/service/sfn/types/types.go @@ -31,21 +31,20 @@ type ActivityListItem struct { // The name of the activity. A name must not contain: // - // * white space + // * white space // - // * - // brackets < > { } [ ] + // * brackets < + // > { } [ ] // - // * wildcard characters ? * + // * wildcard characters ? * // - // * special characters " - // # % \ ^ | ~ ` $ & , ; : / + // * special characters " # % \ ^ | ~ ` $ & , + // ; : / // - // * control characters (U+0000-001F, - // U+007F-009F) + // * control characters (U+0000-001F, U+007F-009F) // - // To enable logging with CloudWatch Logs, the name should only - // contain 0-9, A-Z, a-z, - and _. + // To enable logging with + // CloudWatch Logs, the name should only contain 0-9, A-Z, a-z, - and _. // // This member is required. Name *string @@ -160,21 +159,20 @@ type ExecutionListItem struct { // The name of the execution. A name must not contain: // - // * white space + // * white space // - // * - // brackets < > { } [ ] + // * brackets < + // > { } [ ] // - // * wildcard characters ? * + // * wildcard characters ? * // - // * special characters " - // # % \ ^ | ~ ` $ & , ; : / + // * special characters " # % \ ^ | ~ ` $ & , + // ; : / // - // * control characters (U+0000-001F, - // U+007F-009F) + // * control characters (U+0000-001F, U+007F-009F) // - // To enable logging with CloudWatch Logs, the name should only - // contain 0-9, A-Z, a-z, - and _. + // To enable logging with + // CloudWatch Logs, the name should only contain 0-9, A-Z, a-z, - and _. // // This member is required. Name *string @@ -503,21 +501,20 @@ type StateExitedEventDetails struct { // The name of the state. A name must not contain: // - // * white space + // * white space // - // * - // brackets < > { } [ ] + // * brackets < > { + // } [ ] // - // * wildcard characters ? * + // * wildcard characters ? 
* // - // * special characters " - // # % \ ^ | ~ ` $ & , ; : / + // * special characters " # % \ ^ | ~ ` $ & , ; : + // / // - // * control characters (U+0000-001F, - // U+007F-009F) + // * control characters (U+0000-001F, U+007F-009F) // - // To enable logging with CloudWatch Logs, the name should only - // contain 0-9, A-Z, a-z, - and _. + // To enable logging with + // CloudWatch Logs, the name should only contain 0-9, A-Z, a-z, - and _. // // This member is required. Name *string @@ -540,21 +537,21 @@ type StateMachineListItem struct { // The name of the state machine. A name must not contain: // - // * white space + // * white space // + // * + // brackets < > { } [ ] // - // * brackets < > { } [ ] - // - // * wildcard characters ? * + // * wildcard characters ? * // - // * special characters - // " # % \ ^ | ~ ` $ & , ; : / + // * special characters " # % \ ^ + // | ~ ` $ & , ; : / // - // * control characters (U+0000-001F, - // U+007F-009F) + // * control characters (U+0000-001F, U+007F-009F) // - // To enable logging with CloudWatch Logs, the name should only - // contain 0-9, A-Z, a-z, - and _. + // To enable + // logging with CloudWatch Logs, the name should only contain 0-9, A-Z, a-z, - and + // _. // // This member is required. Name *string diff --git a/service/shield/api_op_CreateProtection.go b/service/shield/api_op_CreateProtection.go index 6406d05c9a4..001484f931c 100644 --- a/service/shield/api_op_CreateProtection.go +++ b/service/shield/api_op_CreateProtection.go @@ -45,26 +45,26 @@ type CreateProtectionInput struct { // The ARN (Amazon Resource Name) of the resource to be protected. The ARN should // be in one of the following formats: // - // * For an Application Load Balancer: + // * For an Application Load Balancer: // arn:aws:elasticloadbalancing:region:account-id:loadbalancer/app/load-balancer-name/load-balancer-id // - // - // * For an Elastic Load Balancer (Classic Load Balancer): + // * + // For an Elastic Load Balancer (Classic Load Balancer): // arn:aws:elasticloadbalancing:region:account-id:loadbalancer/load-balancer-name // - // - // * For an AWS CloudFront distribution: + // * + // For an AWS CloudFront distribution: // arn:aws:cloudfront::account-id:distribution/distribution-id // - // * For an AWS - // Global Accelerator accelerator: + // * For an AWS Global + // Accelerator accelerator: // arn:aws:globalaccelerator::account-id:accelerator/accelerator-id // - // * For - // Amazon Route 53: arn:aws:route53:::hostedzone/hosted-zone-id + // * For Amazon + // Route 53: arn:aws:route53:::hostedzone/hosted-zone-id // - // * For an - // Elastic IP address: arn:aws:ec2:region:account-id:eip-allocation/allocation-id + // * For an Elastic IP + // address: arn:aws:ec2:region:account-id:eip-allocation/allocation-id // // This member is required. 
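Given one of the ARN formats listed above, creating the protection itself is a single call on the generated Shield client. The sketch below is illustrative only; it assumes the NewFromConfig constructor used throughout this SDK, an aws.Config loaded elsewhere, and a Name field on the input that is not shown in this hunk.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/shield"
)

// protectDistribution registers a Shield Advanced protection for a CloudFront
// distribution, using the ARN format documented above.
func protectDistribution(ctx context.Context, cfg aws.Config, accountID, distributionID string) error {
	client := shield.NewFromConfig(cfg)
	_, err := client.CreateProtection(ctx, &shield.CreateProtectionInput{
		// Name is an assumption for this sketch; only ResourceArn is documented here.
		Name:        aws.String("cf-" + distributionID),
		ResourceArn: aws.String("arn:aws:cloudfront::" + accountID + ":distribution/" + distributionID),
	})
	return err
}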
ResourceArn *string diff --git a/service/shield/types/enums.go b/service/shield/types/enums.go index 6a62720f8e8..c14eddf8d8a 100644 --- a/service/shield/types/enums.go +++ b/service/shield/types/enums.go @@ -24,14 +24,14 @@ type AttackPropertyIdentifier string // Enum values for AttackPropertyIdentifier const ( - AttackPropertyIdentifierDestination_url AttackPropertyIdentifier = "DESTINATION_URL" - AttackPropertyIdentifierReferrer AttackPropertyIdentifier = "REFERRER" - AttackPropertyIdentifierSource_asn AttackPropertyIdentifier = "SOURCE_ASN" - AttackPropertyIdentifierSource_country AttackPropertyIdentifier = "SOURCE_COUNTRY" - AttackPropertyIdentifierSource_ip_address AttackPropertyIdentifier = "SOURCE_IP_ADDRESS" - AttackPropertyIdentifierSource_user_agent AttackPropertyIdentifier = "SOURCE_USER_AGENT" - AttackPropertyIdentifierWordpress_pingback_reflector AttackPropertyIdentifier = "WORDPRESS_PINGBACK_REFLECTOR" - AttackPropertyIdentifierWordpress_pingback_source AttackPropertyIdentifier = "WORDPRESS_PINGBACK_SOURCE" + AttackPropertyIdentifierDestinationUrl AttackPropertyIdentifier = "DESTINATION_URL" + AttackPropertyIdentifierReferrer AttackPropertyIdentifier = "REFERRER" + AttackPropertyIdentifierSourceAsn AttackPropertyIdentifier = "SOURCE_ASN" + AttackPropertyIdentifierSourceCountry AttackPropertyIdentifier = "SOURCE_COUNTRY" + AttackPropertyIdentifierSourceIpAddress AttackPropertyIdentifier = "SOURCE_IP_ADDRESS" + AttackPropertyIdentifierSourceUserAgent AttackPropertyIdentifier = "SOURCE_USER_AGENT" + AttackPropertyIdentifierWordpressPingbackReflector AttackPropertyIdentifier = "WORDPRESS_PINGBACK_REFLECTOR" + AttackPropertyIdentifierWordpressPingbackSource AttackPropertyIdentifier = "WORDPRESS_PINGBACK_SOURCE" ) // Values returns all known values for AttackPropertyIdentifier. Note that this can diff --git a/service/shield/types/types.go b/service/shield/types/types.go index 03e9fe7e97e..4a08a24be0a 100644 --- a/service/shield/types/types.go +++ b/service/shield/types/types.go @@ -92,46 +92,46 @@ type AttackVectorDescription struct { // The attack type. Valid values: // - // * UDP_TRAFFIC + // * UDP_TRAFFIC // - // * UDP_FRAGMENT + // * UDP_FRAGMENT // - // * + // * // GENERIC_UDP_REFLECTION // - // * DNS_REFLECTION + // * DNS_REFLECTION // - // * NTP_REFLECTION + // * NTP_REFLECTION // - // * + // * // CHARGEN_REFLECTION // - // * SSDP_REFLECTION + // * SSDP_REFLECTION // - // * PORT_MAPPER + // * PORT_MAPPER // - // * - // RIP_REFLECTION + // * RIP_REFLECTION // - // * SNMP_REFLECTION + // * + // SNMP_REFLECTION // - // * MSSQL_REFLECTION + // * MSSQL_REFLECTION // - // * - // NET_BIOS_REFLECTION + // * NET_BIOS_REFLECTION // - // * SYN_FLOOD + // * SYN_FLOOD // - // * ACK_FLOOD - // - // * REQUEST_FLOOD + // * + // ACK_FLOOD // + // * REQUEST_FLOOD // // * HTTP_REFLECTION // - // * UDS_REFLECTION + // * UDS_REFLECTION // - // * MEMCACHED_REFLECTION + // * + // MEMCACHED_REFLECTION // // This member is required. VectorType *string diff --git a/service/signer/api_op_StartSigningJob.go b/service/signer/api_op_StartSigningJob.go index 81652136ec1..a9d08859694 100644 --- a/service/signer/api_op_StartSigningJob.go +++ b/service/signer/api_op_StartSigningJob.go @@ -16,22 +16,22 @@ import ( // viewable by the ListSigningJobs operation for two years after they are // performed. Note the following requirements: // -// * You must create an Amazon S3 +// * You must create an Amazon S3 // source bucket. 
For more information, see Create a Bucket // (http://docs.aws.amazon.com/AmazonS3/latest/gsg/CreatingABucket.html) in the // Amazon S3 Getting Started Guide. // -// * Your S3 source bucket must be version +// * Your S3 source bucket must be version // enabled. // -// * You must create an S3 destination bucket. Code signing uses your -// S3 destination bucket to write your signed code. +// * You must create an S3 destination bucket. Code signing uses your S3 +// destination bucket to write your signed code. // -// * You specify the name of -// the source and destination buckets when calling the StartSigningJob operation. +// * You specify the name of the +// source and destination buckets when calling the StartSigningJob operation. // -// -// * You must also specify a request token that identifies your request to code +// * +// You must also specify a request token that identifies your request to code // signing. // // You can call the DescribeSigningJob and the ListSigningJobs actions diff --git a/service/sms/api_op_CreateReplicationJob.go b/service/sms/api_op_CreateReplicationJob.go index b513ffb7661..2788f43075d 100644 --- a/service/sms/api_op_CreateReplicationJob.go +++ b/service/sms/api_op_CreateReplicationJob.go @@ -54,18 +54,18 @@ type CreateReplicationJobInput struct { // The ID of the KMS key for replication jobs that produce encrypted AMIs. This // value can be any of the following: // - // * KMS key ID + // * KMS key ID // - // * KMS key alias + // * KMS key alias // - // * - // ARN referring to the KMS key ID + // * ARN + // referring to the KMS key ID // - // * ARN referring to the KMS key alias + // * ARN referring to the KMS key alias // - // If - // encrypted is true but a KMS key ID is not specified, the customer's default KMS - // key for Amazon EBS is used. + // If encrypted + // is true but a KMS key ID is not specified, the customer's default KMS key for + // Amazon EBS is used. KmsKeyId *string // The license type to be used for the AMI created by a successful replication run. diff --git a/service/sms/api_op_UpdateReplicationJob.go b/service/sms/api_op_UpdateReplicationJob.go index edecc980e9f..8d734df75e6 100644 --- a/service/sms/api_op_UpdateReplicationJob.go +++ b/service/sms/api_op_UpdateReplicationJob.go @@ -48,18 +48,18 @@ type UpdateReplicationJobInput struct { // The ID of the KMS key for replication jobs that produce encrypted AMIs. This // value can be any of the following: // - // * KMS key ID + // * KMS key ID // - // * KMS key alias + // * KMS key alias // - // * - // ARN referring to the KMS key ID + // * ARN + // referring to the KMS key ID // - // * ARN referring to the KMS key alias + // * ARN referring to the KMS key alias // - // If - // encrypted is enabled but a KMS key ID is not specified, the customer's default - // KMS key for Amazon EBS is used. + // If encrypted + // is enabled but a KMS key ID is not specified, the customer's default KMS key for + // Amazon EBS is used. KmsKeyId *string // The license type to be used for the AMI created by a successful replication run. diff --git a/service/sms/doc.go b/service/sms/doc.go index 16e2813c10d..b91fefa733e 100644 --- a/service/sms/doc.go +++ b/service/sms/doc.go @@ -7,10 +7,10 @@ // easier and faster for you to migrate your on-premises workloads to AWS. 
To learn // more about AWS SMS, see the following resources: // -// * AWS Server Migration -// Service product page (http://aws.amazon.com/server-migration-service/) +// * AWS Server Migration Service +// product page (http://aws.amazon.com/server-migration-service/) // -// * -// AWS Server Migration Service User Guide +// * AWS Server +// Migration Service User Guide // (https://docs.aws.amazon.com/server-migration-service/latest/userguide/) package sms diff --git a/service/sms/types/enums.go b/service/sms/types/enums.go index 33d5a98d877..202fb2de55c 100644 --- a/service/sms/types/enums.go +++ b/service/sms/types/enums.go @@ -133,12 +133,12 @@ type AppStatus string // Enum values for AppStatus const ( - AppStatusCreating AppStatus = "CREATING" - AppStatusActive AppStatus = "ACTIVE" - AppStatusUpdating AppStatus = "UPDATING" - AppStatusDeleting AppStatus = "DELETING" - AppStatusDeleted AppStatus = "DELETED" - AppStatusDelete_failed AppStatus = "DELETE_FAILED" + AppStatusCreating AppStatus = "CREATING" + AppStatusActive AppStatus = "ACTIVE" + AppStatusUpdating AppStatus = "UPDATING" + AppStatusDeleting AppStatus = "DELETING" + AppStatusDeleted AppStatus = "DELETED" + AppStatusDeleteFailed AppStatus = "DELETE_FAILED" ) // Values returns all known values for AppStatus. Note that this can be expanded in @@ -329,8 +329,8 @@ type ScriptType string // Enum values for ScriptType const ( - ScriptTypeShell_script ScriptType = "SHELL_SCRIPT" - ScriptTypePowershell_script ScriptType = "POWERSHELL_SCRIPT" + ScriptTypeShellScript ScriptType = "SHELL_SCRIPT" + ScriptTypePowershellScript ScriptType = "POWERSHELL_SCRIPT" ) // Values returns all known values for ScriptType. Note that this can be expanded diff --git a/service/sms/types/types.go b/service/sms/types/types.go index 41d405faae1..3dcdc745b30 100644 --- a/service/sms/types/types.go +++ b/service/sms/types/types.go @@ -165,18 +165,18 @@ type ReplicationJob struct { // The ID of the KMS key for replication jobs that produce encrypted AMIs. This // value can be any of the following: // - // * KMS key ID + // * KMS key ID // - // * KMS key alias + // * KMS key alias // - // * - // ARN referring to the KMS key ID + // * ARN + // referring to the KMS key ID // - // * ARN referring to the KMS key alias + // * ARN referring to the KMS key alias // - // If - // encrypted is enabled but a KMS key ID is not specified, the customer's default - // KMS key for Amazon EBS is used. + // If encrypted + // is enabled but a KMS key ID is not specified, the customer's default KMS key for + // Amazon EBS is used. KmsKeyId *string // The ID of the latest Amazon Machine Image (AMI). @@ -241,18 +241,18 @@ type ReplicationRun struct { // The ID of the KMS key for replication jobs that produce encrypted AMIs. This // value can be any of the following: // - // * KMS key ID + // * KMS key ID // - // * KMS key alias + // * KMS key alias // - // * - // ARN referring to the KMS key ID + // * ARN + // referring to the KMS key ID // - // * ARN referring to the KMS key alias + // * ARN referring to the KMS key alias // - // If - // encrypted is true but a KMS key ID is not specified, the customer's default KMS - // key for Amazon EBS is used. + // If encrypted + // is true but a KMS key ID is not specified, the customer's default KMS key for + // Amazon EBS is used. KmsKeyId *string // The ID of the replication run. @@ -424,18 +424,18 @@ type ServerReplicationParameters struct { // The ID of the KMS key for replication jobs that produce encrypted AMIs. 
This // value can be any of the following: // - // * KMS key ID + // * KMS key ID // - // * KMS key alias + // * KMS key alias // - // * - // ARN referring to the KMS key ID + // * ARN + // referring to the KMS key ID // - // * ARN referring to the KMS key alias + // * ARN referring to the KMS key alias // - // If - // encrypted is enabled but a KMS key ID is not specified, the customer's default - // KMS key for Amazon EBS is used. + // If encrypted + // is enabled but a KMS key ID is not specified, the customer's default KMS key for + // Amazon EBS is used. KmsKeyId *string // The license type for creating a replication job for the server. diff --git a/service/snowball/api_op_CreateCluster.go b/service/snowball/api_op_CreateCluster.go index 996ae0d2a71..469533c063b 100644 --- a/service/snowball/api_op_CreateCluster.go +++ b/service/snowball/api_op_CreateCluster.go @@ -61,35 +61,34 @@ type CreateClusterInput struct { // device moves to its destination while in transit. Regional shipping speeds are // as follows: // - // * In Australia, you have access to express shipping. Typically, + // * In Australia, you have access to express shipping. Typically, // Snow devices shipped express are delivered in about a day. // - // * In the - // European Union (EU), you have access to express shipping. Typically, Snow - // devices shipped express are delivered in about a day. In addition, most - // countries in the EU have access to standard shipping, which typically takes less - // than a week, one way. + // * In the European + // Union (EU), you have access to express shipping. Typically, Snow devices shipped + // express are delivered in about a day. In addition, most countries in the EU have + // access to standard shipping, which typically takes less than a week, one way. // - // * In India, Snow device are delivered in one to seven - // days. + // * + // In India, Snow device are delivered in one to seven days. // - // * In the United States of America (US), you have access to one-day - // shipping and two-day shipping. - // - // * In Australia, you have access to express - // shipping. Typically, devices shipped express are delivered in about a day. + // * In the United + // States of America (US), you have access to one-day shipping and two-day + // shipping. // + // * In Australia, you have access to express shipping. Typically, + // devices shipped express are delivered in about a day. // - // * In the European Union (EU), you have access to express shipping. Typically, - // Snow devices shipped express are delivered in about a day. In addition, most - // countries in the EU have access to standard shipping, which typically takes less - // than a week, one way. + // * In the European Union + // (EU), you have access to express shipping. Typically, Snow devices shipped + // express are delivered in about a day. In addition, most countries in the EU have + // access to standard shipping, which typically takes less than a week, one way. // - // * In India, Snow device are delivered in one to seven - // days. + // * + // In India, Snow device are delivered in one to seven days. // - // * In the US, you have access to one-day shipping and two-day - // shipping. + // * In the US, you have + // access to one-day shipping and two-day shipping. // // This member is required. 
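The Go constants behind these shipping speeds are the renamed ShippingOption values in service/snowball/types/enums.go further down in this diff. As a purely illustrative example (the chooseShippingOption helper is hypothetical), callers populating CreateClusterInput would now pick a value like this:

package example

import "github.com/aws/aws-sdk-go-v2/service/snowball/types"

// chooseShippingOption maps a coarse region label to one of the renamed
// ShippingOption constants, following the regional availability described in
// the documentation above.
func chooseShippingOption(region string) types.ShippingOption {
	switch region {
	case "us":
		// The US has access to one-day and two-day shipping.
		return types.ShippingOptionNextDay
	case "au", "eu":
		// Australia and most EU countries have access to express shipping.
		return types.ShippingOptionExpress
	default:
		// Elsewhere (for example India), standard delivery applies.
		return types.ShippingOptionStandard
	}
}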
ShippingOption types.ShippingOption diff --git a/service/snowball/api_op_CreateJob.go b/service/snowball/api_op_CreateJob.go index 505ff741f05..99feff4add8 100644 --- a/service/snowball/api_op_CreateJob.go +++ b/service/snowball/api_op_CreateJob.go @@ -84,20 +84,20 @@ type CreateJobInput struct { // the Snow device, rather it represents how quickly the Snow device moves to its // destination while in transit. Regional shipping speeds are as follows: // - // * In + // * In // Australia, you have access to express shipping. Typically, Snow devices shipped // express are delivered in about a day. // - // * In the European Union (EU), you - // have access to express shipping. Typically, Snow devices shipped express are + // * In the European Union (EU), you have + // access to express shipping. Typically, Snow devices shipped express are // delivered in about a day. In addition, most countries in the EU have access to // standard shipping, which typically takes less than a week, one way. // - // * In - // India, Snow devices are delivered in one to seven days. + // * In India, + // Snow devices are delivered in one to seven days. // - // * In the US, you - // have access to one-day shipping and two-day shipping. + // * In the US, you have access + // to one-day shipping and two-day shipping. ShippingOption types.ShippingOption // If your job is being created in one of the US regions, you have the option of diff --git a/service/snowball/types/enums.go b/service/snowball/types/enums.go index 94f0295b103..0a7abbec68a 100644 --- a/service/snowball/types/enums.go +++ b/service/snowball/types/enums.go @@ -6,11 +6,11 @@ type ClusterState string // Enum values for ClusterState const ( - ClusterStateAwaiting_quorum ClusterState = "AwaitingQuorum" - ClusterStatePending ClusterState = "Pending" - ClusterStateIn_use ClusterState = "InUse" - ClusterStateComplete ClusterState = "Complete" - ClusterStateCancelled ClusterState = "Cancelled" + ClusterStateAwaitingQuorum ClusterState = "AwaitingQuorum" + ClusterStatePending ClusterState = "Pending" + ClusterStateInUse ClusterState = "InUse" + ClusterStateComplete ClusterState = "Complete" + ClusterStateCancelled ClusterState = "Cancelled" ) // Values returns all known values for ClusterState. 
Note that this can be expanded @@ -30,19 +30,19 @@ type JobState string // Enum values for JobState const ( - JobStateNew JobState = "New" - JobStatePreparing_appliance JobState = "PreparingAppliance" - JobStatePreparing_shipment JobState = "PreparingShipment" - JobStateIn_transit_to_customer JobState = "InTransitToCustomer" - JobStateWith_customer JobState = "WithCustomer" - JobStateIn_transit_to_aws JobState = "InTransitToAWS" - JobStateWith_aws_sorting_facility JobState = "WithAWSSortingFacility" - JobStateWith_aws JobState = "WithAWS" - JobStateIn_progress JobState = "InProgress" - JobStateComplete JobState = "Complete" - JobStateCancelled JobState = "Cancelled" - JobStateListing JobState = "Listing" - JobStatePending JobState = "Pending" + JobStateNew JobState = "New" + JobStatePreparingAppliance JobState = "PreparingAppliance" + JobStatePreparingShipment JobState = "PreparingShipment" + JobStateInTransitToCustomer JobState = "InTransitToCustomer" + JobStateWithCustomer JobState = "WithCustomer" + JobStateInTransitToAws JobState = "InTransitToAWS" + JobStateWithAwsSortingFacility JobState = "WithAWSSortingFacility" + JobStateWithAws JobState = "WithAWS" + JobStateInProgress JobState = "InProgress" + JobStateComplete JobState = "Complete" + JobStateCancelled JobState = "Cancelled" + JobStateListing JobState = "Listing" + JobStatePending JobState = "Pending" ) // Values returns all known values for JobState. Note that this can be expanded in @@ -70,9 +70,9 @@ type JobType string // Enum values for JobType const ( - JobTypeImport JobType = "IMPORT" - JobTypeExport JobType = "EXPORT" - JobTypeLocal_use JobType = "LOCAL_USE" + JobTypeImport JobType = "IMPORT" + JobTypeExport JobType = "EXPORT" + JobTypeLocalUse JobType = "LOCAL_USE" ) // Values returns all known values for JobType. Note that this can be expanded in @@ -108,10 +108,10 @@ type ShippingLabelStatus string // Enum values for ShippingLabelStatus const ( - ShippingLabelStatusIn_progress ShippingLabelStatus = "InProgress" - ShippingLabelStatusTimed_out ShippingLabelStatus = "TimedOut" - ShippingLabelStatusSucceeded ShippingLabelStatus = "Succeeded" - ShippingLabelStatusFailed ShippingLabelStatus = "Failed" + ShippingLabelStatusInProgress ShippingLabelStatus = "InProgress" + ShippingLabelStatusTimedOut ShippingLabelStatus = "TimedOut" + ShippingLabelStatusSucceeded ShippingLabelStatus = "Succeeded" + ShippingLabelStatusFailed ShippingLabelStatus = "Failed" ) // Values returns all known values for ShippingLabelStatus. Note that this can be @@ -130,10 +130,10 @@ type ShippingOption string // Enum values for ShippingOption const ( - ShippingOptionSecond_day ShippingOption = "SECOND_DAY" - ShippingOptionNext_day ShippingOption = "NEXT_DAY" - ShippingOptionExpress ShippingOption = "EXPRESS" - ShippingOptionStandard ShippingOption = "STANDARD" + ShippingOptionSecondDay ShippingOption = "SECOND_DAY" + ShippingOptionNextDay ShippingOption = "NEXT_DAY" + ShippingOptionExpress ShippingOption = "EXPRESS" + ShippingOptionStandard ShippingOption = "STANDARD" ) // Values returns all known values for ShippingOption. 
Note that this can be @@ -152,13 +152,13 @@ type SnowballCapacity string // Enum values for SnowballCapacity const ( - SnowballCapacityT50 SnowballCapacity = "T50" - SnowballCapacityT80 SnowballCapacity = "T80" - SnowballCapacityT100 SnowballCapacity = "T100" - SnowballCapacityT42 SnowballCapacity = "T42" - SnowballCapacityT98 SnowballCapacity = "T98" - SnowballCapacityT8 SnowballCapacity = "T8" - SnowballCapacityNo_preference SnowballCapacity = "NoPreference" + SnowballCapacityT50 SnowballCapacity = "T50" + SnowballCapacityT80 SnowballCapacity = "T80" + SnowballCapacityT100 SnowballCapacity = "T100" + SnowballCapacityT42 SnowballCapacity = "T42" + SnowballCapacityT98 SnowballCapacity = "T98" + SnowballCapacityT8 SnowballCapacity = "T8" + SnowballCapacityNoPreference SnowballCapacity = "NoPreference" ) // Values returns all known values for SnowballCapacity. Note that this can be @@ -182,10 +182,10 @@ type SnowballType string const ( SnowballTypeStandard SnowballType = "STANDARD" SnowballTypeEdge SnowballType = "EDGE" - SnowballTypeEdge_c SnowballType = "EDGE_C" - SnowballTypeEdge_cg SnowballType = "EDGE_CG" - SnowballTypeEdge_s SnowballType = "EDGE_S" - SnowballTypeSnc1_hdd SnowballType = "SNC1_HDD" + SnowballTypeEdgeC SnowballType = "EDGE_C" + SnowballTypeEdgeCg SnowballType = "EDGE_CG" + SnowballTypeEdgeS SnowballType = "EDGE_S" + SnowballTypeSnc1Hdd SnowballType = "SNC1_HDD" ) // Values returns all known values for SnowballType. Note that this can be expanded diff --git a/service/snowball/types/types.go b/service/snowball/types/types.go index f01115c0521..c62a84457c7 100644 --- a/service/snowball/types/types.go +++ b/service/snowball/types/types.go @@ -127,20 +127,20 @@ type ClusterMetadata struct { // soon you'll get each device, rather it represents how quickly each device moves // to its destination while in transit. Regional shipping speeds are as follows: // - // - // * In Australia, you have access to express shipping. Typically, devices shipped + // * + // In Australia, you have access to express shipping. Typically, devices shipped // express are delivered in about a day. // - // * In the European Union (EU), you - // have access to express shipping. Typically, Snow devices shipped express are + // * In the European Union (EU), you have + // access to express shipping. Typically, Snow devices shipped express are // delivered in about a day. In addition, most countries in the EU have access to // standard shipping, which typically takes less than a week, one way. // - // * In - // India, Snow devices are delivered in one to seven days. + // * In India, + // Snow devices are delivered in one to seven days. // - // * In the US, you - // have access to one-day shipping and two-day shipping. + // * In the US, you have access + // to one-day shipping and two-day shipping. ShippingOption ShippingOption // The type of AWS Snow device to use for this cluster. For cluster jobs, AWS Snow @@ -478,19 +478,19 @@ type ShippingDetails struct { // how quickly it moves to its destination while in transit. Regional shipping // speeds are as follows: // - // * In Australia, you have access to express shipping. + // * In Australia, you have access to express shipping. // Typically, Snow devices shipped express are delivered in about a day. // - // * In - // the European Union (EU), you have access to express shipping. Typically, Snow + // * In the + // European Union (EU), you have access to express shipping. Typically, Snow // devices shipped express are delivered in about a day. 
In addition, most // countries in the EU have access to standard shipping, which typically takes less // than a week, one way. // - // * In India, Snow device are delivered in one to seven + // * In India, Snow device are delivered in one to seven // days. // - // * In the United States of America (US), you have access to one-day + // * In the United States of America (US), you have access to one-day // shipping and two-day shipping. ShippingOption ShippingOption } diff --git a/service/sns/api_op_CheckIfPhoneNumberIsOptedOut.go b/service/sns/api_op_CheckIfPhoneNumberIsOptedOut.go index 3415f3c3787..4f5bdb8cd54 100644 --- a/service/sns/api_op_CheckIfPhoneNumberIsOptedOut.go +++ b/service/sns/api_op_CheckIfPhoneNumberIsOptedOut.go @@ -43,11 +43,11 @@ type CheckIfPhoneNumberIsOptedOutOutput struct { // Indicates whether the phone number is opted out: // - // * true – The phone number - // is opted out, meaning you cannot publish SMS messages to it. + // * true – The phone number is + // opted out, meaning you cannot publish SMS messages to it. // - // * false – The - // phone number is opted in, meaning you can publish SMS messages to it. + // * false – The phone + // number is opted in, meaning you can publish SMS messages to it. IsOptedOut *bool // Metadata pertaining to the operation's result. diff --git a/service/sns/api_op_CreatePlatformApplication.go b/service/sns/api_op_CreatePlatformApplication.go index 74958bec5de..7d88ea9fcc4 100644 --- a/service/sns/api_op_CreatePlatformApplication.go +++ b/service/sns/api_op_CreatePlatformApplication.go @@ -17,28 +17,27 @@ import ( // PlatformPrincipal and PlatformCredential are received from the notification // service. // -// * For ADM, PlatformPrincipal is client id and PlatformCredential -// is client secret. +// * For ADM, PlatformPrincipal is client id and PlatformCredential is +// client secret. // -// * For Baidu, PlatformPrincipal is API key and -// PlatformCredential is secret key. +// * For Baidu, PlatformPrincipal is API key and PlatformCredential +// is secret key. // -// * For APNS and APNS_SANDBOX, -// PlatformPrincipal is SSL certificate and PlatformCredential is private key. +// * For APNS and APNS_SANDBOX, PlatformPrincipal is SSL +// certificate and PlatformCredential is private key. // +// * For GCM (Firebase Cloud +// Messaging), there is no PlatformPrincipal and the PlatformCredential is API +// key. // -// * For GCM (Firebase Cloud Messaging), there is no PlatformPrincipal and the -// PlatformCredential is API key. +// * For MPNS, PlatformPrincipal is TLS certificate and PlatformCredential is +// private key. // -// * For MPNS, PlatformPrincipal is TLS -// certificate and PlatformCredential is private key. -// -// * For WNS, -// PlatformPrincipal is Package Security Identifier and PlatformCredential is -// secret key. +// * For WNS, PlatformPrincipal is Package Security Identifier and +// PlatformCredential is secret key. // -// You can use the returned PlatformApplicationArn as an attribute for -// the CreatePlatformEndpoint action. +// You can use the returned +// PlatformApplicationArn as an attribute for the CreatePlatformEndpoint action. 
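As an example of the credential mapping described above, registering a GCM (Firebase Cloud Messaging) application needs only the PlatformCredential attribute, since that platform has no PlatformPrincipal. This is a rough sketch: it assumes the NewFromConfig constructor, an aws.Config loaded elsewhere, Name and Platform fields that are not shown in this hunk, and the map[string]*string attribute shape used elsewhere in this diff.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sns"
)

// registerGCMApp creates a platform application for GCM. Per the documentation
// above, the PlatformCredential for GCM is the API key.
func registerGCMApp(ctx context.Context, cfg aws.Config, apiKey string) (*sns.CreatePlatformApplicationOutput, error) {
	client := sns.NewFromConfig(cfg)
	return client.CreatePlatformApplication(ctx, &sns.CreatePlatformApplicationInput{
		Name:     aws.String("my-mobile-app"), // assumed field name
		Platform: aws.String("GCM"),           // assumed field name
		Attributes: map[string]*string{
			"PlatformCredential": aws.String(apiKey),
		},
	})
}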
func (c *Client) CreatePlatformApplication(ctx context.Context, params *CreatePlatformApplicationInput, optFns ...func(*Options)) (*CreatePlatformApplicationOutput, error) { if params == nil { params = &CreatePlatformApplicationInput{} diff --git a/service/sns/api_op_CreateTopic.go b/service/sns/api_op_CreateTopic.go index 7a31c52a881..aab4b35aee4 100644 --- a/service/sns/api_op_CreateTopic.go +++ b/service/sns/api_op_CreateTopic.go @@ -46,24 +46,24 @@ type CreateTopicInput struct { // names, descriptions, and values of the special request parameters that the // CreateTopic action uses: // - // * DeliveryPolicy – The policy that defines how - // Amazon SNS retries failed deliveries to HTTP/S endpoints. + // * DeliveryPolicy – The policy that defines how Amazon + // SNS retries failed deliveries to HTTP/S endpoints. // - // * DisplayName – - // The display name to use for a topic with SMS subscriptions. + // * DisplayName – The display + // name to use for a topic with SMS subscriptions. // - // * FifoTopic – - // Set to true to create a FIFO topic. + // * FifoTopic – Set to true to + // create a FIFO topic. // - // * Policy – The policy that defines who - // can access your topic. By default, only the topic owner can publish or subscribe - // to the topic. + // * Policy – The policy that defines who can access your + // topic. By default, only the topic owner can publish or subscribe to the + // topic. // // The following attribute applies only to server-side-encryption // (https://docs.aws.amazon.com/sns/latest/dg/sns-server-side-encryption.html): // - // - // * KmsMasterKeyId – The ID of an AWS-managed customer master key (CMK) for Amazon + // * + // KmsMasterKeyId – The ID of an AWS-managed customer master key (CMK) for Amazon // SNS or a custom CMK. For more information, see Key Terms // (https://docs.aws.amazon.com/sns/latest/dg/sns-server-side-encryption.html#sse-key-terms). // For more examples, see KeyId @@ -73,17 +73,17 @@ type CreateTopicInput struct { // The following attribute // applies only to FIFO topics: // - // * ContentBasedDeduplication – Enables + // * ContentBasedDeduplication – Enables // content-based deduplication. Amazon SNS uses a SHA-256 hash to generate the // MessageDeduplicationId using the body of the message (but not the attributes of // the message). // - // * When ContentBasedDeduplication is in effect, messages with + // * When ContentBasedDeduplication is in effect, messages with // identical content sent within the deduplication interval are treated as // duplicates and only one copy of the message is delivered. // - // * If the queue - // has ContentBasedDeduplication set, your MessageDeduplicationId overrides the + // * If the queue has + // ContentBasedDeduplication set, your MessageDeduplicationId overrides the // generated one. Attributes map[string]*string diff --git a/service/sns/api_op_GetEndpointAttributes.go b/service/sns/api_op_GetEndpointAttributes.go index 8b719c75651..895232b7c3a 100644 --- a/service/sns/api_op_GetEndpointAttributes.go +++ b/service/sns/api_op_GetEndpointAttributes.go @@ -43,16 +43,16 @@ type GetEndpointAttributesOutput struct { // Attributes include the following: // - // * CustomUserData – arbitrary user data to + // * CustomUserData – arbitrary user data to // associate with the endpoint. Amazon SNS does not use this data. The data must be // in UTF-8 format and less than 2KB. // - // * Enabled – flag that enables/disables + // * Enabled – flag that enables/disables // delivery to the endpoint. 
Amazon SNS will set this to false when a notification // service indicates to Amazon SNS that the endpoint is invalid. Users can set it // back to true, typically after updating Token. // - // * Token – device token, also + // * Token – device token, also // referred to as a registration id, for an app and mobile device. This is returned // from the notification service when an app and mobile device are registered with // the notification service. The device token for the iOS platform is returned in diff --git a/service/sns/api_op_GetPlatformApplicationAttributes.go b/service/sns/api_op_GetPlatformApplicationAttributes.go index e07937a4c48..8f987cd6c24 100644 --- a/service/sns/api_op_GetPlatformApplicationAttributes.go +++ b/service/sns/api_op_GetPlatformApplicationAttributes.go @@ -43,19 +43,19 @@ type GetPlatformApplicationAttributesOutput struct { // Attributes include the following: // - // * EventEndpointCreated – Topic ARN to - // which EndpointCreated event notifications should be sent. + // * EventEndpointCreated – Topic ARN to which + // EndpointCreated event notifications should be sent. // - // * - // EventEndpointDeleted – Topic ARN to which EndpointDeleted event notifications - // should be sent. + // * EventEndpointDeleted – + // Topic ARN to which EndpointDeleted event notifications should be sent. // - // * EventEndpointUpdated – Topic ARN to which EndpointUpdate - // event notifications should be sent. + // * + // EventEndpointUpdated – Topic ARN to which EndpointUpdate event notifications + // should be sent. // - // * EventDeliveryFailure – Topic ARN to - // which DeliveryFailure event notifications should be sent upon Direct Publish - // delivery failure (permanent) to one of the application's endpoints. + // * EventDeliveryFailure – Topic ARN to which DeliveryFailure + // event notifications should be sent upon Direct Publish delivery failure + // (permanent) to one of the application's endpoints. Attributes map[string]*string // Metadata pertaining to the operation's result. diff --git a/service/sns/api_op_GetSubscriptionAttributes.go b/service/sns/api_op_GetSubscriptionAttributes.go index e635ce76593..e551268668c 100644 --- a/service/sns/api_op_GetSubscriptionAttributes.go +++ b/service/sns/api_op_GetSubscriptionAttributes.go @@ -41,45 +41,45 @@ type GetSubscriptionAttributesOutput struct { // A map of the subscription's attributes. Attributes in this map include the // following: // - // * ConfirmationWasAuthenticated – true if the subscription + // * ConfirmationWasAuthenticated – true if the subscription // confirmation request was authenticated. // - // * DeliveryPolicy – The JSON + // * DeliveryPolicy – The JSON // serialization of the subscription's delivery policy. // - // * - // EffectiveDeliveryPolicy – The JSON serialization of the effective delivery - // policy that takes into account the topic delivery policy and account system - // defaults. + // * EffectiveDeliveryPolicy + // – The JSON serialization of the effective delivery policy that takes into + // account the topic delivery policy and account system defaults. // - // * FilterPolicy – The filter policy JSON that is assigned to the - // subscription. For more information, see Amazon SNS Message Filtering + // * FilterPolicy – + // The filter policy JSON that is assigned to the subscription. For more + // information, see Amazon SNS Message Filtering // (https://docs.aws.amazon.com/sns/latest/dg/sns-message-filtering.html) in the // Amazon SNS Developer Guide. 
// - // * Owner – The AWS account ID of the - // subscription's owner. + // * Owner – The AWS account ID of the subscription's + // owner. // - // * PendingConfirmation – true if the subscription - // hasn't been confirmed. To confirm a pending subscription, call the - // ConfirmSubscription action with a confirmation token. + // * PendingConfirmation – true if the subscription hasn't been confirmed. + // To confirm a pending subscription, call the ConfirmSubscription action with a + // confirmation token. // - // * RawMessageDelivery - // – true if raw message delivery is enabled for the subscription. Raw messages are - // free of JSON formatting and can be sent to HTTP/S and Amazon SQS endpoints. + // * RawMessageDelivery – true if raw message delivery is + // enabled for the subscription. Raw messages are free of JSON formatting and can + // be sent to HTTP/S and Amazon SQS endpoints. // + // * RedrivePolicy – When specified, + // sends undeliverable messages to the specified Amazon SQS dead-letter queue. + // Messages that can't be delivered due to client errors (for example, when the + // subscribed endpoint is unreachable) or server errors (for example, when the + // service that powers the subscribed endpoint becomes unavailable) are held in the + // dead-letter queue for further analysis or reprocessing. // - // * RedrivePolicy – When specified, sends undeliverable messages to the specified - // Amazon SQS dead-letter queue. Messages that can't be delivered due to client - // errors (for example, when the subscribed endpoint is unreachable) or server - // errors (for example, when the service that powers the subscribed endpoint - // becomes unavailable) are held in the dead-letter queue for further analysis or - // reprocessing. + // * SubscriptionArn – The + // subscription's ARN. // - // * SubscriptionArn – The subscription's ARN. - // - // * TopicArn – - // The topic ARN that the subscription is associated with. + // * TopicArn – The topic ARN that the subscription is + // associated with. Attributes map[string]*string // Metadata pertaining to the operation's result. diff --git a/service/sns/api_op_GetTopicAttributes.go b/service/sns/api_op_GetTopicAttributes.go index 624765a4464..606ef1d7d4b 100644 --- a/service/sns/api_op_GetTopicAttributes.go +++ b/service/sns/api_op_GetTopicAttributes.go @@ -39,42 +39,42 @@ type GetTopicAttributesInput struct { // Response for GetTopicAttributes action. type GetTopicAttributesOutput struct { - // A map of the topic's attributes. Attributes in this map include the following: + // A map of the topic's attributes. Attributes in this map include the + // following: // + // * DeliveryPolicy – The JSON serialization of the topic's delivery + // policy. // - // * DeliveryPolicy – The JSON serialization of the topic's delivery policy. + // * DisplayName – The human-readable name used in the From field for + // notifications to email and email-json endpoints. // - // * - // DisplayName – The human-readable name used in the From field for notifications - // to email and email-json endpoints. + // * Owner – The AWS account ID + // of the topic's owner. // - // * Owner – The AWS account ID of the - // topic's owner. - // - // * Policy – The JSON serialization of the topic's access + // * Policy – The JSON serialization of the topic's access // control policy. // - // * SubscriptionsConfirmed – The number of confirmed + // * SubscriptionsConfirmed – The number of confirmed // subscriptions for the topic. 
// - // * SubscriptionsDeleted – The number of deleted + // * SubscriptionsDeleted – The number of deleted // subscriptions for the topic. // - // * SubscriptionsPending – The number of + // * SubscriptionsPending – The number of // subscriptions pending confirmation for the topic. // - // * TopicArn – The topic's + // * TopicArn – The topic's // ARN. // - // * EffectiveDeliveryPolicy – The JSON serialization of the effective + // * EffectiveDeliveryPolicy – The JSON serialization of the effective // delivery policy, taking system defaults into account. // // The following attribute // applies only to server-side-encryption // (https://docs.aws.amazon.com/sns/latest/dg/sns-server-side-encryption.html): // - // - // * KmsMasterKeyId - The ID of an AWS-managed customer master key (CMK) for Amazon + // * + // KmsMasterKeyId - The ID of an AWS-managed customer master key (CMK) for Amazon // SNS or a custom CMK. For more information, see Key Terms // (https://docs.aws.amazon.com/sns/latest/dg/sns-server-side-encryption.html#sse-key-terms). // For more examples, see KeyId diff --git a/service/sns/api_op_Publish.go b/service/sns/api_op_Publish.go index 44b534649bd..3918ea0c224 100644 --- a/service/sns/api_op_Publish.go +++ b/service/sns/api_op_Publish.go @@ -49,49 +49,48 @@ type PublishInput struct { // transport protocol, set the value of the MessageStructure parameter to json and // use a JSON object for the Message parameter. Constraints: // - // * With the - // exception of SMS, messages must be UTF-8 encoded strings and at most 256 KB in - // size (262,144 bytes, not 262,144 characters). - // - // * For SMS, each message can - // contain up to 140 characters. This character limit depends on the encoding - // schema. For example, an SMS message can contain 160 GSM characters, 140 ASCII - // characters, or 70 UCS-2 characters. If you publish a message that exceeds this - // size limit, Amazon SNS sends the message as multiple messages, each fitting - // within the size limit. Messages aren't truncated mid-word but are cut off at - // whole-word boundaries. The total size limit for a single SMS Publish action is - // 1,600 characters. + // * With the exception + // of SMS, messages must be UTF-8 encoded strings and at most 256 KB in size + // (262,144 bytes, not 262,144 characters). + // + // * For SMS, each message can contain up + // to 140 characters. This character limit depends on the encoding schema. For + // example, an SMS message can contain 160 GSM characters, 140 ASCII characters, or + // 70 UCS-2 characters. If you publish a message that exceeds this size limit, + // Amazon SNS sends the message as multiple messages, each fitting within the size + // limit. Messages aren't truncated mid-word but are cut off at whole-word + // boundaries. The total size limit for a single SMS Publish action is 1,600 + // characters. // // JSON-specific constraints: // - // * Keys in the JSON object - // that correspond to supported transport protocols must have simple JSON string + // * Keys in the JSON object that + // correspond to supported transport protocols must have simple JSON string // values. // - // * The values will be parsed (unescaped) before they are used in + // * The values will be parsed (unescaped) before they are used in // outgoing messages. // - // * Outbound notifications are JSON encoded (meaning that - // the characters will be reescaped for sending). + // * Outbound notifications are JSON encoded (meaning that the + // characters will be reescaped for sending). 
// - // * Values have a minimum - // length of 0 (the empty string, "", is allowed). + // * Values have a minimum length of 0 + // (the empty string, "", is allowed). // - // * Values have a maximum - // length bounded by the overall message size (so, including multiple protocols may - // limit message sizes). + // * Values have a maximum length bounded by + // the overall message size (so, including multiple protocols may limit message + // sizes). // - // * Non-string values will cause the key to be - // ignored. + // * Non-string values will cause the key to be ignored. // - // * Keys that do not correspond to supported transport protocols are - // ignored. + // * Keys that do + // not correspond to supported transport protocols are ignored. // - // * Duplicate keys are not allowed. + // * Duplicate keys + // are not allowed. // - // * Failure to parse or - // validate any key or value in the message will cause the Publish call to return - // an error (no partial delivery). + // * Failure to parse or validate any key or value in the message + // will cause the Publish call to return an error (no partial delivery). // // This member is required. Message *string @@ -125,15 +124,15 @@ type PublishInput struct { // your SMS subscribers and a longer message to your email subscribers. If you set // MessageStructure to json, the value of the Message parameter must: // - // * be a + // * be a // syntactically valid JSON object; and // - // * contain at least a top-level JSON - // key of "default" with a value that is a string. + // * contain at least a top-level JSON key of + // "default" with a value that is a string. // - // You can define other top-level - // keys that define the message you want to send to a specific transport protocol - // (e.g., "http"). Valid value: json + // You can define other top-level keys + // that define the message you want to send to a specific transport protocol (e.g., + // "http"). Valid value: json MessageStructure *string // The phone number to which you want to deliver an SMS message. Use E.164 format. diff --git a/service/sns/api_op_SetEndpointAttributes.go b/service/sns/api_op_SetEndpointAttributes.go index 2786c3fd48b..da178e64775 100644 --- a/service/sns/api_op_SetEndpointAttributes.go +++ b/service/sns/api_op_SetEndpointAttributes.go @@ -35,17 +35,17 @@ type SetEndpointAttributesInput struct { // A map of the endpoint attributes. Attributes in this map include the // following: // - // * CustomUserData – arbitrary user data to associate with the + // * CustomUserData – arbitrary user data to associate with the // endpoint. Amazon SNS does not use this data. The data must be in UTF-8 format // and less than 2KB. // - // * Enabled – flag that enables/disables delivery to the + // * Enabled – flag that enables/disables delivery to the // endpoint. Amazon SNS will set this to false when a notification service // indicates to Amazon SNS that the endpoint is invalid. Users can set it back to // true, typically after updating Token. // - // * Token – device token, also referred - // to as a registration id, for an app and mobile device. This is returned from the + // * Token – device token, also referred to + // as a registration id, for an app and mobile device. This is returned from the // notification service when an app and mobile device are registered with the // notification service. 
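The SetEndpointAttributes hunk above documents the Enabled and Token attributes. As an illustrative sketch outside the patch itself (the EndpointArn field name and the map[string]*string attribute shape are assumed from the conventions visible elsewhere in this change set), the corresponding call with this module might look like:

// Illustrative only, not part of the generated change: re-enables delivery to
// a platform endpoint after refreshing its device token, per the Enabled and
// Token attribute descriptions above.
package snsexamples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sns"
)

// ReenableEndpoint stores a fresh device token and flips Enabled back to
// "true" so Amazon SNS resumes delivery to the endpoint.
func ReenableEndpoint(ctx context.Context, client *sns.Client, endpointARN, freshToken string) error {
	_, err := client.SetEndpointAttributes(ctx, &sns.SetEndpointAttributesInput{
		EndpointArn: aws.String(endpointARN), // assumed field name for the endpoint ARN
		Attributes: map[string]*string{
			"Token":   aws.String(freshToken),
			"Enabled": aws.String("true"),
		},
	})
	return err
}

Because Amazon SNS sets Enabled to false when the notification service reports the token invalid, pairing the token update with Enabled set back to "true" in a single call keeps the endpoint deliverable.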
// diff --git a/service/sns/api_op_SetPlatformApplicationAttributes.go b/service/sns/api_op_SetPlatformApplicationAttributes.go index b078223f8af..da3e61cfda6 100644 --- a/service/sns/api_op_SetPlatformApplicationAttributes.go +++ b/service/sns/api_op_SetPlatformApplicationAttributes.go @@ -38,39 +38,39 @@ type SetPlatformApplicationAttributesInput struct { // A map of the platform application attributes. Attributes in this map include the // following: // - // * PlatformCredential – The credential received from the - // notification service. For APNS and APNS_SANDBOX, PlatformCredential is private - // key. For GCM (Firebase Cloud Messaging), PlatformCredential is API key. For ADM, + // * PlatformCredential – The credential received from the notification + // service. For APNS and APNS_SANDBOX, PlatformCredential is private key. For GCM + // (Firebase Cloud Messaging), PlatformCredential is API key. For ADM, // PlatformCredential is client secret. // - // * PlatformPrincipal – The principal + // * PlatformPrincipal – The principal // received from the notification service. For APNS and APNS_SANDBOX, // PlatformPrincipal is SSL certificate. For GCM (Firebase Cloud Messaging), there // is no PlatformPrincipal. For ADM, PlatformPrincipal is client id. // - // * + // * // EventEndpointCreated – Topic ARN to which EndpointCreated event notifications // are sent. // - // * EventEndpointDeleted – Topic ARN to which EndpointDeleted event + // * EventEndpointDeleted – Topic ARN to which EndpointDeleted event // notifications are sent. // - // * EventEndpointUpdated – Topic ARN to which + // * EventEndpointUpdated – Topic ARN to which // EndpointUpdate event notifications are sent. // - // * EventDeliveryFailure – Topic - // ARN to which DeliveryFailure event notifications are sent upon Direct Publish + // * EventDeliveryFailure – Topic ARN + // to which DeliveryFailure event notifications are sent upon Direct Publish // delivery failure (permanent) to one of the application's endpoints. // - // * + // * // SuccessFeedbackRoleArn – IAM role ARN used to give Amazon SNS write access to // use CloudWatch Logs on your behalf. // - // * FailureFeedbackRoleArn – IAM role ARN + // * FailureFeedbackRoleArn – IAM role ARN // used to give Amazon SNS write access to use CloudWatch Logs on your behalf. // - // - // * SuccessFeedbackSampleRate – Sample rate percentage (0-100) of successfully + // * + // SuccessFeedbackSampleRate – Sample rate percentage (0-100) of successfully // delivered messages. // // This member is required. diff --git a/service/sns/api_op_SetSMSAttributes.go b/service/sns/api_op_SetSMSAttributes.go index 5186b9f5013..c9a66a375ce 100644 --- a/service/sns/api_op_SetSMSAttributes.go +++ b/service/sns/api_op_SetSMSAttributes.go @@ -61,11 +61,11 @@ type SetSMSAttributesInput struct { // – The type of SMS message that you will send by default. You can assign the // following values: // - // * Promotional – (Default) Noncritical messages, such as + // * Promotional – (Default) Noncritical messages, such as // marketing messages. Amazon SNS optimizes the message delivery to incur the // lowest cost. // - // * Transactional – Critical messages that support customer + // * Transactional – Critical messages that support customer // transactions, such as one-time passcodes for multi-factor authentication. Amazon // SNS optimizes the message delivery to achieve the highest // reliability. 
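For the DefaultSMSType attribute documented just above, a minimal sketch of setting the account default to Transactional with this module (the Attributes field is assumed to use the same map[string]*string shape as the other SNS inputs in this patch):

// Illustrative only, not part of the generated change.
package snsexamples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sns"
)

// UseTransactionalSMS sets the account-level default SMS type to
// Transactional so messages such as one-time passcodes are delivered with the
// highest reliability, at a higher cost than the Promotional default.
func UseTransactionalSMS(ctx context.Context, client *sns.Client) error {
	_, err := client.SetSMSAttributes(ctx, &sns.SetSMSAttributesInput{
		Attributes: map[string]*string{
			"DefaultSMSType": aws.String("Transactional"),
		},
	})
	return err
}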
@@ -76,29 +76,29 @@ type SetSMSAttributesInput struct { // information for each SMS message that was successfully delivered by your // account: // - // * Time that the message was published (in UTC) + // * Time that the message was published (in UTC) // - // * Message ID + // * Message ID // + // * + // Destination phone number // - // * Destination phone number + // * Message type // - // * Message type + // * Delivery status // - // * Delivery status + // * Message price (in + // USD) // - // * - // Message price (in USD) + // * Part number (a message is split into multiple parts if it is too long + // for a single message) // - // * Part number (a message is split into multiple - // parts if it is too long for a single message) + // * Total number of parts // - // * Total number of parts - // - // To - // receive the report, the bucket must have a policy that allows the Amazon SNS - // service principle to perform the s3:PutObject and s3:GetBucketLocation actions. - // For an example bucket policy and usage report, see Monitoring SMS Activity + // To receive the report, the + // bucket must have a policy that allows the Amazon SNS service principle to + // perform the s3:PutObject and s3:GetBucketLocation actions. For an example bucket + // policy and usage report, see Monitoring SMS Activity // (https://docs.aws.amazon.com/sns/latest/dg/sms_stats.html) in the Amazon SNS // Developer Guide. // diff --git a/service/sns/api_op_SetSubscriptionAttributes.go b/service/sns/api_op_SetSubscriptionAttributes.go index babd0174950..40434910fc9 100644 --- a/service/sns/api_op_SetSubscriptionAttributes.go +++ b/service/sns/api_op_SetSubscriptionAttributes.go @@ -34,23 +34,23 @@ type SetSubscriptionAttributesInput struct { // names, descriptions, and values of the special request parameters that the // SetTopicAttributes action uses: // - // * DeliveryPolicy – The policy that defines - // how Amazon SNS retries failed deliveries to HTTP/S endpoints. + // * DeliveryPolicy – The policy that defines how + // Amazon SNS retries failed deliveries to HTTP/S endpoints. // - // * - // FilterPolicy – The simple JSON object that lets your subscriber receive only a - // subset of messages, rather than receiving every message published to the - // topic. + // * FilterPolicy – The + // simple JSON object that lets your subscriber receive only a subset of messages, + // rather than receiving every message published to the topic. // - // * RawMessageDelivery – When set to true, enables raw message - // delivery to Amazon SQS or HTTP/S endpoints. This eliminates the need for the - // endpoints to process JSON formatting, which is otherwise created for Amazon SNS - // metadata. + // * + // RawMessageDelivery – When set to true, enables raw message delivery to Amazon + // SQS or HTTP/S endpoints. This eliminates the need for the endpoints to process + // JSON formatting, which is otherwise created for Amazon SNS metadata. // - // * RedrivePolicy – When specified, sends undeliverable messages to - // the specified Amazon SQS dead-letter queue. Messages that can't be delivered due - // to client errors (for example, when the subscribed endpoint is unreachable) or - // server errors (for example, when the service that powers the subscribed endpoint + // * + // RedrivePolicy – When specified, sends undeliverable messages to the specified + // Amazon SQS dead-letter queue. 
Messages that can't be delivered due to client + // errors (for example, when the subscribed endpoint is unreachable) or server + // errors (for example, when the service that powers the subscribed endpoint // becomes unavailable) are held in the dead-letter queue for further analysis or // reprocessing. // diff --git a/service/sns/api_op_SetTopicAttributes.go b/service/sns/api_op_SetTopicAttributes.go index 2caf0c8ac4f..a905f4c6ff3 100644 --- a/service/sns/api_op_SetTopicAttributes.go +++ b/service/sns/api_op_SetTopicAttributes.go @@ -33,22 +33,22 @@ type SetTopicAttributesInput struct { // names, descriptions, and values of the special request parameters that the // SetTopicAttributes action uses: // - // * DeliveryPolicy – The policy that defines - // how Amazon SNS retries failed deliveries to HTTP/S endpoints. + // * DeliveryPolicy – The policy that defines how + // Amazon SNS retries failed deliveries to HTTP/S endpoints. // - // * DisplayName - // – The display name to use for a topic with SMS subscriptions. + // * DisplayName – The + // display name to use for a topic with SMS subscriptions. // - // * Policy – - // The policy that defines who can access your topic. By default, only the topic - // owner can publish or subscribe to the topic. + // * Policy – The policy + // that defines who can access your topic. By default, only the topic owner can + // publish or subscribe to the topic. // - // The following attribute applies - // only to server-side-encryption + // The following attribute applies only to + // server-side-encryption // (https://docs.aws.amazon.com/sns/latest/dg/sns-server-side-encryption.html): // - // - // * KmsMasterKeyId – The ID of an AWS-managed customer master key (CMK) for Amazon + // * + // KmsMasterKeyId – The ID of an AWS-managed customer master key (CMK) for Amazon // SNS or a custom CMK. For more information, see Key Terms // (https://docs.aws.amazon.com/sns/latest/dg/sns-server-side-encryption.html#sse-key-terms). // For more examples, see KeyId @@ -58,17 +58,17 @@ type SetTopicAttributesInput struct { // The following attribute // applies only to FIFO topics: // - // * ContentBasedDeduplication – Enables + // * ContentBasedDeduplication – Enables // content-based deduplication. Amazon SNS uses a SHA-256 hash to generate the // MessageDeduplicationId using the body of the message (but not the attributes of // the message). // - // * When ContentBasedDeduplication is in effect, messages with + // * When ContentBasedDeduplication is in effect, messages with // identical content sent within the deduplication interval are treated as // duplicates and only one copy of the message is delivered. // - // * If the queue - // has ContentBasedDeduplication set, your MessageDeduplicationId overrides the + // * If the queue has + // ContentBasedDeduplication set, your MessageDeduplicationId overrides the // generated one. // // This member is required. diff --git a/service/sns/api_op_Subscribe.go b/service/sns/api_op_Subscribe.go index 6a9dd0e8f40..fd4770ecb11 100644 --- a/service/sns/api_op_Subscribe.go +++ b/service/sns/api_op_Subscribe.go @@ -36,28 +36,28 @@ type SubscribeInput struct { // The protocol you want to use. 
Supported protocols include: // - // * http – - // delivery of JSON-encoded message via HTTP POST + // * http – delivery of + // JSON-encoded message via HTTP POST // - // * https – delivery of - // JSON-encoded message via HTTPS POST + // * https – delivery of JSON-encoded message + // via HTTPS POST // - // * email – delivery of message via - // SMTP + // * email – delivery of message via SMTP // - // * email-json – delivery of JSON-encoded message via SMTP + // * email-json – delivery + // of JSON-encoded message via SMTP // - // * sms – - // delivery of message via SMS + // * sms – delivery of message via SMS // - // * sqs – delivery of JSON-encoded message to an - // Amazon SQS queue + // * sqs – + // delivery of JSON-encoded message to an Amazon SQS queue // - // * application – delivery of JSON-encoded message to an - // EndpointArn for a mobile app and device. + // * application – + // delivery of JSON-encoded message to an EndpointArn for a mobile app and + // device. // - // * lambda – delivery of - // JSON-encoded message to an Amazon Lambda function. + // * lambda – delivery of JSON-encoded message to an Amazon Lambda + // function. // // This member is required. Protocol *string @@ -71,23 +71,23 @@ type SubscribeInput struct { // names, descriptions, and values of the special request parameters that the // SetTopicAttributes action uses: // - // * DeliveryPolicy – The policy that defines - // how Amazon SNS retries failed deliveries to HTTP/S endpoints. + // * DeliveryPolicy – The policy that defines how + // Amazon SNS retries failed deliveries to HTTP/S endpoints. // - // * - // FilterPolicy – The simple JSON object that lets your subscriber receive only a - // subset of messages, rather than receiving every message published to the - // topic. + // * FilterPolicy – The + // simple JSON object that lets your subscriber receive only a subset of messages, + // rather than receiving every message published to the topic. // - // * RawMessageDelivery – When set to true, enables raw message - // delivery to Amazon SQS or HTTP/S endpoints. This eliminates the need for the - // endpoints to process JSON formatting, which is otherwise created for Amazon SNS - // metadata. + // * + // RawMessageDelivery – When set to true, enables raw message delivery to Amazon + // SQS or HTTP/S endpoints. This eliminates the need for the endpoints to process + // JSON formatting, which is otherwise created for Amazon SNS metadata. // - // * RedrivePolicy – When specified, sends undeliverable messages to - // the specified Amazon SQS dead-letter queue. Messages that can't be delivered due - // to client errors (for example, when the subscribed endpoint is unreachable) or - // server errors (for example, when the service that powers the subscribed endpoint + // * + // RedrivePolicy – When specified, sends undeliverable messages to the specified + // Amazon SQS dead-letter queue. Messages that can't be delivered due to client + // errors (for example, when the subscribed endpoint is unreachable) or server + // errors (for example, when the service that powers the subscribed endpoint // becomes unavailable) are held in the dead-letter queue for further analysis or // reprocessing. Attributes map[string]*string @@ -95,43 +95,42 @@ type SubscribeInput struct { // The endpoint that you want to receive notifications. 
Endpoints vary by // protocol: // - // * For the http protocol, the (public) endpoint is a URL beginning + // * For the http protocol, the (public) endpoint is a URL beginning // with http:// // - // * For the https protocol, the (public) endpoint is a URL - // beginning with https:// + // * For the https protocol, the (public) endpoint is a URL beginning + // with https:// // - // * For the email protocol, the endpoint is an email - // address + // * For the email protocol, the endpoint is an email address // - // * For the email-json protocol, the endpoint is an email address + // * For + // the email-json protocol, the endpoint is an email address // + // * For the sms + // protocol, the endpoint is a phone number of an SMS-enabled device // - // * For the sms protocol, the endpoint is a phone number of an SMS-enabled - // device + // * For the sqs + // protocol, the endpoint is the ARN of an Amazon SQS queue // - // * For the sqs protocol, the endpoint is the ARN of an Amazon SQS - // queue + // * For the application + // protocol, the endpoint is the EndpointArn of a mobile app and device. // - // * For the application protocol, the endpoint is the EndpointArn of a - // mobile app and device. - // - // * For the lambda protocol, the endpoint is the ARN - // of an Amazon Lambda function. + // * For the + // lambda protocol, the endpoint is the ARN of an Amazon Lambda function. Endpoint *string // Sets whether the response from the Subscribe request includes the subscription // ARN, even if the subscription is not yet confirmed. // - // * If you set this - // parameter to true, the response includes the ARN in all cases, even if the - // subscription is not yet confirmed. In addition to the ARN for confirmed - // subscriptions, the response also includes the pending subscription ARN value for - // subscriptions that aren't yet confirmed. A subscription becomes confirmed when - // the subscriber calls the ConfirmSubscription action with a confirmation - // token. + // * If you set this parameter + // to true, the response includes the ARN in all cases, even if the subscription is + // not yet confirmed. In addition to the ARN for confirmed subscriptions, the + // response also includes the pending subscription ARN value for subscriptions that + // aren't yet confirmed. A subscription becomes confirmed when the subscriber calls + // the ConfirmSubscription action with a confirmation token. // - // The default value is false. + // The default value is + // false. ReturnSubscriptionArn *bool } diff --git a/service/sns/api_op_TagResource.go b/service/sns/api_op_TagResource.go index 5189e6309c6..90e46e67e0b 100644 --- a/service/sns/api_op_TagResource.go +++ b/service/sns/api_op_TagResource.go @@ -16,20 +16,20 @@ import ( // Developer Guide. When you use topic tags, keep the following guidelines in // mind: // -// * Adding more than 50 tags to a topic isn't recommended. +// * Adding more than 50 tags to a topic isn't recommended. // -// * Tags -// don't have any semantic meaning. Amazon SNS interprets tags as character -// strings. +// * Tags don't +// have any semantic meaning. Amazon SNS interprets tags as character strings. // -// * Tags are case-sensitive. +// * +// Tags are case-sensitive. // -// * A new tag with a key identical -// to that of an existing tag overwrites the existing tag. +// * A new tag with a key identical to that of an +// existing tag overwrites the existing tag. // -// * Tagging actions -// are limited to 10 TPS per AWS account, per AWS region. 
If your application -// requires a higher throughput, file a technical support request +// * Tagging actions are limited to 10 +// TPS per AWS account, per AWS region. If your application requires a higher +// throughput, file a technical support request // (https://console.aws.amazon.com/support/home#/case/create?issueType=technical). func (c *Client) TagResource(ctx context.Context, params *TagResourceInput, optFns ...func(*Options)) (*TagResourceOutput, error) { if params == nil { diff --git a/service/sqs/api_op_AddPermission.go b/service/sqs/api_op_AddPermission.go index bb4dc3f350b..23f6996b252 100644 --- a/service/sqs/api_op_AddPermission.go +++ b/service/sqs/api_op_AddPermission.go @@ -19,17 +19,17 @@ import ( // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-writing-an-sqs-policy.html#write-messages-to-shared-queue) // in the Amazon Simple Queue Service Developer Guide. // -// * AddPermission -// generates a policy for you. You can use SetQueueAttributes to upload your -// policy. For more information, see Using Custom Policies with the Amazon SQS -// Access Policy Language +// * AddPermission generates a +// policy for you. You can use SetQueueAttributes to upload your policy. For more +// information, see Using Custom Policies with the Amazon SQS Access Policy +// Language // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-creating-custom-policies.html) // in the Amazon Simple Queue Service Developer Guide. // -// * An Amazon SQS policy -// can have a maximum of 7 actions. +// * An Amazon SQS policy can +// have a maximum of 7 actions. // -// * To remove the ability to change queue +// * To remove the ability to change queue // permissions, you must deny permission to the AddPermission, RemovePermission, // and SetQueueAttributes actions in your IAM policy. // diff --git a/service/sqs/api_op_ChangeMessageVisibility.go b/service/sqs/api_op_ChangeMessageVisibility.go index 3cdb3d627ec..b995295ace0 100644 --- a/service/sqs/api_op_ChangeMessageVisibility.go +++ b/service/sqs/api_op_ChangeMessageVisibility.go @@ -21,28 +21,28 @@ import ( // time. If you try to extend the visibility timeout beyond the maximum, your // request is rejected. An Amazon SQS message has three basic states: // -// * Sent -// to a queue by a producer. +// * Sent to a +// queue by a producer. // -// * Received from the queue by a consumer. +// * Received from the queue by a consumer. // -// * -// Deleted from the queue. +// * Deleted from +// the queue. // -// A message is considered to be stored after it is sent -// to a queue by a producer, but not yet received from the queue by a consumer -// (that is, between states 1 and 2). There is no limit to the number of stored -// messages. A message is considered to be in flight after it is received from a -// queue by a consumer, but not yet deleted from the queue (that is, between states -// 2 and 3). There is a limit to the number of inflight messages. Limits that apply -// to inflight messages are unrelated to the unlimited number of stored messages. -// For most standard queues (depending on queue traffic and message backlog), there -// can be a maximum of approximately 120,000 inflight messages (received from a -// queue by a consumer, but not yet deleted from the queue). If you reach this -// limit, Amazon SQS returns the OverLimit error message. To avoid reaching the -// limit, you should delete messages from the queue after they're processed. 
You -// can also increase the number of queues you use to process your messages. To -// request a limit increase, file a support request +// A message is considered to be stored after it is sent to a queue by +// a producer, but not yet received from the queue by a consumer (that is, between +// states 1 and 2). There is no limit to the number of stored messages. A message +// is considered to be in flight after it is received from a queue by a consumer, +// but not yet deleted from the queue (that is, between states 2 and 3). There is a +// limit to the number of inflight messages. Limits that apply to inflight messages +// are unrelated to the unlimited number of stored messages. For most standard +// queues (depending on queue traffic and message backlog), there can be a maximum +// of approximately 120,000 inflight messages (received from a queue by a consumer, +// but not yet deleted from the queue). If you reach this limit, Amazon SQS returns +// the OverLimit error message. To avoid reaching the limit, you should delete +// messages from the queue after they're processed. You can also increase the +// number of queues you use to process your messages. To request a limit increase, +// file a support request // (https://console.aws.amazon.com/support/home#/case/create?issueType=service-limit-increase&limitType=service-code-sqs). // For FIFO queues, there can be a maximum of 20,000 inflight messages (received // from a queue by a consumer, but not yet deleted from the queue). If you reach diff --git a/service/sqs/api_op_CreateQueue.go b/service/sqs/api_op_CreateQueue.go index 641cd6fde6e..6c18d4df659 100644 --- a/service/sqs/api_op_CreateQueue.go +++ b/service/sqs/api_op_CreateQueue.go @@ -13,7 +13,7 @@ import ( // Creates a new standard or FIFO queue. You can pass one or more attributes in the // request. Keep the following in mind: // -// * If you don't specify the FifoQueue +// * If you don't specify the FifoQueue // attribute, Amazon SQS creates a standard queue. You can't change the queue type // after you create it and you can't convert an existing standard queue into a FIFO // queue. You must either create a new FIFO queue for your application or delete @@ -22,31 +22,31 @@ import ( // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-moving) // in the Amazon Simple Queue Service Developer Guide. // -// * If you don't provide -// a value for an attribute, the queue is created with the default value for the +// * If you don't provide a +// value for an attribute, the queue is created with the default value for the // attribute. // -// * If you delete a queue, you must wait at least 60 seconds -// before creating a queue with the same name. +// * If you delete a queue, you must wait at least 60 seconds before +// creating a queue with the same name. // -// To successfully create a new queue, -// you must provide a queue name that adheres to the limits related to queues +// To successfully create a new queue, you +// must provide a queue name that adheres to the limits related to queues // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/limits-queues.html) // and is unique within the scope of your queues. After you create a queue, you // must wait at least one second after the queue is created to be able to use the // queue. To get the queue URL, use the GetQueueUrl action. GetQueueUrl requires // only the QueueName parameter. 
be aware of existing queue names: // -// * If you +// * If you // provide the name of an existing queue along with the exact names and values of // all the queue's attributes, CreateQueue returns the queue URL for the existing // queue. // -// * If the queue name, attribute names, or attribute values don't -// match an existing queue, CreateQueue returns an error. +// * If the queue name, attribute names, or attribute values don't match an +// existing queue, CreateQueue returns an error. // -// Some actions take lists -// of parameters. These lists are specified using the param.n notation. Values of n +// Some actions take lists of +// parameters. These lists are specified using the param.n notation. Values of n // are integers starting from 1. For example, a parameter list with two elements // looks like this: &AttributeName.1=first&AttributeName.2=second Cross-account // permissions don't apply to this action. For more information, see Grant @@ -73,14 +73,14 @@ type CreateQueueInput struct { // The name of the new queue. The following limits apply to this name: // - // * A - // queue name can have up to 80 characters. + // * A queue + // name can have up to 80 characters. // - // * Valid values: alphanumeric - // characters, hyphens (-), and underscores (_). + // * Valid values: alphanumeric characters, + // hyphens (-), and underscores (_). // - // * A FIFO queue name must end - // with the .fifo suffix. + // * A FIFO queue name must end with the .fifo + // suffix. // // Queue URLs and names are case-sensitive. // @@ -91,56 +91,53 @@ type CreateQueueInput struct { // names, descriptions, and values of the special request parameters that the // CreateQueue action uses: // - // * DelaySeconds – The length of time, in seconds, - // for which the delivery of all messages in the queue is delayed. Valid values: An + // * DelaySeconds – The length of time, in seconds, for + // which the delivery of all messages in the queue is delayed. Valid values: An // integer from 0 to 900 seconds (15 minutes). Default: 0. // - // * - // MaximumMessageSize – The limit of how many bytes a message can contain before - // Amazon SQS rejects it. Valid values: An integer from 1,024 bytes (1 KiB) to - // 262,144 bytes (256 KiB). Default: 262,144 (256 KiB). + // * MaximumMessageSize – + // The limit of how many bytes a message can contain before Amazon SQS rejects it. + // Valid values: An integer from 1,024 bytes (1 KiB) to 262,144 bytes (256 KiB). + // Default: 262,144 (256 KiB). // - // * - // MessageRetentionPeriod – The length of time, in seconds, for which Amazon SQS - // retains a message. Valid values: An integer from 60 seconds (1 minute) to - // 1,209,600 seconds (14 days). Default: 345,600 (4 days). + // * MessageRetentionPeriod – The length of time, in + // seconds, for which Amazon SQS retains a message. Valid values: An integer from + // 60 seconds (1 minute) to 1,209,600 seconds (14 days). Default: 345,600 (4 + // days). // - // * Policy – The - // queue's policy. A valid AWS policy. For more information about policy structure, - // see Overview of AWS IAM Policies + // * Policy – The queue's policy. A valid AWS policy. For more information + // about policy structure, see Overview of AWS IAM Policies // (https://docs.aws.amazon.com/IAM/latest/UserGuide/PoliciesOverview.html) in the // Amazon IAM User Guide. // - // * ReceiveMessageWaitTimeSeconds – The length of - // time, in seconds, for which a ReceiveMessage action waits for a message to - // arrive. 
Valid values: An integer from 0 to 20 (seconds). Default: 0. + // * ReceiveMessageWaitTimeSeconds – The length of time, in + // seconds, for which a ReceiveMessage action waits for a message to arrive. Valid + // values: An integer from 0 to 20 (seconds). Default: 0. // - // * - // RedrivePolicy – The string that includes the parameters for the dead-letter - // queue functionality of the source queue as a JSON object. For more information - // about the redrive policy and dead-letter queues, see Using Amazon SQS - // Dead-Letter Queues + // * RedrivePolicy – The + // string that includes the parameters for the dead-letter queue functionality of + // the source queue as a JSON object. For more information about the redrive policy + // and dead-letter queues, see Using Amazon SQS Dead-Letter Queues // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html) // in the Amazon Simple Queue Service Developer Guide. // - // * - // deadLetterTargetArn – The Amazon Resource Name (ARN) of the dead-letter queue to - // which Amazon SQS moves messages after the value of maxReceiveCount is - // exceeded. + // * deadLetterTargetArn – The + // Amazon Resource Name (ARN) of the dead-letter queue to which Amazon SQS moves + // messages after the value of maxReceiveCount is exceeded. // - // * maxReceiveCount – The number of times a message is - // delivered to the source queue before being moved to the dead-letter queue. When - // the ReceiveCount for a message exceeds the maxReceiveCount for a queue, Amazon - // SQS moves the message to the dead-letter-queue. + // * maxReceiveCount – + // The number of times a message is delivered to the source queue before being + // moved to the dead-letter queue. When the ReceiveCount for a message exceeds the + // maxReceiveCount for a queue, Amazon SQS moves the message to the + // dead-letter-queue. // - // The dead-letter queue of a - // FIFO queue must also be a FIFO queue. Similarly, the dead-letter queue of a - // standard queue must also be a standard queue. + // The dead-letter queue of a FIFO queue must also be a FIFO + // queue. Similarly, the dead-letter queue of a standard queue must also be a + // standard queue. // - // * VisibilityTimeout – The - // visibility timeout for the queue, in seconds. Valid values: An integer from 0 to - // 43,200 (12 hours). Default: 30. For more information about the visibility - // timeout, see Visibility Timeout + // * VisibilityTimeout – The visibility timeout for the queue, in + // seconds. Valid values: An integer from 0 to 43,200 (12 hours). Default: 30. For + // more information about the visibility timeout, see Visibility Timeout // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html) // in the Amazon Simple Queue Service Developer Guide. // @@ -148,8 +145,8 @@ type CreateQueueInput struct { // apply only to server-side-encryption // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html): // - // - // * KmsMasterKeyId – The ID of an AWS-managed customer master key (CMK) for Amazon + // * + // KmsMasterKeyId – The ID of an AWS-managed customer master key (CMK) for Amazon // SQS or a custom CMK. For more information, see Key Terms // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-sse-key-terms). 
// While the alias of the AWS-managed CMK for Amazon SQS is always alias/aws/sqs, @@ -158,9 +155,8 @@ type CreateQueueInput struct { // (https://docs.aws.amazon.com/kms/latest/APIReference/API_DescribeKey.html#API_DescribeKey_RequestParameters) // in the AWS Key Management Service API Reference. // - // * - // KmsDataKeyReusePeriodSeconds – The length of time, in seconds, for which Amazon - // SQS can reuse a data key + // * KmsDataKeyReusePeriodSeconds + // – The length of time, in seconds, for which Amazon SQS can reuse a data key // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#data-keys) // to encrypt or decrypt messages before calling AWS KMS again. An integer // representing seconds, between 60 seconds (1 minute) and 86,400 seconds (24 @@ -173,50 +169,50 @@ type CreateQueueInput struct { // following attributes apply only to FIFO (first-in-first-out) queues // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html): // - // - // * FifoQueue – Designates a queue as FIFO. Valid values: true, false. If you - // don't specify the FifoQueue attribute, Amazon SQS creates a standard queue. You - // can provide this attribute only during queue creation. You can't change it for - // an existing queue. When you set this attribute, you must also provide the + // * + // FifoQueue – Designates a queue as FIFO. Valid values: true, false. If you don't + // specify the FifoQueue attribute, Amazon SQS creates a standard queue. You can + // provide this attribute only during queue creation. You can't change it for an + // existing queue. When you set this attribute, you must also provide the // MessageGroupId for your messages explicitly. For more information, see FIFO // Queue Logic // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-understanding-logic) // in the Amazon Simple Queue Service Developer Guide. // - // * - // ContentBasedDeduplication – Enables content-based deduplication. Valid values: - // true, false. For more information, see Exactly-Once Processing + // * ContentBasedDeduplication + // – Enables content-based deduplication. Valid values: true, false. For more + // information, see Exactly-Once Processing // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-exactly-once-processing) // in the Amazon Simple Queue Service Developer Guide. // - // * Every message - // must have a unique MessageDeduplicationId, - // - // * You may provide a - // MessageDeduplicationId explicitly. - // - // * If you aren't able to provide - // a MessageDeduplicationId and you enable ContentBasedDeduplication for your - // queue, Amazon SQS uses a SHA-256 hash to generate the MessageDeduplicationId - // using the body of the message (but not the attributes of the message). + // * Every message must have a + // unique MessageDeduplicationId, // + // * You may provide a MessageDeduplicationId + // explicitly. // - // * If you don't provide a MessageDeduplicationId and the queue doesn't have - // ContentBasedDeduplication set, the action fails with an error. + // * If you aren't able to provide a MessageDeduplicationId and you + // enable ContentBasedDeduplication for your queue, Amazon SQS uses a SHA-256 hash + // to generate the MessageDeduplicationId using the body of the message (but not + // the attributes of the message). 
// - // * If - // the queue has ContentBasedDeduplication set, your MessageDeduplicationId - // overrides the generated one. + // * If you don't provide a MessageDeduplicationId + // and the queue doesn't have ContentBasedDeduplication set, the action fails with + // an error. // - // * When ContentBasedDeduplication is in - // effect, messages with identical content sent within the deduplication interval - // are treated as duplicates and only one copy of the message is delivered. + // * If the queue has ContentBasedDeduplication set, your + // MessageDeduplicationId overrides the generated one. // + // * When + // ContentBasedDeduplication is in effect, messages with identical content sent + // within the deduplication interval are treated as duplicates and only one copy of + // the message is delivered. // - // * If you send one message with ContentBasedDeduplication enabled and then - // another message with a MessageDeduplicationId that is the same as the one - // generated for the first MessageDeduplicationId, the two messages are treated as - // duplicates and only one copy of the message is delivered. + // * If you send one message with + // ContentBasedDeduplication enabled and then another message with a + // MessageDeduplicationId that is the same as the one generated for the first + // MessageDeduplicationId, the two messages are treated as duplicates and only one + // copy of the message is delivered. Attributes map[string]*string // Add cost allocation tags to the specified Amazon SQS queue. For an overview, see @@ -225,16 +221,16 @@ type CreateQueueInput struct { // in the Amazon Simple Queue Service Developer Guide. When you use queue tags, // keep the following guidelines in mind: // - // * Adding more than 50 tags to a - // queue isn't recommended. + // * Adding more than 50 tags to a queue + // isn't recommended. // - // * Tags don't have any semantic meaning. Amazon SQS + // * Tags don't have any semantic meaning. Amazon SQS // interprets tags as character strings. // - // * Tags are case-sensitive. + // * Tags are case-sensitive. // - // * A - // new tag with a key identical to that of an existing tag overwrites the existing + // * A new tag + // with a key identical to that of an existing tag overwrites the existing // tag. // // For a full list of tag restrictions, see Limits Related to Queues diff --git a/service/sqs/api_op_GetQueueAttributes.go b/service/sqs/api_op_GetQueueAttributes.go index b54e30e8964..5a2476f8636 100644 --- a/service/sqs/api_op_GetQueueAttributes.go +++ b/service/sqs/api_op_GetQueueAttributes.go @@ -47,71 +47,70 @@ type GetQueueAttributesInput struct { // least 1 minute after the producers stop sending messages. This period is // required for the queue metadata to reach eventual consistency. // - // * All – - // Returns all values. + // * All – Returns + // all values. // - // * ApproximateNumberOfMessages – Returns the approximate - // number of messages available for retrieval from the queue. + // * ApproximateNumberOfMessages – Returns the approximate number of + // messages available for retrieval from the queue. // - // * + // * // ApproximateNumberOfMessagesDelayed – Returns the approximate number of messages // in the queue that are delayed and not available for reading immediately. This // can happen when the queue is configured as a delay queue or when a message has // been sent with a delay parameter. 
// - // * ApproximateNumberOfMessagesNotVisible – + // * ApproximateNumberOfMessagesNotVisible – // Returns the approximate number of messages that are in flight. Messages are // considered to be in flight if they have been sent to a client but have not yet // been deleted or have not yet reached the end of their visibility window. // - // * + // * // CreatedTimestamp – Returns the time when the queue was created in seconds (epoch // time (http://en.wikipedia.org/wiki/Unix_time)). // - // * DelaySeconds – Returns - // the default delay on the queue in seconds. + // * DelaySeconds – Returns the + // default delay on the queue in seconds. // - // * LastModifiedTimestamp – - // Returns the time when the queue was last changed in seconds (epoch time + // * LastModifiedTimestamp – Returns the + // time when the queue was last changed in seconds (epoch time // (http://en.wikipedia.org/wiki/Unix_time)). // - // * MaximumMessageSize – Returns - // the limit of how many bytes a message can contain before Amazon SQS rejects - // it. + // * MaximumMessageSize – Returns the + // limit of how many bytes a message can contain before Amazon SQS rejects it. // - // * MessageRetentionPeriod – Returns the length of time, in seconds, for - // which Amazon SQS retains a message. + // * + // MessageRetentionPeriod – Returns the length of time, in seconds, for which + // Amazon SQS retains a message. // - // * Policy – Returns the policy of the - // queue. + // * Policy – Returns the policy of the queue. // - // * QueueArn – Returns the Amazon resource name (ARN) of the queue. + // * + // QueueArn – Returns the Amazon resource name (ARN) of the queue. // - // - // * ReceiveMessageWaitTimeSeconds – Returns the length of time, in seconds, for + // * + // ReceiveMessageWaitTimeSeconds – Returns the length of time, in seconds, for // which the ReceiveMessage action waits for a message to arrive. // - // * - // RedrivePolicy – The string that includes the parameters for the dead-letter - // queue functionality of the source queue as a JSON object. For more information - // about the redrive policy and dead-letter queues, see Using Amazon SQS - // Dead-Letter Queues + // * RedrivePolicy + // – The string that includes the parameters for the dead-letter queue + // functionality of the source queue as a JSON object. For more information about + // the redrive policy and dead-letter queues, see Using Amazon SQS Dead-Letter + // Queues // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html) // in the Amazon Simple Queue Service Developer Guide. // - // * - // deadLetterTargetArn – The Amazon Resource Name (ARN) of the dead-letter queue to - // which Amazon SQS moves messages after the value of maxReceiveCount is - // exceeded. + // * deadLetterTargetArn – The + // Amazon Resource Name (ARN) of the dead-letter queue to which Amazon SQS moves + // messages after the value of maxReceiveCount is exceeded. // - // * maxReceiveCount – The number of times a message is - // delivered to the source queue before being moved to the dead-letter queue. When - // the ReceiveCount for a message exceeds the maxReceiveCount for a queue, Amazon - // SQS moves the message to the dead-letter-queue. + // * maxReceiveCount – + // The number of times a message is delivered to the source queue before being + // moved to the dead-letter queue. When the ReceiveCount for a message exceeds the + // maxReceiveCount for a queue, Amazon SQS moves the message to the + // dead-letter-queue. 
// - // * VisibilityTimeout – - // Returns the visibility timeout for the queue. For more information about the - // visibility timeout, see Visibility Timeout + // * VisibilityTimeout – Returns the visibility timeout for the + // queue. For more information about the visibility timeout, see Visibility Timeout // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html) // in the Amazon Simple Queue Service Developer Guide. // @@ -119,24 +118,24 @@ type GetQueueAttributesInput struct { // apply only to server-side-encryption // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html): // - // - // * KmsMasterKeyId – Returns the ID of an AWS-managed customer master key (CMK) - // for Amazon SQS or a custom CMK. For more information, see Key Terms + // * + // KmsMasterKeyId – Returns the ID of an AWS-managed customer master key (CMK) for + // Amazon SQS or a custom CMK. For more information, see Key Terms // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-sse-key-terms). // - // - // * KmsDataKeyReusePeriodSeconds – Returns the length of time, in seconds, for - // which Amazon SQS can reuse a data key to encrypt or decrypt messages before - // calling AWS KMS again. For more information, see How Does the Data Key Reuse - // Period Work? + // * + // KmsDataKeyReusePeriodSeconds – Returns the length of time, in seconds, for which + // Amazon SQS can reuse a data key to encrypt or decrypt messages before calling + // AWS KMS again. For more information, see How Does the Data Key Reuse Period + // Work? // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-how-does-the-data-key-reuse-period-work). // // The // following attributes apply only to FIFO (first-in-first-out) queues // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html): // - // - // * FifoQueue – Returns whether the queue is FIFO. For more information, see FIFO + // * + // FifoQueue – Returns whether the queue is FIFO. For more information, see FIFO // Queue Logic // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-understanding-logic) // in the Amazon Simple Queue Service Developer Guide. To determine whether a queue @@ -144,7 +143,7 @@ type GetQueueAttributesInput struct { // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html), // you can check whether QueueName ends with the .fifo suffix. // - // * + // * // ContentBasedDeduplication – Returns whether content-based deduplication is // enabled for the queue. For more information, see Exactly-Once Processing // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-exactly-once-processing) diff --git a/service/sqs/api_op_ReceiveMessage.go b/service/sqs/api_op_ReceiveMessage.go index b978ec4b1da..ed4de9976f0 100644 --- a/service/sqs/api_op_ReceiveMessage.go +++ b/service/sqs/api_op_ReceiveMessage.go @@ -24,25 +24,25 @@ import ( // a particular ReceiveMessage response. If this happens, repeat the request. For // each message returned, the response includes the following: // -// * The message +// * The message // body. // -// * An MD5 digest of the message body. For information about MD5, see +// * An MD5 digest of the message body. 
For information about MD5, see // RFC1321 (https://www.ietf.org/rfc/rfc1321.txt). // -// * The MessageId you -// received when you sent the message to the queue. +// * The MessageId you received +// when you sent the message to the queue. // -// * The receipt handle. +// * The receipt handle. // +// * The message +// attributes. // -// * The message attributes. +// * An MD5 digest of the message attributes. // -// * An MD5 digest of the message attributes. -// -// The -// receipt handle is the identifier you must provide when deleting the message. For -// more information, see Queue and Message Identifiers +// The receipt handle is +// the identifier you must provide when deleting the message. For more information, +// see Queue and Message Identifiers // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-queue-message-identifiers.html) // in the Amazon Simple Queue Service Developer Guide. You can provide the // VisibilityTimeout parameter in your request. The parameter is applied to the @@ -84,42 +84,41 @@ type ReceiveMessageInput struct { // A list of attributes that need to be returned along with each message. These // attributes include: // - // * All – Returns all values. + // * All – Returns all values. // - // * + // * // ApproximateFirstReceiveTimestamp – Returns the time the message was first // received from the queue (epoch time (http://en.wikipedia.org/wiki/Unix_time) in // milliseconds). // - // * ApproximateReceiveCount – Returns the number of times a + // * ApproximateReceiveCount – Returns the number of times a // message has been received across all queues but not deleted. // - // * - // AWSTraceHeader – Returns the AWS X-Ray trace header string. - // - // * SenderId + // * AWSTraceHeader – + // Returns the AWS X-Ray trace header string. // + // * SenderId // - // * For an IAM user, returns the IAM user ID, for example ABCDEFGHI1JKLMNOPQ23R. + // * For an IAM user, + // returns the IAM user ID, for example ABCDEFGHI1JKLMNOPQ23R. // + // * For an IAM role, + // returns the IAM role ID, for example ABCDE1F2GH3I4JK5LMNOP:i-a123b456. // - // * For an IAM role, returns the IAM role ID, for example - // ABCDE1F2GH3I4JK5LMNOP:i-a123b456. - // - // * SentTimestamp – Returns the time the - // message was sent to the queue (epoch time + // * + // SentTimestamp – Returns the time the message was sent to the queue (epoch time // (http://en.wikipedia.org/wiki/Unix_time) in milliseconds). // - // * + // * // MessageDeduplicationId – Returns the value provided by the producer that calls // the SendMessage action. // - // * MessageGroupId – Returns the value provided by - // the producer that calls the SendMessage action. Messages with the same + // * MessageGroupId – Returns the value provided by the + // producer that calls the SendMessage action. Messages with the same // MessageGroupId are returned in sequence. // - // * SequenceNumber – Returns the - // value provided by Amazon SQS. + // * SequenceNumber – Returns the value + // provided by Amazon SQS. AttributeNames []types.QueueAttributeName // The maximum number of messages to return. Amazon SQS never returns more messages @@ -129,26 +128,26 @@ type ReceiveMessageInput struct { // The name of the message attribute, where N is the index. // - // * The name can - // contain alphanumeric characters and the underscore (_), hyphen (-), and period - // (.). - // - // * The name is case-sensitive and must be unique among all attribute - // names for the message. 
+ // * The name can contain + // alphanumeric characters and the underscore (_), hyphen (-), and period (.). // - // * The name must not start with AWS-reserved prefixes - // such as AWS. or Amazon. (or any casing variants). + // * + // The name is case-sensitive and must be unique among all attribute names for the + // message. // - // * The name must not start - // or end with a period (.), and it should not have periods in succession (..). + // * The name must not start with AWS-reserved prefixes such as AWS. or + // Amazon. (or any casing variants). // + // * The name must not start or end with a + // period (.), and it should not have periods in succession (..). // - // * The name can be up to 256 characters long. + // * The name can + // be up to 256 characters long. // - // When using ReceiveMessage, you can - // send a list of attribute names to receive, or you can return all of the - // attributes by specifying All or . in your request. You can also use all message - // attributes starting with a prefix, for example bar.. + // When using ReceiveMessage, you can send a list of + // attribute names to receive, or you can return all of the attributes by + // specifying All or . in your request. You can also use all message attributes + // starting with a prefix, for example bar.. MessageAttributeNames []*string // This parameter applies only to FIFO (first-in-first-out) queues. The token used @@ -158,25 +157,25 @@ type ReceiveMessageInput struct { // to retrieve the same set of messages, even if their visibility timeout has not // yet expired. // - // * You can use ReceiveRequestAttemptId only for 5 minutes after - // a ReceiveMessage action. + // * You can use ReceiveRequestAttemptId only for 5 minutes after a + // ReceiveMessage action. // - // * When you set FifoQueue, a caller of the - // ReceiveMessage action can provide a ReceiveRequestAttemptId explicitly. + // * When you set FifoQueue, a caller of the ReceiveMessage + // action can provide a ReceiveRequestAttemptId explicitly. // - // * - // If a caller of the ReceiveMessage action doesn't provide a - // ReceiveRequestAttemptId, Amazon SQS generates a ReceiveRequestAttemptId. + // * If a caller of the + // ReceiveMessage action doesn't provide a ReceiveRequestAttemptId, Amazon SQS + // generates a ReceiveRequestAttemptId. // - // * - // It is possible to retry the ReceiveMessage action with the same - // ReceiveRequestAttemptId if none of the messages have been modified (deleted or - // had their visibility changes). + // * It is possible to retry the + // ReceiveMessage action with the same ReceiveRequestAttemptId if none of the + // messages have been modified (deleted or had their visibility changes). // - // * During a visibility timeout, subsequent - // calls with the same ReceiveRequestAttemptId return the same messages and receipt - // handles. If a retry occurs within the deduplication interval, it resets the - // visibility timeout. For more information, see Visibility Timeout + // * During + // a visibility timeout, subsequent calls with the same ReceiveRequestAttemptId + // return the same messages and receipt handles. If a retry occurs within the + // deduplication interval, it resets the visibility timeout. For more information, + // see Visibility Timeout // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html) // in the Amazon Simple Queue Service Developer Guide. 
If a caller of the // ReceiveMessage action still processes messages when the visibility timeout @@ -188,15 +187,15 @@ type ReceiveMessageInput struct { // before the visibility timeout expires and extend the visibility timeout as // necessary. // - // * While messages with a particular MessageGroupId are invisible, - // no more messages belonging to the same MessageGroupId are returned until the + // * While messages with a particular MessageGroupId are invisible, no + // more messages belonging to the same MessageGroupId are returned until the // visibility timeout expires. You can still receive messages with another // MessageGroupId as long as it is also visible. // - // * If a caller of - // ReceiveMessage can't track the ReceiveRequestAttemptId, no retries work until - // the original visibility timeout expires. As a result, delays might occur but the - // messages in the queue remain in a strict order. + // * If a caller of ReceiveMessage + // can't track the ReceiveRequestAttemptId, no retries work until the original + // visibility timeout expires. As a result, delays might occur but the messages in + // the queue remain in a strict order. // // The maximum length of // ReceiveRequestAttemptId is 128 characters. ReceiveRequestAttemptId can contain diff --git a/service/sqs/api_op_RemovePermission.go b/service/sqs/api_op_RemovePermission.go index 46b34a23214..8058384c442 100644 --- a/service/sqs/api_op_RemovePermission.go +++ b/service/sqs/api_op_RemovePermission.go @@ -13,16 +13,16 @@ import ( // Revokes any permissions in the queue policy that matches the specified Label // parameter. // -// * Only the owner of a queue can remove permissions from it. +// * Only the owner of a queue can remove permissions from it. // -// -// * Cross-account permissions don't apply to this action. For more information, -// see Grant Cross-Account Permissions to a Role and a User Name +// * +// Cross-account permissions don't apply to this action. For more information, see +// Grant Cross-Account Permissions to a Role and a User Name // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) // in the Amazon Simple Queue Service Developer Guide. // -// * To remove the ability -// to change queue permissions, you must deny permission to the AddPermission, +// * To remove the ability to +// change queue permissions, you must deny permission to the AddPermission, // RemovePermission, and SetQueueAttributes actions in your IAM policy. func (c *Client) RemovePermission(ctx context.Context, params *RemovePermissionInput, optFns ...func(*Options)) (*RemovePermissionOutput, error) { if params == nil { diff --git a/service/sqs/api_op_SendMessage.go b/service/sqs/api_op_SendMessage.go index b4efe98980d..1dcf0497492 100644 --- a/service/sqs/api_op_SendMessage.go +++ b/service/sqs/api_op_SendMessage.go @@ -73,31 +73,31 @@ type SendMessageInput struct { // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-exactly-once-processing) // in the Amazon Simple Queue Service Developer Guide. // - // * Every message must - // have a unique MessageDeduplicationId, + // * Every message must have a + // unique MessageDeduplicationId, // - // * You may provide a - // MessageDeduplicationId explicitly. + // * You may provide a MessageDeduplicationId + // explicitly. 
// - // * If you aren't able to provide a - // MessageDeduplicationId and you enable ContentBasedDeduplication for your queue, - // Amazon SQS uses a SHA-256 hash to generate the MessageDeduplicationId using the - // body of the message (but not the attributes of the message). + // * If you aren't able to provide a MessageDeduplicationId and you + // enable ContentBasedDeduplication for your queue, Amazon SQS uses a SHA-256 hash + // to generate the MessageDeduplicationId using the body of the message (but not + // the attributes of the message). // - // * If you - // don't provide a MessageDeduplicationId and the queue doesn't have - // ContentBasedDeduplication set, the action fails with an error. + // * If you don't provide a MessageDeduplicationId + // and the queue doesn't have ContentBasedDeduplication set, the action fails with + // an error. // - // * If the - // queue has ContentBasedDeduplication set, your MessageDeduplicationId overrides - // the generated one. + // * If the queue has ContentBasedDeduplication set, your + // MessageDeduplicationId overrides the generated one. // - // * When ContentBasedDeduplication is in effect, messages - // with identical content sent within the deduplication interval are treated as - // duplicates and only one copy of the message is delivered. + // * When + // ContentBasedDeduplication is in effect, messages with identical content sent + // within the deduplication interval are treated as duplicates and only one copy of + // the message is delivered. // - // * If you send one - // message with ContentBasedDeduplication enabled and then another message with a + // * If you send one message with + // ContentBasedDeduplication enabled and then another message with a // MessageDeduplicationId that is the same as the one generated for the first // MessageDeduplicationId, the two messages are treated as duplicates and only one // copy of the message is delivered. @@ -125,16 +125,16 @@ type SendMessageInput struct { // multiple consumers can process the queue, but the session data of each user is // processed in a FIFO fashion. // - // * You must associate a non-empty - // MessageGroupId with a message. If you don't provide a MessageGroupId, the action - // fails. + // * You must associate a non-empty MessageGroupId + // with a message. If you don't provide a MessageGroupId, the action fails. // - // * ReceiveMessage might return messages with multiple MessageGroupId - // values. For each MessageGroupId, the messages are sorted by time sent. The - // caller can't specify a MessageGroupId. + // * + // ReceiveMessage might return messages with multiple MessageGroupId values. For + // each MessageGroupId, the messages are sorted by time sent. The caller can't + // specify a MessageGroupId. // - // The length of MessageGroupId is 128 - // characters. Valid values: alphanumeric characters and punctuation + // The length of MessageGroupId is 128 characters. Valid + // values: alphanumeric characters and punctuation // (!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~). For best practices of using MessageGroupId, // see Using the MessageGroupId Property // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/using-messagegroupid-property.html) @@ -145,11 +145,11 @@ type SendMessageInput struct { // The message system attribute to send. Each message system attribute consists of // a Name, Type, and Value. // - // * Currently, the only supported message system + // * Currently, the only supported message system // attribute is AWSTraceHeader. 
Its type must be String and its value must be a // correctly formatted AWS X-Ray trace header string. // - // * The size of a message + // * The size of a message // system attribute doesn't count towards the total size of a message. MessageSystemAttributes map[string]*types.MessageSystemAttributeValue } diff --git a/service/sqs/api_op_SetQueueAttributes.go b/service/sqs/api_op_SetQueueAttributes.go index 9762ef81f08..b0f126c203d 100644 --- a/service/sqs/api_op_SetQueueAttributes.go +++ b/service/sqs/api_op_SetQueueAttributes.go @@ -15,18 +15,18 @@ import ( // propagate throughout the Amazon SQS system. Changes made to the // MessageRetentionPeriod attribute can take up to 15 minutes. // -// * In the -// future, new attributes might be added. If you write code that calls this action, -// we recommend that you structure your code so that it can handle new attributes +// * In the future, +// new attributes might be added. If you write code that calls this action, we +// recommend that you structure your code so that it can handle new attributes // gracefully. // -// * Cross-account permissions don't apply to this action. For -// more information, see Grant Cross-Account Permissions to a Role and a User Name +// * Cross-account permissions don't apply to this action. For more +// information, see Grant Cross-Account Permissions to a Role and a User Name // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) // in the Amazon Simple Queue Service Developer Guide. // -// * To remove the ability -// to change queue permissions, you must deny permission to the AddPermission, +// * To remove the ability to +// change queue permissions, you must deny permission to the AddPermission, // RemovePermission, and SetQueueAttributes actions in your IAM policy. func (c *Client) SetQueueAttributes(ctx context.Context, params *SetQueueAttributesInput, optFns ...func(*Options)) (*SetQueueAttributesOutput, error) { if params == nil { @@ -50,55 +50,53 @@ type SetQueueAttributesInput struct { // values of the special request parameters that the SetQueueAttributes action // uses: // - // * DelaySeconds – The length of time, in seconds, for which the - // delivery of all messages in the queue is delayed. Valid values: An integer from - // 0 to 900 (15 minutes). Default: 0. + // * DelaySeconds – The length of time, in seconds, for which the delivery + // of all messages in the queue is delayed. Valid values: An integer from 0 to 900 + // (15 minutes). Default: 0. // - // * MaximumMessageSize – The limit of how - // many bytes a message can contain before Amazon SQS rejects it. Valid values: An - // integer from 1,024 bytes (1 KiB) up to 262,144 bytes (256 KiB). Default: 262,144 - // (256 KiB). + // * MaximumMessageSize – The limit of how many bytes a + // message can contain before Amazon SQS rejects it. Valid values: An integer from + // 1,024 bytes (1 KiB) up to 262,144 bytes (256 KiB). Default: 262,144 (256 + // KiB). // - // * MessageRetentionPeriod – The length of time, in seconds, for - // which Amazon SQS retains a message. Valid values: An integer representing - // seconds, from 60 (1 minute) to 1,209,600 (14 days). Default: 345,600 (4 days). + // * MessageRetentionPeriod – The length of time, in seconds, for which + // Amazon SQS retains a message. Valid values: An integer representing seconds, + // from 60 (1 minute) to 1,209,600 (14 days). Default: 345,600 (4 days). 
// - // - // * Policy – The queue's policy. A valid AWS policy. For more information about - // policy structure, see Overview of AWS IAM Policies + // * Policy + // – The queue's policy. A valid AWS policy. For more information about policy + // structure, see Overview of AWS IAM Policies // (https://docs.aws.amazon.com/IAM/latest/UserGuide/PoliciesOverview.html) in the // Amazon IAM User Guide. // - // * ReceiveMessageWaitTimeSeconds – The length of - // time, in seconds, for which a ReceiveMessage action waits for a message to - // arrive. Valid values: An integer from 0 to 20 (seconds). Default: 0. + // * ReceiveMessageWaitTimeSeconds – The length of time, in + // seconds, for which a ReceiveMessage action waits for a message to arrive. Valid + // values: An integer from 0 to 20 (seconds). Default: 0. // - // * - // RedrivePolicy – The string that includes the parameters for the dead-letter - // queue functionality of the source queue as a JSON object. For more information - // about the redrive policy and dead-letter queues, see Using Amazon SQS - // Dead-Letter Queues + // * RedrivePolicy – The + // string that includes the parameters for the dead-letter queue functionality of + // the source queue as a JSON object. For more information about the redrive policy + // and dead-letter queues, see Using Amazon SQS Dead-Letter Queues // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html) // in the Amazon Simple Queue Service Developer Guide. // - // * - // deadLetterTargetArn – The Amazon Resource Name (ARN) of the dead-letter queue to - // which Amazon SQS moves messages after the value of maxReceiveCount is - // exceeded. + // * deadLetterTargetArn – The + // Amazon Resource Name (ARN) of the dead-letter queue to which Amazon SQS moves + // messages after the value of maxReceiveCount is exceeded. // - // * maxReceiveCount – The number of times a message is - // delivered to the source queue before being moved to the dead-letter queue. When - // the ReceiveCount for a message exceeds the maxReceiveCount for a queue, Amazon - // SQS moves the message to the dead-letter-queue. + // * maxReceiveCount – + // The number of times a message is delivered to the source queue before being + // moved to the dead-letter queue. When the ReceiveCount for a message exceeds the + // maxReceiveCount for a queue, Amazon SQS moves the message to the + // dead-letter-queue. // - // The dead-letter queue of a - // FIFO queue must also be a FIFO queue. Similarly, the dead-letter queue of a - // standard queue must also be a standard queue. + // The dead-letter queue of a FIFO queue must also be a FIFO + // queue. Similarly, the dead-letter queue of a standard queue must also be a + // standard queue. // - // * VisibilityTimeout – The - // visibility timeout for the queue, in seconds. Valid values: An integer from 0 to - // 43,200 (12 hours). Default: 30. For more information about the visibility - // timeout, see Visibility Timeout + // * VisibilityTimeout – The visibility timeout for the queue, in + // seconds. Valid values: An integer from 0 to 43,200 (12 hours). Default: 30. For + // more information about the visibility timeout, see Visibility Timeout // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html) // in the Amazon Simple Queue Service Developer Guide. 
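The reflowed comment above lists the attribute keys that SetQueueAttributes accepts and their string-encoded value ranges. A minimal sketch of how a caller might exercise this input shape follows; it is not part of the generated patch. It assumes SetQueueAttributesInput also exposes a QueueUrl *string field (not shown in these hunks) and that aws.String from the SDK's aws package is available; the attribute names, value formats, and the Attributes map[string]*string member come from the hunks themselves.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sqs"
)

// tuneQueue applies a few of the attributes described in the comment block
// above. Every value is passed as a string, matching the
// Attributes map[string]*string member of SetQueueAttributesInput.
func tuneQueue(ctx context.Context, client *sqs.Client, queueURL string) error {
	_, err := client.SetQueueAttributes(ctx, &sqs.SetQueueAttributesInput{
		QueueUrl: aws.String(queueURL), // assumed field; not shown in this diff
		Attributes: map[string]*string{
			"DelaySeconds":      aws.String("0"),  // 0 to 900 seconds
			"VisibilityTimeout": aws.String("60"), // 0 to 43,200 seconds
		},
	})
	return err
}

Because the map values are plain strings, JSON-valued attributes such as RedrivePolicy are encoded the same way.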
// @@ -106,8 +104,8 @@ type SetQueueAttributesInput struct { // apply only to server-side-encryption // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html): // - // - // * KmsMasterKeyId – The ID of an AWS-managed customer master key (CMK) for Amazon + // * + // KmsMasterKeyId – The ID of an AWS-managed customer master key (CMK) for Amazon // SQS or a custom CMK. For more information, see Key Terms // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-sse-key-terms). // While the alias of the AWS-managed CMK for Amazon SQS is always alias/aws/sqs, @@ -116,9 +114,8 @@ type SetQueueAttributesInput struct { // (https://docs.aws.amazon.com/kms/latest/APIReference/API_DescribeKey.html#API_DescribeKey_RequestParameters) // in the AWS Key Management Service API Reference. // - // * - // KmsDataKeyReusePeriodSeconds – The length of time, in seconds, for which Amazon - // SQS can reuse a data key + // * KmsDataKeyReusePeriodSeconds + // – The length of time, in seconds, for which Amazon SQS can reuse a data key // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#data-keys) // to encrypt or decrypt messages before calling AWS KMS again. An integer // representing seconds, between 60 seconds (1 minute) and 86,400 seconds (24 @@ -131,40 +128,40 @@ type SetQueueAttributesInput struct { // following attribute applies only to FIFO (first-in-first-out) queues // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html): // - // - // * ContentBasedDeduplication – Enables content-based deduplication. For more + // * + // ContentBasedDeduplication – Enables content-based deduplication. For more // information, see Exactly-Once Processing // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-exactly-once-processing) // in the Amazon Simple Queue Service Developer Guide. // - // * Every message - // must have a unique MessageDeduplicationId, - // - // * You may provide a - // MessageDeduplicationId explicitly. - // - // * If you aren't able to provide - // a MessageDeduplicationId and you enable ContentBasedDeduplication for your - // queue, Amazon SQS uses a SHA-256 hash to generate the MessageDeduplicationId - // using the body of the message (but not the attributes of the message). + // * Every message must have a + // unique MessageDeduplicationId, // + // * You may provide a MessageDeduplicationId + // explicitly. // - // * If you don't provide a MessageDeduplicationId and the queue doesn't have - // ContentBasedDeduplication set, the action fails with an error. + // * If you aren't able to provide a MessageDeduplicationId and you + // enable ContentBasedDeduplication for your queue, Amazon SQS uses a SHA-256 hash + // to generate the MessageDeduplicationId using the body of the message (but not + // the attributes of the message). // - // * If - // the queue has ContentBasedDeduplication set, your MessageDeduplicationId - // overrides the generated one. + // * If you don't provide a MessageDeduplicationId + // and the queue doesn't have ContentBasedDeduplication set, the action fails with + // an error. // - // * When ContentBasedDeduplication is in - // effect, messages with identical content sent within the deduplication interval - // are treated as duplicates and only one copy of the message is delivered. 
+ // * If the queue has ContentBasedDeduplication set, your + // MessageDeduplicationId overrides the generated one. // + // * When + // ContentBasedDeduplication is in effect, messages with identical content sent + // within the deduplication interval are treated as duplicates and only one copy of + // the message is delivered. // - // * If you send one message with ContentBasedDeduplication enabled and then - // another message with a MessageDeduplicationId that is the same as the one - // generated for the first MessageDeduplicationId, the two messages are treated as - // duplicates and only one copy of the message is delivered. + // * If you send one message with + // ContentBasedDeduplication enabled and then another message with a + // MessageDeduplicationId that is the same as the one generated for the first + // MessageDeduplicationId, the two messages are treated as duplicates and only one + // copy of the message is delivered. // // This member is required. Attributes map[string]*string diff --git a/service/sqs/api_op_TagQueue.go b/service/sqs/api_op_TagQueue.go index 0f18ab4eda5..7bd29fe9bbb 100644 --- a/service/sqs/api_op_TagQueue.go +++ b/service/sqs/api_op_TagQueue.go @@ -16,16 +16,16 @@ import ( // in the Amazon Simple Queue Service Developer Guide. When you use queue tags, // keep the following guidelines in mind: // -// * Adding more than 50 tags to a -// queue isn't recommended. +// * Adding more than 50 tags to a queue +// isn't recommended. // -// * Tags don't have any semantic meaning. Amazon SQS +// * Tags don't have any semantic meaning. Amazon SQS // interprets tags as character strings. // -// * Tags are case-sensitive. +// * Tags are case-sensitive. // -// * A -// new tag with a key identical to that of an existing tag overwrites the existing +// * A new tag +// with a key identical to that of an existing tag overwrites the existing // tag. // // For a full list of tag restrictions, see Limits Related to Queues diff --git a/service/sqs/doc.go b/service/sqs/doc.go index 11df30f2091..73e5e286415 100644 --- a/service/sqs/doc.go +++ b/service/sqs/doc.go @@ -15,39 +15,39 @@ // programming language. 
The SDKs perform tasks such as the following // automatically: // -// * Cryptographically sign your service requests +// * Cryptographically sign your service requests // -// * Retry +// * Retry // requests // -// * Handle error responses +// * Handle error responses // // Additional Information // -// * Amazon SQS -// Product Page (http://aws.amazon.com/sqs/) +// * Amazon SQS Product +// Page (http://aws.amazon.com/sqs/) // -// * Amazon Simple Queue Service -// Developer Guide +// * Amazon Simple Queue Service Developer +// Guide // -// * Making API Requests +// * Making API Requests // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-making-api-requests.html) // -// -// * Amazon SQS Message Attributes +// * +// Amazon SQS Message Attributes // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-metadata.html#sqs-message-attributes) // -// -// * Amazon SQS Dead-Letter Queues +// * +// Amazon SQS Dead-Letter Queues // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html) // -// -// * Amazon SQS in the AWS CLI Command Reference +// * +// Amazon SQS in the AWS CLI Command Reference // (http://docs.aws.amazon.com/cli/latest/reference/sqs/index.html) // -// * Amazon -// Web Services General Reference +// * Amazon Web +// Services General Reference // -// * Regions and Endpoints +// * Regions and Endpoints // (https://docs.aws.amazon.com/general/latest/gr/rande.html#sqs_region) package sqs diff --git a/service/sqs/types/types.go b/service/sqs/types/types.go index 8917ba04f73..c57762b3ef1 100644 --- a/service/sqs/types/types.go +++ b/service/sqs/types/types.go @@ -96,25 +96,24 @@ type Message struct { // A map of the attributes requested in ReceiveMessage to their respective values. // Supported attributes: // - // * ApproximateReceiveCount + // * ApproximateReceiveCount // - // * + // * // ApproximateFirstReceiveTimestamp // - // * MessageDeduplicationId + // * MessageDeduplicationId // - // * - // MessageGroupId + // * MessageGroupId // - // * SenderId + // * + // SenderId // - // * SentTimestamp + // * SentTimestamp // - // * - // SequenceNumber + // * SequenceNumber // - // ApproximateFirstReceiveTimestamp and SentTimestamp are each - // returned as an integer representing the epoch time + // ApproximateFirstReceiveTimestamp + // and SentTimestamp are each returned as an integer representing the epoch time // (http://en.wikipedia.org/wiki/Unix_time) in milliseconds. Attributes map[string]*string @@ -248,31 +247,31 @@ type SendMessageBatchRequestEntry struct { // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-exactly-once-processing) // in the Amazon Simple Queue Service Developer Guide. // - // * Every message must - // have a unique MessageDeduplicationId, + // * Every message must have a + // unique MessageDeduplicationId, // - // * You may provide a - // MessageDeduplicationId explicitly. + // * You may provide a MessageDeduplicationId + // explicitly. // - // * If you aren't able to provide a - // MessageDeduplicationId and you enable ContentBasedDeduplication for your queue, - // Amazon SQS uses a SHA-256 hash to generate the MessageDeduplicationId using the - // body of the message (but not the attributes of the message). 
+ // * If you aren't able to provide a MessageDeduplicationId and you + // enable ContentBasedDeduplication for your queue, Amazon SQS uses a SHA-256 hash + // to generate the MessageDeduplicationId using the body of the message (but not + // the attributes of the message). // - // * If you - // don't provide a MessageDeduplicationId and the queue doesn't have - // ContentBasedDeduplication set, the action fails with an error. + // * If you don't provide a MessageDeduplicationId + // and the queue doesn't have ContentBasedDeduplication set, the action fails with + // an error. // - // * If the - // queue has ContentBasedDeduplication set, your MessageDeduplicationId overrides - // the generated one. + // * If the queue has ContentBasedDeduplication set, your + // MessageDeduplicationId overrides the generated one. // - // * When ContentBasedDeduplication is in effect, messages - // with identical content sent within the deduplication interval are treated as - // duplicates and only one copy of the message is delivered. + // * When + // ContentBasedDeduplication is in effect, messages with identical content sent + // within the deduplication interval are treated as duplicates and only one copy of + // the message is delivered. // - // * If you send one - // message with ContentBasedDeduplication enabled and then another message with a + // * If you send one message with + // ContentBasedDeduplication enabled and then another message with a // MessageDeduplicationId that is the same as the one generated for the first // MessageDeduplicationId, the two messages are treated as duplicates and only one // copy of the message is delivered. @@ -300,16 +299,16 @@ type SendMessageBatchRequestEntry struct { // multiple consumers can process the queue, but the session data of each user is // processed in a FIFO fashion. // - // * You must associate a non-empty - // MessageGroupId with a message. If you don't provide a MessageGroupId, the action - // fails. + // * You must associate a non-empty MessageGroupId + // with a message. If you don't provide a MessageGroupId, the action fails. // - // * ReceiveMessage might return messages with multiple MessageGroupId - // values. For each MessageGroupId, the messages are sorted by time sent. The - // caller can't specify a MessageGroupId. + // * + // ReceiveMessage might return messages with multiple MessageGroupId values. For + // each MessageGroupId, the messages are sorted by time sent. The caller can't + // specify a MessageGroupId. // - // The length of MessageGroupId is 128 - // characters. Valid values: alphanumeric characters and punctuation + // The length of MessageGroupId is 128 characters. Valid + // values: alphanumeric characters and punctuation // (!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~). For best practices of using MessageGroupId, // see Using the MessageGroupId Property // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/using-messagegroupid-property.html) @@ -320,12 +319,12 @@ type SendMessageBatchRequestEntry struct { // The message system attribute to send Each message system attribute consists of a // Name, Type, and Value. // - // * Currently, the only supported message system - // attribute is AWSTraceHeader. Its type must be String and its value must be a - // correctly formatted AWS X-Ray trace header string. + // * Currently, the only supported message system attribute + // is AWSTraceHeader. Its type must be String and its value must be a correctly + // formatted AWS X-Ray trace header string. 
// - // * The size of a message - // system attribute doesn't count towards the total size of a message. + // * The size of a message system + // attribute doesn't count towards the total size of a message. MessageSystemAttributes map[string]*MessageSystemAttributeValue } diff --git a/service/ssm/api_op_CreateActivation.go b/service/ssm/api_op_CreateActivation.go index 74214754cb2..56fc7c00e74 100644 --- a/service/ssm/api_op_CreateActivation.go +++ b/service/ssm/api_op_CreateActivation.go @@ -74,13 +74,13 @@ type CreateActivationInput struct { // virtual machines (VMs) in your on-premises environment you intend to activate. // In this case, you could specify the following key name/value pairs: // - // * + // * // Key=OS,Value=Windows // - // * Key=Environment,Value=Production + // * Key=Environment,Value=Production // - // When you install - // SSM Agent on your on-premises servers and VMs, you specify an activation ID and + // When you install SSM + // Agent on your on-premises servers and VMs, you specify an activation ID and // code. When you specify the activation ID and code, tags assigned to the // activation are automatically applied to the on-premises servers or VMs. You // can't add tags to or delete tags from an existing activation. You can tag your diff --git a/service/ssm/api_op_CreateDocument.go b/service/ssm/api_op_CreateDocument.go index ce3b35c14b1..e01bf6d1c57 100644 --- a/service/ssm/api_op_CreateDocument.go +++ b/service/ssm/api_op_CreateDocument.go @@ -39,15 +39,15 @@ type CreateDocumentInput struct { // referencing the file in a command. For examples, see the following topics in the // AWS Systems Manager User Guide. // - // * Create an SSM document (AWS API) + // * Create an SSM document (AWS API) // (https://docs.aws.amazon.com/systems-manager/latest/userguide/create-ssm-document-api.html) // - // - // * Create an SSM document (AWS CLI) + // * + // Create an SSM document (AWS CLI) // (https://docs.aws.amazon.com/systems-manager/latest/userguide/create-ssm-document-cli.html) // - // - // * Create an SSM document (API) + // * + // Create an SSM document (API) // (https://docs.aws.amazon.com/systems-manager/latest/userguide/create-ssm-document-api.html) // // This member is required. @@ -57,11 +57,11 @@ type CreateDocumentInput struct { // document name prefixes. These are reserved by AWS for use as document name // prefixes: // - // * aws- + // * aws- // - // * amazon + // * amazon // - // * amzn + // * amzn // // This member is required. Name *string @@ -93,9 +93,9 @@ type CreateDocumentInput struct { // or the environment where it will run. In this case, you could specify the // following key name/value pairs: // - // * Key=OS,Value=Windows + // * Key=OS,Value=Windows // - // * + // * // Key=Environment,Value=Production // // To add tags to an existing SSM document, use diff --git a/service/ssm/api_op_CreateMaintenanceWindow.go b/service/ssm/api_op_CreateMaintenanceWindow.go index 3e378db6248..8b43e482b08 100644 --- a/service/ssm/api_op_CreateMaintenanceWindow.go +++ b/service/ssm/api_op_CreateMaintenanceWindow.go @@ -103,12 +103,12 @@ type CreateMaintenanceWindowInput struct { // tasks it will run, the types of targets, and the environment it will run in. 
In // this case, you could specify the following key name/value pairs: // - // * + // * // Key=TaskType,Value=AgentUpdate // - // * Key=OS,Value=Windows + // * Key=OS,Value=Windows // - // * + // * // Key=Environment,Value=Production // // To add tags to an existing maintenance window, diff --git a/service/ssm/api_op_CreatePatchBaseline.go b/service/ssm/api_op_CreatePatchBaseline.go index 6879906e5a7..5b7eaff6b27 100644 --- a/service/ssm/api_op_CreatePatchBaseline.go +++ b/service/ssm/api_op_CreatePatchBaseline.go @@ -80,14 +80,14 @@ type CreatePatchBaselineInput struct { // The action for Patch Manager to take on patches included in the RejectedPackages // list. // - // * ALLOW_AS_DEPENDENCY: A package in the Rejected patches list is + // * ALLOW_AS_DEPENDENCY: A package in the Rejected patches list is // installed only if it is a dependency of another package. It is considered // compliant with the patch baseline, and its status is reported as InstalledOther. // This is the default action if no option is specified. // - // * BLOCK: Packages in - // the RejectedPatches list, and packages that include them as dependencies, are - // not installed under any circumstances. If a package was installed before it was + // * BLOCK: Packages in the + // RejectedPatches list, and packages that include them as dependencies, are not + // installed under any circumstances. If a package was installed before it was // added to the Rejected patches list, it is considered non-compliant with the // patch baseline, and its status is reported as InstalledRejected. RejectedPatchesAction types.PatchAction @@ -102,10 +102,10 @@ type CreatePatchBaselineInput struct { // of patches it specifies and the operating system family it applies to. In this // case, you could specify the following key name/value pairs: // - // * + // * // Key=PatchSeverity,Value=Critical // - // * Key=OS,Value=Windows + // * Key=OS,Value=Windows // // To add tags to an // existing patch baseline, use the AddTagsToResource action. diff --git a/service/ssm/api_op_DescribeOpsItems.go b/service/ssm/api_op_DescribeOpsItems.go index af713503114..eac3554d960 100644 --- a/service/ssm/api_op_DescribeOpsItems.go +++ b/service/ssm/api_op_DescribeOpsItems.go @@ -47,43 +47,43 @@ type DescribeOpsItemsInput struct { // One or more filters to limit the response. 
// - // * Key: CreatedTime Operations: + // * Key: CreatedTime Operations: // GreaterThan, LessThan // - // * Key: LastModifiedBy Operations: Contains, Equals + // * Key: LastModifiedBy Operations: Contains, Equals // + // * + // Key: LastModifiedTime Operations: GreaterThan, LessThan // - // * Key: LastModifiedTime Operations: GreaterThan, LessThan - // - // * Key: Priority + // * Key: Priority // Operations: Equals // - // * Key: Source Operations: Contains, Equals + // * Key: Source Operations: Contains, Equals // - // * Key: - // Status Operations: Equals + // * Key: Status + // Operations: Equals // - // * Key: Title Operations: Contains + // * Key: Title Operations: Contains // - // * Key: - // OperationalData* Operations: Equals + // * Key: OperationalData* + // Operations: Equals // - // * Key: OperationalDataKey Operations: - // Equals + // * Key: OperationalDataKey Operations: Equals // - // * Key: OperationalDataValue Operations: Equals, Contains + // * Key: + // OperationalDataValue Operations: Equals, Contains // - // * Key: - // OpsItemId Operations: Equals + // * Key: OpsItemId Operations: + // Equals // - // * Key: ResourceId Operations: Contains + // * Key: ResourceId Operations: Contains // - // * - // Key: AutomationId Operations: Equals + // * Key: AutomationId Operations: + // Equals // - // *If you filter the response by using the - // OperationalData operator, specify a key-value pair by using the following JSON - // format: {"key":"key_name","value":"a_value"} + // *If you filter the response by using the OperationalData operator, + // specify a key-value pair by using the following JSON format: + // {"key":"key_name","value":"a_value"} OpsItemFilters []*types.OpsItemFilter } diff --git a/service/ssm/api_op_DescribePatchGroups.go b/service/ssm/api_op_DescribePatchGroups.go index 3160d7192a3..c281d0433a0 100644 --- a/service/ssm/api_op_DescribePatchGroups.go +++ b/service/ssm/api_op_DescribePatchGroups.go @@ -32,19 +32,19 @@ type DescribePatchGroupsInput struct { // One or more filters. Use a filter to return a more specific list of results. For // DescribePatchGroups,valid filter keys include the following: // - // * NAME_PREFIX: - // The name of the patch group. Wildcards (*) are accepted. + // * NAME_PREFIX: The + // name of the patch group. Wildcards (*) are accepted. // - // * - // OPERATING_SYSTEM: The supported operating system type to return results for. For - // valid operating system values, see - // GetDefaultPatchBaselineRequest$OperatingSystem in CreatePatchBaseline. - // Examples: + // * OPERATING_SYSTEM: The + // supported operating system type to return results for. For valid operating + // system values, see GetDefaultPatchBaselineRequest$OperatingSystem in + // CreatePatchBaseline. Examples: // - // * --filters Key=NAME_PREFIX,Values=MyPatchGroup* + // * --filters + // Key=NAME_PREFIX,Values=MyPatchGroup* // - // * - // --filters Key=OPERATING_SYSTEM,Values=AMAZON_LINUX_2 + // * --filters + // Key=OPERATING_SYSTEM,Values=AMAZON_LINUX_2 Filters []*types.PatchOrchestratorFilter // The maximum number of patch groups to return (per page). diff --git a/service/ssm/api_op_GetCommandInvocation.go b/service/ssm/api_op_GetCommandInvocation.go index 2068e8e947d..1deb23ef986 100644 --- a/service/ssm/api_op_GetCommandInvocation.go +++ b/service/ssm/api_op_GetCommandInvocation.go @@ -127,50 +127,50 @@ type GetCommandInvocationOutput struct { // in the AWS Systems Manager User Guide. 
StatusDetails can be one of the following // values: // - // * Pending: The command has not been sent to the instance. + // * Pending: The command has not been sent to the instance. // - // * In + // * In // Progress: The command has been sent to the instance but has not reached a // terminal state. // - // * Delayed: The system attempted to send the command to the + // * Delayed: The system attempted to send the command to the // target, but the target was not available. The instance might not be available // because of network issues, because the instance was stopped, or for similar // reasons. The system will try to send the command again. // - // * Success: The - // command or plugin ran successfully. This is a terminal state. + // * Success: The command + // or plugin ran successfully. This is a terminal state. // - // * Delivery - // Timed Out: The command was not delivered to the instance before the delivery - // timeout expired. Delivery timeouts do not count against the parent command's - // MaxErrors limit, but they do contribute to whether the parent command status is - // Success or Incomplete. This is a terminal state. + // * Delivery Timed Out: The + // command was not delivered to the instance before the delivery timeout expired. + // Delivery timeouts do not count against the parent command's MaxErrors limit, but + // they do contribute to whether the parent command status is Success or + // Incomplete. This is a terminal state. // - // * Execution Timed Out: The - // command started to run on the instance, but the execution was not complete - // before the timeout expired. Execution timeouts count against the MaxErrors limit - // of the parent command. This is a terminal state. + // * Execution Timed Out: The command + // started to run on the instance, but the execution was not complete before the + // timeout expired. Execution timeouts count against the MaxErrors limit of the + // parent command. This is a terminal state. // - // * Failed: The command - // wasn't run successfully on the instance. For a plugin, this indicates that the - // result code was not zero. For a command invocation, this indicates that the - // result code for one or more plugins was not zero. Invocation failures count - // against the MaxErrors limit of the parent command. This is a terminal state. + // * Failed: The command wasn't run + // successfully on the instance. For a plugin, this indicates that the result code + // was not zero. For a command invocation, this indicates that the result code for + // one or more plugins was not zero. Invocation failures count against the + // MaxErrors limit of the parent command. This is a terminal state. // + // * Canceled: + // The command was terminated before it was completed. This is a terminal state. // - // * Canceled: The command was terminated before it was completed. This is a - // terminal state. - // - // * Undeliverable: The command can't be delivered to the - // instance. The instance might not exist or might not be responding. Undeliverable - // invocations don't count against the parent command's MaxErrors limit and don't - // contribute to whether the parent command status is Success or Incomplete. This - // is a terminal state. + // * + // Undeliverable: The command can't be delivered to the instance. The instance + // might not exist or might not be responding. Undeliverable invocations don't + // count against the parent command's MaxErrors limit and don't contribute to + // whether the parent command status is Success or Incomplete. 
This is a terminal + // state. // - // * Terminated: The parent command exceeded its - // MaxErrors limit and subsequent command invocations were canceled by the system. - // This is a terminal state. + // * Terminated: The parent command exceeded its MaxErrors limit and + // subsequent command invocations were canceled by the system. This is a terminal + // state. StatusDetails *string // Metadata pertaining to the operation's result. diff --git a/service/ssm/api_op_LabelParameterVersion.go b/service/ssm/api_op_LabelParameterVersion.go index c8770b5ee47..e4a439f09fe 100644 --- a/service/ssm/api_op_LabelParameterVersion.go +++ b/service/ssm/api_op_LabelParameterVersion.go @@ -16,34 +16,34 @@ import ( // remember the purpose of a parameter when there are multiple versions. Parameter // labels have the following requirements and restrictions. // -// * A version of a +// * A version of a // parameter can have a maximum of 10 labels. // -// * You can't attach the same -// label to different versions of the same parameter. For example, if version 1 has -// the label Production, then you can't attach Production to version 2. +// * You can't attach the same label to +// different versions of the same parameter. For example, if version 1 has the +// label Production, then you can't attach Production to version 2. // -// * You -// can move a label from one version of a parameter to another. +// * You can move +// a label from one version of a parameter to another. // -// * You can't -// create a label when you create a new parameter. You must attach a label to a -// specific version of a parameter. +// * You can't create a label +// when you create a new parameter. You must attach a label to a specific version +// of a parameter. // -// * You can't delete a parameter label. If -// you no longer want to use a parameter label, then you must move it to a -// different version of a parameter. +// * You can't delete a parameter label. If you no longer want to +// use a parameter label, then you must move it to a different version of a +// parameter. // -// * A label can have a maximum of 100 -// characters. +// * A label can have a maximum of 100 characters. // -// * Labels can contain letters (case sensitive), numbers, periods -// (.), hyphens (-), or underscores (_). +// * Labels can +// contain letters (case sensitive), numbers, periods (.), hyphens (-), or +// underscores (_). // -// * Labels can't begin with a number, -// "aws," or "ssm" (not case sensitive). If a label fails to meet these -// requirements, then the label is not associated with a parameter and the system -// displays it in the list of InvalidLabels. +// * Labels can't begin with a number, "aws," or "ssm" (not case +// sensitive). If a label fails to meet these requirements, then the label is not +// associated with a parameter and the system displays it in the list of +// InvalidLabels. func (c *Client) LabelParameterVersion(ctx context.Context, params *LabelParameterVersionInput, optFns ...func(*Options)) (*LabelParameterVersionOutput, error) { if params == nil { params = &LabelParameterVersionInput{} diff --git a/service/ssm/api_op_PutComplianceItems.go b/service/ssm/api_op_PutComplianceItems.go index 3e9640104bc..6c9a4f42b58 100644 --- a/service/ssm/api_op_PutComplianceItems.go +++ b/service/ssm/api_op_PutComplianceItems.go @@ -17,50 +17,50 @@ import ( // so you must provide a full list of compliance items each time that you send the // request. 
ComplianceType can be one of the following: // -// * ExecutionId: The +// * ExecutionId: The // execution ID when the patch, association, or custom compliance item was // applied. // -// * ExecutionType: Specify patch, association, or Custom:string. +// * ExecutionType: Specify patch, association, or Custom:string. // -// -// * ExecutionTime. The time the patch, association, or custom compliance item was +// * +// ExecutionTime. The time the patch, association, or custom compliance item was // applied to the instance. // -// * Id: The patch, association, or custom compliance +// * Id: The patch, association, or custom compliance // ID. // -// * Title: A title. +// * Title: A title. // -// * Status: The status of the compliance item. For +// * Status: The status of the compliance item. For // example, approved for patches, or Failed for associations. // -// * Severity: A -// patch severity. For example, critical. -// -// * DocumentName: A SSM document name. -// For example, AWS-RunPatchBaseline. +// * Severity: A patch +// severity. For example, critical. // -// * DocumentVersion: An SSM document -// version number. For example, 4. +// * DocumentName: A SSM document name. For +// example, AWS-RunPatchBaseline. // -// * Classification: A patch classification. -// For example, security updates. +// * DocumentVersion: An SSM document version +// number. For example, 4. // -// * PatchBaselineId: A patch baseline ID. +// * Classification: A patch classification. For example, +// security updates. // +// * PatchBaselineId: A patch baseline ID. // -// * PatchSeverity: A patch severity. For example, Critical. +// * PatchSeverity: A +// patch severity. For example, Critical. // -// * PatchState: A -// patch state. For example, InstancesWithFailedPatches. +// * PatchState: A patch state. For +// example, InstancesWithFailedPatches. // -// * PatchGroup: The -// name of a patch group. +// * PatchGroup: The name of a patch +// group. // -// * InstalledTime: The time the association, patch, or -// custom compliance item was applied to the resource. Specify the time by using -// the following format: yyyy-MM-dd'T'HH:mm:ss'Z' +// * InstalledTime: The time the association, patch, or custom compliance +// item was applied to the resource. Specify the time by using the following +// format: yyyy-MM-dd'T'HH:mm:ss'Z' func (c *Client) PutComplianceItems(ctx context.Context, params *PutComplianceItemsInput, optFns ...func(*Options)) (*PutComplianceItemsOutput, error) { if params == nil { params = &PutComplianceItemsInput{} diff --git a/service/ssm/api_op_PutParameter.go b/service/ssm/api_op_PutParameter.go index 0da23056a0b..74b83a03b3d 100644 --- a/service/ssm/api_op_PutParameter.go +++ b/service/ssm/api_op_PutParameter.go @@ -35,26 +35,26 @@ type PutParameterInput struct { // slash character (/) when you create or reference a parameter. For example: // /Dev/DBServer/MySQL/db-string13 Naming Constraints: // - // * Parameter names are - // case sensitive. + // * Parameter names are case + // sensitive. // - // * A parameter name must be unique within an AWS Region + // * A parameter name must be unique within an AWS Region // + // * A + // parameter name can't be prefixed with "aws" or "ssm" (case-insensitive). // - // * A parameter name can't be prefixed with "aws" or "ssm" (case-insensitive). 
- // - // - // * Parameter names can include only the following symbols and letters: + // * + // Parameter names can include only the following symbols and letters: // a-zA-Z0-9_.-/ // - // * A parameter name can't include spaces. + // * A parameter name can't include spaces. // - // * Parameter - // hierarchies are limited to a maximum depth of fifteen levels. + // * Parameter hierarchies + // are limited to a maximum depth of fifteen levels. // - // For additional - // information about valid values for parameter names, see About requirements and - // constraints for parameter names + // For additional information + // about valid values for parameter names, see About requirements and constraints + // for parameter names // (https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-parameter-name-constraints.html) // in the AWS Systems Manager User Guide. The maximum length constraint listed // below includes capacity for additional system attributes that are not part of @@ -82,10 +82,10 @@ type PutParameterInput struct { // The data type for a String parameter. Supported data types include plain text // and Amazon Machine Image IDs. The following data type values are supported. // + // * + // text // - // * text - // - // * aws:ec2:image + // * aws:ec2:image // // When you create a String parameter and specify // aws:ec2:image, Systems Manager validates the parameter value is in the required @@ -106,13 +106,13 @@ type PutParameterInput struct { // type. If you don't specify a key ID, the system uses the default key associated // with your AWS account. // - // * To use your default AWS KMS key, choose the + // * To use your default AWS KMS key, choose the // SecureString data type, and do not specify the Key ID when you create the // parameter. The system automatically populates Key ID with your default KMS // key. // - // * To use a custom KMS key, choose the SecureString data type with the - // Key ID parameter. + // * To use a custom KMS key, choose the SecureString data type with the Key + // ID parameter. KeyId *string // Overwrite an existing parameter. If not specified, will default to "false". @@ -143,15 +143,15 @@ type PutParameterInput struct { // data referenced by the parameter. In this case, you could specify the following // key name/value pairs: // - // * Key=Resource,Value=S3bucket + // * Key=Resource,Value=S3bucket // - // * - // Key=OS,Value=Windows + // * Key=OS,Value=Windows // - // * Key=ParameterType,Value=LicenseKey + // * + // Key=ParameterType,Value=LicenseKey // - // To add tags to - // an existing Systems Manager parameter, use the AddTagsToResource action. + // To add tags to an existing Systems Manager + // parameter, use the AddTagsToResource action. Tags []*types.Tag // The parameter tier to assign to a parameter. Parameter Store offers a standard @@ -181,13 +181,13 @@ type PutParameterInput struct { // begin using Parameter Store is the standard-parameter tier. If you use the // advanced-parameter tier, you can specify one of the following as the default: // - // - // * Advanced: With this option, Parameter Store evaluates all requests as advanced + // * + // Advanced: With this option, Parameter Store evaluates all requests as advanced // parameters. // - // * Intelligent-Tiering: With this option, Parameter Store - // evaluates each request to determine if the parameter is standard or advanced. 
If - // the request doesn't include any options that require an advanced parameter, the + // * Intelligent-Tiering: With this option, Parameter Store evaluates + // each request to determine if the parameter is standard or advanced. If the + // request doesn't include any options that require an advanced parameter, the // parameter is created in the standard-parameter tier. If one or more options // requiring an advanced parameter are included in the request, Parameter Store // create a parameter in the advanced-parameter tier. This approach helps control @@ -197,17 +197,17 @@ type PutParameterInput struct { // Options that require an advanced parameter // include the following: // - // * The content size of the parameter is more than 4 + // * The content size of the parameter is more than 4 // KB. // - // * The parameter uses a parameter policy. + // * The parameter uses a parameter policy. // - // * More than 10,000 - // parameters already exist in your AWS account in the current Region. + // * More than 10,000 parameters + // already exist in your AWS account in the current Region. // - // For more - // information about configuring the default tier option, see Specifying a default - // parameter tier + // For more information + // about configuring the default tier option, see Specifying a default parameter + // tier // (https://docs.aws.amazon.com/systems-manager/latest/userguide/ps-default-tier.html) // in the AWS Systems Manager User Guide. Tier types.ParameterTier diff --git a/service/ssm/api_op_RegisterTaskWithMaintenanceWindow.go b/service/ssm/api_op_RegisterTaskWithMaintenanceWindow.go index 602f53915d8..49462b568ae 100644 --- a/service/ssm/api_op_RegisterTaskWithMaintenanceWindow.go +++ b/service/ssm/api_op_RegisterTaskWithMaintenanceWindow.go @@ -91,12 +91,12 @@ type RegisterTaskWithMaintenanceWindowInput struct { // RegisterTaskWithMaintenanceWindow. For more information, see the following // topics in the in the AWS Systems Manager User Guide: // - // * Using service-linked + // * Using service-linked // roles for Systems Manager // (https://docs.aws.amazon.com/systems-manager/latest/userguide/using-service-linked-roles.html#slr-permissions) // - // - // * Should I use a service-linked role or a custom service role to run maintenance + // * + // Should I use a service-linked role or a custom service role to run maintenance // window tasks? // (https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-maintenance-permissions.html#maintenance-window-tasks-service-role) ServiceRoleArn *string diff --git a/service/ssm/api_op_StartAutomationExecution.go b/service/ssm/api_op_StartAutomationExecution.go index c9e8bc6c1e8..b92def35a97 100644 --- a/service/ssm/api_op_StartAutomationExecution.go +++ b/service/ssm/api_op_StartAutomationExecution.go @@ -74,13 +74,13 @@ type StartAutomationExecutionInput struct { // might want to tag an automation to identify an environment or operating system. // In this case, you could specify the following key name/value pairs: // - // * + // * // Key=environment,Value=test // - // * Key=OS,Value=Windows + // * Key=OS,Value=Windows // - // To add tags to an - // existing patch baseline, use the AddTagsToResource action. + // To add tags to an existing + // patch baseline, use the AddTagsToResource action. 
Tags []*types.Tag // A location is a combination of AWS Regions and/or AWS accounts where you want to diff --git a/service/ssm/api_op_UpdateMaintenanceWindowTarget.go b/service/ssm/api_op_UpdateMaintenanceWindowTarget.go index 7070b367d7a..d0521f98a7d 100644 --- a/service/ssm/api_op_UpdateMaintenanceWindowTarget.go +++ b/service/ssm/api_op_UpdateMaintenanceWindowTarget.go @@ -14,23 +14,22 @@ import ( // Modifies the target of an existing maintenance window. You can change the // following: // -// * Name +// * Name // -// * Description +// * Description // -// * Owner +// * Owner // -// * IDs for an ID -// target +// * IDs for an ID target // -// * Tags for a Tag target +// * Tags for a +// Tag target // -// * From any supported tag type to -// another. The three supported tag types are ID target, Tag target, and resource -// group. For more information, see Target. +// * From any supported tag type to another. The three supported tag +// types are ID target, Tag target, and resource group. For more information, see +// Target. // -// If a parameter is null, then the -// corresponding field is not modified. +// If a parameter is null, then the corresponding field is not modified. func (c *Client) UpdateMaintenanceWindowTarget(ctx context.Context, params *UpdateMaintenanceWindowTargetInput, optFns ...func(*Options)) (*UpdateMaintenanceWindowTargetOutput, error) { if params == nil { params = &UpdateMaintenanceWindowTargetInput{} diff --git a/service/ssm/api_op_UpdateMaintenanceWindowTask.go b/service/ssm/api_op_UpdateMaintenanceWindowTask.go index 2820c42bfa8..62c3b6e21ab 100644 --- a/service/ssm/api_op_UpdateMaintenanceWindowTask.go +++ b/service/ssm/api_op_UpdateMaintenanceWindowTask.go @@ -14,33 +14,33 @@ import ( // Modifies a task assigned to a maintenance window. You can't change the task // type, but you can change the following values: // -// * TaskARN. For example, you -// can change a RUN_COMMAND task from AWS-RunPowerShellScript to -// AWS-RunShellScript. +// * TaskARN. For example, you can +// change a RUN_COMMAND task from AWS-RunPowerShellScript to AWS-RunShellScript. // -// * ServiceRoleArn +// * +// ServiceRoleArn // -// * TaskInvocationParameters +// * TaskInvocationParameters // -// * -// Priority +// * Priority // -// * MaxConcurrency +// * MaxConcurrency // -// * MaxErrors +// * +// MaxErrors // -// If the value for a parameter in -// UpdateMaintenanceWindowTask is null, then the corresponding field is not -// modified. If you set Replace to true, then all fields required by the -// RegisterTaskWithMaintenanceWindow action are required for this request. Optional -// fields that aren't specified are set to null. When you update a maintenance -// window task that has options specified in TaskInvocationParameters, you must -// provide again all the TaskInvocationParameters values that you want to retain. -// The values you do not specify again are removed. For example, suppose that when -// you registered a Run Command task, you specified TaskInvocationParameters values -// for Comment, NotificationConfig, and OutputS3BucketName. If you update the -// maintenance window task and specify only a different OutputS3BucketName value, -// the values for Comment and NotificationConfig are removed. +// If the value for a parameter in UpdateMaintenanceWindowTask is null, +// then the corresponding field is not modified. If you set Replace to true, then +// all fields required by the RegisterTaskWithMaintenanceWindow action are required +// for this request. 
Optional fields that aren't specified are set to null. When +// you update a maintenance window task that has options specified in +// TaskInvocationParameters, you must provide again all the +// TaskInvocationParameters values that you want to retain. The values you do not +// specify again are removed. For example, suppose that when you registered a Run +// Command task, you specified TaskInvocationParameters values for Comment, +// NotificationConfig, and OutputS3BucketName. If you update the maintenance window +// task and specify only a different OutputS3BucketName value, the values for +// Comment and NotificationConfig are removed. func (c *Client) UpdateMaintenanceWindowTask(ctx context.Context, params *UpdateMaintenanceWindowTaskInput, optFns ...func(*Options)) (*UpdateMaintenanceWindowTaskOutput, error) { if params == nil { params = &UpdateMaintenanceWindowTaskInput{} @@ -106,12 +106,12 @@ type UpdateMaintenanceWindowTaskInput struct { // RegisterTaskWithMaintenanceWindow. For more information, see the following // topics in the in the AWS Systems Manager User Guide: // - // * Using service-linked + // * Using service-linked // roles for Systems Manager // (https://docs.aws.amazon.com/systems-manager/latest/userguide/using-service-linked-roles.html#slr-permissions) // - // - // * Should I use a service-linked role or a custom service role to run maintenance + // * + // Should I use a service-linked role or a custom service role to run maintenance // window tasks? // (https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-maintenance-permissions.html#maintenance-window-tasks-service-role) ServiceRoleArn *string diff --git a/service/ssm/api_op_UpdatePatchBaseline.go b/service/ssm/api_op_UpdatePatchBaseline.go index 282f796d55b..c5a018ca8c4 100644 --- a/service/ssm/api_op_UpdatePatchBaseline.go +++ b/service/ssm/api_op_UpdatePatchBaseline.go @@ -75,14 +75,14 @@ type UpdatePatchBaselineInput struct { // The action for Patch Manager to take on patches included in the RejectedPackages // list. // - // * ALLOW_AS_DEPENDENCY: A package in the Rejected patches list is + // * ALLOW_AS_DEPENDENCY: A package in the Rejected patches list is // installed only if it is a dependency of another package. It is considered // compliant with the patch baseline, and its status is reported as InstalledOther. // This is the default action if no option is specified. // - // * BLOCK: Packages in - // the RejectedPatches list, and packages that include them as dependencies, are - // not installed under any circumstances. If a package was installed before it was + // * BLOCK: Packages in the + // RejectedPatches list, and packages that include them as dependencies, are not + // installed under any circumstances. If a package was installed before it was // added to the Rejected patches list, it is considered non-compliant with the // patch baseline, and its status is reported as InstalledRejected. RejectedPatchesAction types.PatchAction diff --git a/service/ssm/api_op_UpdateServiceSetting.go b/service/ssm/api_op_UpdateServiceSetting.go index 61b366d7628..5d58dcee3ea 100644 --- a/service/ssm/api_op_UpdateServiceSetting.go +++ b/service/ssm/api_op_UpdateServiceSetting.go @@ -44,13 +44,13 @@ type UpdateServiceSettingInput struct { // arn:aws:ssm:us-east-1:111122223333:servicesetting/ssm/parameter-store/high-throughput-enabled. // The setting ID can be one of the following. 
// - // * + // * // /ssm/parameter-store/default-parameter-tier // - // * + // * // /ssm/parameter-store/high-throughput-enabled // - // * + // * // /ssm/managed-instance/activation-tier // // This member is required. @@ -60,14 +60,14 @@ type UpdateServiceSettingInput struct { // /ssm/parameter-store/default-parameter-tier setting ID, the setting value can be // one of the following. // - // * Standard + // * Standard // - // * Advanced + // * Advanced // - // * - // Intelligent-Tiering + // * Intelligent-Tiering // - // For the /ssm/parameter-store/high-throughput-enabled, and + // For the + // /ssm/parameter-store/high-throughput-enabled, and // /ssm/managed-instance/activation-tier setting IDs, the setting value can be true // or false. // diff --git a/service/ssm/types/enums.go b/service/ssm/types/enums.go index 62b3c9ecdea..fda3849439a 100644 --- a/service/ssm/types/enums.go +++ b/service/ssm/types/enums.go @@ -198,15 +198,15 @@ type AutomationExecutionFilterKey string // Enum values for AutomationExecutionFilterKey const ( - AutomationExecutionFilterKeyDocument_name_prefix AutomationExecutionFilterKey = "DocumentNamePrefix" - AutomationExecutionFilterKeyExecution_status AutomationExecutionFilterKey = "ExecutionStatus" - AutomationExecutionFilterKeyExecution_id AutomationExecutionFilterKey = "ExecutionId" - AutomationExecutionFilterKeyParent_execution_id AutomationExecutionFilterKey = "ParentExecutionId" - AutomationExecutionFilterKeyCurrent_action AutomationExecutionFilterKey = "CurrentAction" - AutomationExecutionFilterKeyStart_time_before AutomationExecutionFilterKey = "StartTimeBefore" - AutomationExecutionFilterKeyStart_time_after AutomationExecutionFilterKey = "StartTimeAfter" - AutomationExecutionFilterKeyAutomation_type AutomationExecutionFilterKey = "AutomationType" - AutomationExecutionFilterKeyTag_key AutomationExecutionFilterKey = "TagKey" + AutomationExecutionFilterKeyDocumentNamePrefix AutomationExecutionFilterKey = "DocumentNamePrefix" + AutomationExecutionFilterKeyExecutionStatus AutomationExecutionFilterKey = "ExecutionStatus" + AutomationExecutionFilterKeyExecutionId AutomationExecutionFilterKey = "ExecutionId" + AutomationExecutionFilterKeyParentExecutionId AutomationExecutionFilterKey = "ParentExecutionId" + AutomationExecutionFilterKeyCurrentAction AutomationExecutionFilterKey = "CurrentAction" + AutomationExecutionFilterKeyStartTimeBefore AutomationExecutionFilterKey = "StartTimeBefore" + AutomationExecutionFilterKeyStartTimeAfter AutomationExecutionFilterKey = "StartTimeAfter" + AutomationExecutionFilterKeyAutomationType AutomationExecutionFilterKey = "AutomationType" + AutomationExecutionFilterKeyTagKey AutomationExecutionFilterKey = "TagKey" ) // Values returns all known values for AutomationExecutionFilterKey. 
Note that this @@ -296,11 +296,11 @@ type CommandFilterKey string // Enum values for CommandFilterKey const ( - CommandFilterKeyInvoked_after CommandFilterKey = "InvokedAfter" - CommandFilterKeyInvoked_before CommandFilterKey = "InvokedBefore" - CommandFilterKeyStatus CommandFilterKey = "Status" - CommandFilterKeyExecution_stage CommandFilterKey = "ExecutionStage" - CommandFilterKeyDocument_name CommandFilterKey = "DocumentName" + CommandFilterKeyInvokedAfter CommandFilterKey = "InvokedAfter" + CommandFilterKeyInvokedBefore CommandFilterKey = "InvokedBefore" + CommandFilterKeyStatus CommandFilterKey = "Status" + CommandFilterKeyExecutionStage CommandFilterKey = "ExecutionStage" + CommandFilterKeyDocumentName CommandFilterKey = "DocumentName" ) // Values returns all known values for CommandFilterKey. Note that this can be @@ -320,14 +320,14 @@ type CommandInvocationStatus string // Enum values for CommandInvocationStatus const ( - CommandInvocationStatusPending CommandInvocationStatus = "Pending" - CommandInvocationStatusIn_progress CommandInvocationStatus = "InProgress" - CommandInvocationStatusDelayed CommandInvocationStatus = "Delayed" - CommandInvocationStatusSuccess CommandInvocationStatus = "Success" - CommandInvocationStatusCancelled CommandInvocationStatus = "Cancelled" - CommandInvocationStatusTimed_out CommandInvocationStatus = "TimedOut" - CommandInvocationStatusFailed CommandInvocationStatus = "Failed" - CommandInvocationStatusCancelling CommandInvocationStatus = "Cancelling" + CommandInvocationStatusPending CommandInvocationStatus = "Pending" + CommandInvocationStatusInProgress CommandInvocationStatus = "InProgress" + CommandInvocationStatusDelayed CommandInvocationStatus = "Delayed" + CommandInvocationStatusSuccess CommandInvocationStatus = "Success" + CommandInvocationStatusCancelled CommandInvocationStatus = "Cancelled" + CommandInvocationStatusTimedOut CommandInvocationStatus = "TimedOut" + CommandInvocationStatusFailed CommandInvocationStatus = "Failed" + CommandInvocationStatusCancelling CommandInvocationStatus = "Cancelling" ) // Values returns all known values for CommandInvocationStatus. Note that this can @@ -350,12 +350,12 @@ type CommandPluginStatus string // Enum values for CommandPluginStatus const ( - CommandPluginStatusPending CommandPluginStatus = "Pending" - CommandPluginStatusIn_progress CommandPluginStatus = "InProgress" - CommandPluginStatusSuccess CommandPluginStatus = "Success" - CommandPluginStatusTimed_out CommandPluginStatus = "TimedOut" - CommandPluginStatusCancelled CommandPluginStatus = "Cancelled" - CommandPluginStatusFailed CommandPluginStatus = "Failed" + CommandPluginStatusPending CommandPluginStatus = "Pending" + CommandPluginStatusInProgress CommandPluginStatus = "InProgress" + CommandPluginStatusSuccess CommandPluginStatus = "Success" + CommandPluginStatusTimedOut CommandPluginStatus = "TimedOut" + CommandPluginStatusCancelled CommandPluginStatus = "Cancelled" + CommandPluginStatusFailed CommandPluginStatus = "Failed" ) // Values returns all known values for CommandPluginStatus. 
Note that this can be @@ -376,13 +376,13 @@ type CommandStatus string // Enum values for CommandStatus const ( - CommandStatusPending CommandStatus = "Pending" - CommandStatusIn_progress CommandStatus = "InProgress" - CommandStatusSuccess CommandStatus = "Success" - CommandStatusCancelled CommandStatus = "Cancelled" - CommandStatusFailed CommandStatus = "Failed" - CommandStatusTimed_out CommandStatus = "TimedOut" - CommandStatusCancelling CommandStatus = "Cancelling" + CommandStatusPending CommandStatus = "Pending" + CommandStatusInProgress CommandStatus = "InProgress" + CommandStatusSuccess CommandStatus = "Success" + CommandStatusCancelled CommandStatus = "Cancelled" + CommandStatusFailed CommandStatus = "Failed" + CommandStatusTimedOut CommandStatus = "TimedOut" + CommandStatusCancelling CommandStatus = "Cancelling" ) // Values returns all known values for CommandStatus. Note that this can be @@ -490,8 +490,8 @@ type ConnectionStatus string // Enum values for ConnectionStatus const ( - ConnectionStatusConnected ConnectionStatus = "Connected" - ConnectionStatusNot_connected ConnectionStatus = "NotConnected" + ConnectionStatusConnected ConnectionStatus = "Connected" + ConnectionStatusNotConnected ConnectionStatus = "NotConnected" ) // Values returns all known values for ConnectionStatus. Note that this can be @@ -508,9 +508,9 @@ type DescribeActivationsFilterKeys string // Enum values for DescribeActivationsFilterKeys const ( - DescribeActivationsFilterKeysActivation_ids DescribeActivationsFilterKeys = "ActivationIds" - DescribeActivationsFilterKeysDefault_instance_name DescribeActivationsFilterKeys = "DefaultInstanceName" - DescribeActivationsFilterKeysIam_role DescribeActivationsFilterKeys = "IamRole" + DescribeActivationsFilterKeysActivationIds DescribeActivationsFilterKeys = "ActivationIds" + DescribeActivationsFilterKeysDefaultInstanceName DescribeActivationsFilterKeys = "DefaultInstanceName" + DescribeActivationsFilterKeysIamRole DescribeActivationsFilterKeys = "IamRole" ) // Values returns all known values for DescribeActivationsFilterKeys. 
Note that @@ -717,14 +717,14 @@ type InstanceInformationFilterKey string // Enum values for InstanceInformationFilterKey const ( - InstanceInformationFilterKeyInstance_ids InstanceInformationFilterKey = "InstanceIds" - InstanceInformationFilterKeyAgent_version InstanceInformationFilterKey = "AgentVersion" - InstanceInformationFilterKeyPing_status InstanceInformationFilterKey = "PingStatus" - InstanceInformationFilterKeyPlatform_types InstanceInformationFilterKey = "PlatformTypes" - InstanceInformationFilterKeyActivation_ids InstanceInformationFilterKey = "ActivationIds" - InstanceInformationFilterKeyIam_role InstanceInformationFilterKey = "IamRole" - InstanceInformationFilterKeyResource_type InstanceInformationFilterKey = "ResourceType" - InstanceInformationFilterKeyAssociation_status InstanceInformationFilterKey = "AssociationStatus" + InstanceInformationFilterKeyInstanceIds InstanceInformationFilterKey = "InstanceIds" + InstanceInformationFilterKeyAgentVersion InstanceInformationFilterKey = "AgentVersion" + InstanceInformationFilterKeyPingStatus InstanceInformationFilterKey = "PingStatus" + InstanceInformationFilterKeyPlatformTypes InstanceInformationFilterKey = "PlatformTypes" + InstanceInformationFilterKeyActivationIds InstanceInformationFilterKey = "ActivationIds" + InstanceInformationFilterKeyIamRole InstanceInformationFilterKey = "IamRole" + InstanceInformationFilterKeyResourceType InstanceInformationFilterKey = "ResourceType" + InstanceInformationFilterKeyAssociationStatus InstanceInformationFilterKey = "AssociationStatus" ) // Values returns all known values for InstanceInformationFilterKey. Note that this @@ -747,10 +747,10 @@ type InstancePatchStateOperatorType string // Enum values for InstancePatchStateOperatorType const ( - InstancePatchStateOperatorTypeEqual InstancePatchStateOperatorType = "Equal" - InstancePatchStateOperatorTypeNot_equal InstancePatchStateOperatorType = "NotEqual" - InstancePatchStateOperatorTypeLess_than InstancePatchStateOperatorType = "LessThan" - InstancePatchStateOperatorTypeGreater_than InstancePatchStateOperatorType = "GreaterThan" + InstancePatchStateOperatorTypeEqual InstancePatchStateOperatorType = "Equal" + InstancePatchStateOperatorTypeNotEqual InstancePatchStateOperatorType = "NotEqual" + InstancePatchStateOperatorTypeLessThan InstancePatchStateOperatorType = "LessThan" + InstancePatchStateOperatorTypeGreaterThan InstancePatchStateOperatorType = "GreaterThan" ) // Values returns all known values for InstancePatchStateOperatorType. Note that @@ -788,8 +788,8 @@ type InventoryDeletionStatus string // Enum values for InventoryDeletionStatus const ( - InventoryDeletionStatusIn_progress InventoryDeletionStatus = "InProgress" - InventoryDeletionStatusComplete InventoryDeletionStatus = "Complete" + InventoryDeletionStatusInProgress InventoryDeletionStatus = "InProgress" + InventoryDeletionStatusComplete InventoryDeletionStatus = "Complete" ) // Values returns all known values for InventoryDeletionStatus. 
Note that this can @@ -806,12 +806,12 @@ type InventoryQueryOperatorType string // Enum values for InventoryQueryOperatorType const ( - InventoryQueryOperatorTypeEqual InventoryQueryOperatorType = "Equal" - InventoryQueryOperatorTypeNot_equal InventoryQueryOperatorType = "NotEqual" - InventoryQueryOperatorTypeBegin_with InventoryQueryOperatorType = "BeginWith" - InventoryQueryOperatorTypeLess_than InventoryQueryOperatorType = "LessThan" - InventoryQueryOperatorTypeGreater_than InventoryQueryOperatorType = "GreaterThan" - InventoryQueryOperatorTypeExists InventoryQueryOperatorType = "Exists" + InventoryQueryOperatorTypeEqual InventoryQueryOperatorType = "Equal" + InventoryQueryOperatorTypeNotEqual InventoryQueryOperatorType = "NotEqual" + InventoryQueryOperatorTypeBeginWith InventoryQueryOperatorType = "BeginWith" + InventoryQueryOperatorTypeLessThan InventoryQueryOperatorType = "LessThan" + InventoryQueryOperatorTypeGreaterThan InventoryQueryOperatorType = "GreaterThan" + InventoryQueryOperatorTypeExists InventoryQueryOperatorType = "Exists" ) // Values returns all known values for InventoryQueryOperatorType. Note that this @@ -832,8 +832,8 @@ type InventorySchemaDeleteOption string // Enum values for InventorySchemaDeleteOption const ( - InventorySchemaDeleteOptionDisable_schema InventorySchemaDeleteOption = "DisableSchema" - InventorySchemaDeleteOptionDelete_schema InventorySchemaDeleteOption = "DeleteSchema" + InventorySchemaDeleteOptionDisableSchema InventorySchemaDeleteOption = "DisableSchema" + InventorySchemaDeleteOptionDeleteSchema InventorySchemaDeleteOption = "DeleteSchema" ) // Values returns all known values for InventorySchemaDeleteOption. Note that this @@ -942,12 +942,12 @@ type NotificationEvent string // Enum values for NotificationEvent const ( - NotificationEventAll NotificationEvent = "All" - NotificationEventIn_progress NotificationEvent = "InProgress" - NotificationEventSuccess NotificationEvent = "Success" - NotificationEventTimed_out NotificationEvent = "TimedOut" - NotificationEventCancelled NotificationEvent = "Cancelled" - NotificationEventFailed NotificationEvent = "Failed" + NotificationEventAll NotificationEvent = "All" + NotificationEventInProgress NotificationEvent = "InProgress" + NotificationEventSuccess NotificationEvent = "Success" + NotificationEventTimedOut NotificationEvent = "TimedOut" + NotificationEventCancelled NotificationEvent = "Cancelled" + NotificationEventFailed NotificationEvent = "Failed" ) // Values returns all known values for NotificationEvent. Note that this can be @@ -1018,12 +1018,12 @@ type OpsFilterOperatorType string // Enum values for OpsFilterOperatorType const ( - OpsFilterOperatorTypeEqual OpsFilterOperatorType = "Equal" - OpsFilterOperatorTypeNot_equal OpsFilterOperatorType = "NotEqual" - OpsFilterOperatorTypeBegin_with OpsFilterOperatorType = "BeginWith" - OpsFilterOperatorTypeLess_than OpsFilterOperatorType = "LessThan" - OpsFilterOperatorTypeGreater_than OpsFilterOperatorType = "GreaterThan" - OpsFilterOperatorTypeExists OpsFilterOperatorType = "Exists" + OpsFilterOperatorTypeEqual OpsFilterOperatorType = "Equal" + OpsFilterOperatorTypeNotEqual OpsFilterOperatorType = "NotEqual" + OpsFilterOperatorTypeBeginWith OpsFilterOperatorType = "BeginWith" + OpsFilterOperatorTypeLessThan OpsFilterOperatorType = "LessThan" + OpsFilterOperatorTypeGreaterThan OpsFilterOperatorType = "GreaterThan" + OpsFilterOperatorTypeExists OpsFilterOperatorType = "Exists" ) // Values returns all known values for OpsFilterOperatorType. 
Note that this can be @@ -1044,8 +1044,8 @@ type OpsItemDataType string // Enum values for OpsItemDataType const ( - OpsItemDataTypeSearchable_string OpsItemDataType = "SearchableString" - OpsItemDataTypeString OpsItemDataType = "String" + OpsItemDataTypeSearchableString OpsItemDataType = "SearchableString" + OpsItemDataTypeString OpsItemDataType = "String" ) // Values returns all known values for OpsItemDataType. Note that this can be @@ -1062,21 +1062,21 @@ type OpsItemFilterKey string // Enum values for OpsItemFilterKey const ( - OpsItemFilterKeyStatus OpsItemFilterKey = "Status" - OpsItemFilterKeyCreated_by OpsItemFilterKey = "CreatedBy" - OpsItemFilterKeySource OpsItemFilterKey = "Source" - OpsItemFilterKeyPriority OpsItemFilterKey = "Priority" - OpsItemFilterKeyTitle OpsItemFilterKey = "Title" - OpsItemFilterKeyOpsitem_id OpsItemFilterKey = "OpsItemId" - OpsItemFilterKeyCreated_time OpsItemFilterKey = "CreatedTime" - OpsItemFilterKeyLast_modified_time OpsItemFilterKey = "LastModifiedTime" - OpsItemFilterKeyOperational_data OpsItemFilterKey = "OperationalData" - OpsItemFilterKeyOperational_data_key OpsItemFilterKey = "OperationalDataKey" - OpsItemFilterKeyOperational_data_value OpsItemFilterKey = "OperationalDataValue" - OpsItemFilterKeyResource_id OpsItemFilterKey = "ResourceId" - OpsItemFilterKeyAutomation_id OpsItemFilterKey = "AutomationId" - OpsItemFilterKeyCategory OpsItemFilterKey = "Category" - OpsItemFilterKeySeverity OpsItemFilterKey = "Severity" + OpsItemFilterKeyStatus OpsItemFilterKey = "Status" + OpsItemFilterKeyCreatedBy OpsItemFilterKey = "CreatedBy" + OpsItemFilterKeySource OpsItemFilterKey = "Source" + OpsItemFilterKeyPriority OpsItemFilterKey = "Priority" + OpsItemFilterKeyTitle OpsItemFilterKey = "Title" + OpsItemFilterKeyOpsitemId OpsItemFilterKey = "OpsItemId" + OpsItemFilterKeyCreatedTime OpsItemFilterKey = "CreatedTime" + OpsItemFilterKeyLastModifiedTime OpsItemFilterKey = "LastModifiedTime" + OpsItemFilterKeyOperationalData OpsItemFilterKey = "OperationalData" + OpsItemFilterKeyOperationalDataKey OpsItemFilterKey = "OperationalDataKey" + OpsItemFilterKeyOperationalDataValue OpsItemFilterKey = "OperationalDataValue" + OpsItemFilterKeyResourceId OpsItemFilterKey = "ResourceId" + OpsItemFilterKeyAutomationId OpsItemFilterKey = "AutomationId" + OpsItemFilterKeyCategory OpsItemFilterKey = "Category" + OpsItemFilterKeySeverity OpsItemFilterKey = "Severity" ) // Values returns all known values for OpsItemFilterKey. Note that this can be @@ -1106,10 +1106,10 @@ type OpsItemFilterOperator string // Enum values for OpsItemFilterOperator const ( - OpsItemFilterOperatorEqual OpsItemFilterOperator = "Equal" - OpsItemFilterOperatorContains OpsItemFilterOperator = "Contains" - OpsItemFilterOperatorGreater_than OpsItemFilterOperator = "GreaterThan" - OpsItemFilterOperatorLess_than OpsItemFilterOperator = "LessThan" + OpsItemFilterOperatorEqual OpsItemFilterOperator = "Equal" + OpsItemFilterOperatorContains OpsItemFilterOperator = "Contains" + OpsItemFilterOperatorGreaterThan OpsItemFilterOperator = "GreaterThan" + OpsItemFilterOperatorLessThan OpsItemFilterOperator = "LessThan" ) // Values returns all known values for OpsItemFilterOperator. 
Note that this can be @@ -1128,9 +1128,9 @@ type OpsItemStatus string // Enum values for OpsItemStatus const ( - OpsItemStatusOpen OpsItemStatus = "Open" - OpsItemStatusIn_progress OpsItemStatus = "InProgress" - OpsItemStatusResolved OpsItemStatus = "Resolved" + OpsItemStatusOpen OpsItemStatus = "Open" + OpsItemStatusInProgress OpsItemStatus = "InProgress" + OpsItemStatusResolved OpsItemStatus = "Resolved" ) // Values returns all known values for OpsItemStatus. Note that this can be @@ -1148,9 +1148,9 @@ type ParametersFilterKey string // Enum values for ParametersFilterKey const ( - ParametersFilterKeyName ParametersFilterKey = "Name" - ParametersFilterKeyType ParametersFilterKey = "Type" - ParametersFilterKeyKey_id ParametersFilterKey = "KeyId" + ParametersFilterKeyName ParametersFilterKey = "Name" + ParametersFilterKeyType ParametersFilterKey = "Type" + ParametersFilterKeyKeyId ParametersFilterKey = "KeyId" ) // Values returns all known values for ParametersFilterKey. Note that this can be @@ -1168,9 +1168,9 @@ type ParameterTier string // Enum values for ParameterTier const ( - ParameterTierStandard ParameterTier = "Standard" - ParameterTierAdvanced ParameterTier = "Advanced" - ParameterTierIntelligent_tiering ParameterTier = "Intelligent-Tiering" + ParameterTierStandard ParameterTier = "Standard" + ParameterTierAdvanced ParameterTier = "Advanced" + ParameterTierIntelligentTiering ParameterTier = "Intelligent-Tiering" ) // Values returns all known values for ParameterTier. Note that this can be @@ -1188,9 +1188,9 @@ type ParameterType string // Enum values for ParameterType const ( - ParameterTypeString ParameterType = "String" - ParameterTypeString_list ParameterType = "StringList" - ParameterTypeSecure_string ParameterType = "SecureString" + ParameterTypeString ParameterType = "String" + ParameterTypeStringList ParameterType = "StringList" + ParameterTypeSecureString ParameterType = "SecureString" ) // Values returns all known values for ParameterType. Note that this can be @@ -1416,9 +1416,9 @@ type PingStatus string // Enum values for PingStatus const ( - PingStatusOnline PingStatus = "Online" - PingStatusConnection_lost PingStatus = "ConnectionLost" - PingStatusInactive PingStatus = "Inactive" + PingStatusOnline PingStatus = "Online" + PingStatusConnectionLost PingStatus = "ConnectionLost" + PingStatusInactive PingStatus = "Inactive" ) // Values returns all known values for PingStatus. Note that this can be expanded @@ -1454,8 +1454,8 @@ type RebootOption string // Enum values for RebootOption const ( - RebootOptionReboot_if_needed RebootOption = "RebootIfNeeded" - RebootOptionNo_reboot RebootOption = "NoReboot" + RebootOptionRebootIfNeeded RebootOption = "RebootIfNeeded" + RebootOptionNoReboot RebootOption = "NoReboot" ) // Values returns all known values for RebootOption. Note that this can be expanded @@ -1472,7 +1472,7 @@ type ResourceDataSyncS3Format string // Enum values for ResourceDataSyncS3Format const ( - ResourceDataSyncS3FormatJson_serde ResourceDataSyncS3Format = "JsonSerDe" + ResourceDataSyncS3FormatJsonSerde ResourceDataSyncS3Format = "JsonSerDe" ) // Values returns all known values for ResourceDataSyncS3Format. 
Note that this can @@ -1488,9 +1488,9 @@ type ResourceType string // Enum values for ResourceType const ( - ResourceTypeManaged_instance ResourceType = "ManagedInstance" - ResourceTypeDocument ResourceType = "Document" - ResourceTypeEc2_instance ResourceType = "EC2Instance" + ResourceTypeManagedInstance ResourceType = "ManagedInstance" + ResourceTypeDocument ResourceType = "Document" + ResourceTypeEc2Instance ResourceType = "EC2Instance" ) // Values returns all known values for ResourceType. Note that this can be expanded @@ -1508,12 +1508,12 @@ type ResourceTypeForTagging string // Enum values for ResourceTypeForTagging const ( - ResourceTypeForTaggingDocument ResourceTypeForTagging = "Document" - ResourceTypeForTaggingManaged_instance ResourceTypeForTagging = "ManagedInstance" - ResourceTypeForTaggingMaintenance_window ResourceTypeForTagging = "MaintenanceWindow" - ResourceTypeForTaggingParameter ResourceTypeForTagging = "Parameter" - ResourceTypeForTaggingPatch_baseline ResourceTypeForTagging = "PatchBaseline" - ResourceTypeForTaggingOps_item ResourceTypeForTagging = "OpsItem" + ResourceTypeForTaggingDocument ResourceTypeForTagging = "Document" + ResourceTypeForTaggingManagedInstance ResourceTypeForTagging = "ManagedInstance" + ResourceTypeForTaggingMaintenanceWindow ResourceTypeForTagging = "MaintenanceWindow" + ResourceTypeForTaggingParameter ResourceTypeForTagging = "Parameter" + ResourceTypeForTaggingPatchBaseline ResourceTypeForTagging = "PatchBaseline" + ResourceTypeForTaggingOpsItem ResourceTypeForTagging = "OpsItem" ) // Values returns all known values for ResourceTypeForTagging. Note that this can @@ -1534,11 +1534,11 @@ type SessionFilterKey string // Enum values for SessionFilterKey const ( - SessionFilterKeyInvoked_after SessionFilterKey = "InvokedAfter" - SessionFilterKeyInvoked_before SessionFilterKey = "InvokedBefore" - SessionFilterKeyTarget_id SessionFilterKey = "Target" - SessionFilterKeyOwner SessionFilterKey = "Owner" - SessionFilterKeyStatus SessionFilterKey = "Status" + SessionFilterKeyInvokedAfter SessionFilterKey = "InvokedAfter" + SessionFilterKeyInvokedBefore SessionFilterKey = "InvokedBefore" + SessionFilterKeyTargetId SessionFilterKey = "Target" + SessionFilterKeyOwner SessionFilterKey = "Owner" + SessionFilterKeyStatus SessionFilterKey = "Status" ) // Values returns all known values for SessionFilterKey. Note that this can be @@ -1602,11 +1602,11 @@ type SignalType string // Enum values for SignalType const ( - SignalTypeApprove SignalType = "Approve" - SignalTypeReject SignalType = "Reject" - SignalTypeStart_step SignalType = "StartStep" - SignalTypeStop_step SignalType = "StopStep" - SignalTypeResume SignalType = "Resume" + SignalTypeApprove SignalType = "Approve" + SignalTypeReject SignalType = "Reject" + SignalTypeStartStep SignalType = "StartStep" + SignalTypeStopStep SignalType = "StopStep" + SignalTypeResume SignalType = "Resume" ) // Values returns all known values for SignalType. 
Note that this can be expanded @@ -1626,12 +1626,12 @@ type StepExecutionFilterKey string // Enum values for StepExecutionFilterKey const ( - StepExecutionFilterKeyStart_time_before StepExecutionFilterKey = "StartTimeBefore" - StepExecutionFilterKeyStart_time_after StepExecutionFilterKey = "StartTimeAfter" - StepExecutionFilterKeyStep_execution_status StepExecutionFilterKey = "StepExecutionStatus" - StepExecutionFilterKeyStep_execution_id StepExecutionFilterKey = "StepExecutionId" - StepExecutionFilterKeyStep_name StepExecutionFilterKey = "StepName" - StepExecutionFilterKeyAction StepExecutionFilterKey = "Action" + StepExecutionFilterKeyStartTimeBefore StepExecutionFilterKey = "StartTimeBefore" + StepExecutionFilterKeyStartTimeAfter StepExecutionFilterKey = "StartTimeAfter" + StepExecutionFilterKeyStepExecutionStatus StepExecutionFilterKey = "StepExecutionStatus" + StepExecutionFilterKeyStepExecutionId StepExecutionFilterKey = "StepExecutionId" + StepExecutionFilterKeyStepName StepExecutionFilterKey = "StepName" + StepExecutionFilterKeyAction StepExecutionFilterKey = "Action" ) // Values returns all known values for StepExecutionFilterKey. Note that this can diff --git a/service/ssm/types/types.go b/service/ssm/types/types.go index e4bd3147937..123f5a33762 100644 --- a/service/ssm/types/types.go +++ b/service/ssm/types/types.go @@ -457,15 +457,15 @@ type AttachmentsSource struct { // The value of a key-value pair that identifies the location of an attachment to a // document. The format for Value depends on the type of key you specify. // - // * - // For the key SourceUrl, the value is an S3 bucket location. For example: - // "Values": [ "s3://doc-example-bucket/my-folder" ] + // * For + // the key SourceUrl, the value is an S3 bucket location. For example: "Values": [ + // "s3://doc-example-bucket/my-folder" ] // - // * For the key S3FileUrl, - // the value is a file in an S3 bucket. For example: "Values": [ + // * For the key S3FileUrl, the value is a + // file in an S3 bucket. For example: "Values": [ // "s3://doc-example-bucket/my-folder/my-file.py" ] // - // * For the key + // * For the key // AttachmentReference, the value is constructed from the name of another SSM // document in your account, a version number of that document, and a file attached // to that document version that you want to reuse. For example: "Values": [ @@ -765,38 +765,38 @@ type Command struct { // in the AWS Systems Manager User Guide. StatusDetails can be one of the following // values: // - // * Pending: The command has not been sent to any instances. + // * Pending: The command has not been sent to any instances. // - // * - // In Progress: The command has been sent to at least one instance but has not - // reached a final state on all instances. + // * In + // Progress: The command has been sent to at least one instance but has not reached + // a final state on all instances. // - // * Success: The command successfully - // ran on all invocations. This is a terminal state. + // * Success: The command successfully ran on all + // invocations. This is a terminal state. // - // * Delivery Timed Out: The - // value of MaxErrors or more command invocations shows a status of Delivery Timed - // Out. This is a terminal state. + // * Delivery Timed Out: The value of + // MaxErrors or more command invocations shows a status of Delivery Timed Out. This + // is a terminal state. // - // * Execution Timed Out: The value of - // MaxErrors or more command invocations shows a status of Execution Timed Out. 
- // This is a terminal state. + // * Execution Timed Out: The value of MaxErrors or more + // command invocations shows a status of Execution Timed Out. This is a terminal + // state. // - // * Failed: The value of MaxErrors or more command - // invocations shows a status of Failed. This is a terminal state. + // * Failed: The value of MaxErrors or more command invocations shows a + // status of Failed. This is a terminal state. // - // * - // Incomplete: The command was attempted on all instances and one or more - // invocations does not have a value of Success but not enough invocations failed - // for the status to be Failed. This is a terminal state. + // * Incomplete: The command was + // attempted on all instances and one or more invocations does not have a value of + // Success but not enough invocations failed for the status to be Failed. This is a + // terminal state. // - // * Canceled: The - // command was terminated before it was completed. This is a terminal state. + // * Canceled: The command was terminated before it was completed. + // This is a terminal state. // - // * - // Rate Exceeded: The number of instances targeted by the command exceeded the - // account limit for pending invocations. The system has canceled the command - // before running it on any instance. This is a terminal state. + // * Rate Exceeded: The number of instances targeted by + // the command exceeded the account limit for pending invocations. The system has + // canceled the command before running it on any instance. This is a terminal + // state. StatusDetails *string // The number of targets for the command. @@ -822,48 +822,47 @@ type CommandFilter struct { // The filter value. Valid values for each filter key are as follows: // - // * + // * // InvokedAfter: Specify a timestamp to limit your results. For example, specify // 2018-07-07T00:00:00Z to see a list of command executions occurring July 7, 2018, // and later. // - // * InvokedBefore: Specify a timestamp to limit your results. For + // * InvokedBefore: Specify a timestamp to limit your results. For // example, specify 2018-07-07T00:00:00Z to see a list of command executions from // before July 7, 2018. // - // * Status: Specify a valid command status to see a list - // of all command executions with that status. Status values you can specify + // * Status: Specify a valid command status to see a list of + // all command executions with that status. Status values you can specify // include: // - // * Pending + // * Pending // - // * InProgress + // * InProgress // - // * Success + // * Success // - // * - // Cancelled + // * Cancelled // - // * Failed + // * Failed // - // * TimedOut + // * + // TimedOut // - // * Cancelling + // * Cancelling // - // * - // DocumentName: Specify name of the SSM document for which you want to see command - // execution results. For example, specify AWS-RunPatchBaseline to see command - // executions that used this SSM document to perform security patching operations - // on instances. + // * DocumentName: Specify name of the SSM document for + // which you want to see command execution results. For example, specify + // AWS-RunPatchBaseline to see command executions that used this SSM document to + // perform security patching operations on instances. // - // * ExecutionStage: Specify one of the following values: + // * ExecutionStage: Specify + // one of the following values: // + // * Executing: Returns a list of command executions + // that are currently still running. 
// - // * Executing: Returns a list of command executions that are currently still - // running. - // - // * Complete: Returns a list of command executions that have - // already completed. + // * Complete: Returns a list of command + // executions that have already completed. // // This member is required. Value *string @@ -937,43 +936,43 @@ type CommandInvocation struct { // in the AWS Systems Manager User Guide. StatusDetails can be one of the following // values: // - // * Pending: The command has not been sent to the instance. + // * Pending: The command has not been sent to the instance. // - // * In + // * In // Progress: The command has been sent to the instance but has not reached a // terminal state. // - // * Success: The execution of the command or plugin was + // * Success: The execution of the command or plugin was // successfully completed. This is a terminal state. // - // * Delivery Timed Out: The + // * Delivery Timed Out: The // command was not delivered to the instance before the delivery timeout expired. // Delivery timeouts do not count against the parent command's MaxErrors limit, but // they do contribute to whether the parent command status is Success or // Incomplete. This is a terminal state. // - // * Execution Timed Out: Command - // execution started on the instance, but the execution was not complete before the - // execution timeout expired. Execution timeouts count against the MaxErrors limit - // of the parent command. This is a terminal state. + // * Execution Timed Out: Command execution + // started on the instance, but the execution was not complete before the execution + // timeout expired. Execution timeouts count against the MaxErrors limit of the + // parent command. This is a terminal state. // - // * Failed: The command was - // not successful on the instance. For a plugin, this indicates that the result - // code was not zero. For a command invocation, this indicates that the result code - // for one or more plugins was not zero. Invocation failures count against the + // * Failed: The command was not + // successful on the instance. For a plugin, this indicates that the result code + // was not zero. For a command invocation, this indicates that the result code for + // one or more plugins was not zero. Invocation failures count against the // MaxErrors limit of the parent command. This is a terminal state. // - // * - // Canceled: The command was terminated before it was completed. This is a terminal - // state. + // * Canceled: + // The command was terminated before it was completed. This is a terminal state. // - // * Undeliverable: The command can't be delivered to the instance. The - // instance might not exist or might not be responding. Undeliverable invocations - // don't count against the parent command's MaxErrors limit and don't contribute to + // * + // Undeliverable: The command can't be delivered to the instance. The instance + // might not exist or might not be responding. Undeliverable invocations don't + // count against the parent command's MaxErrors limit and don't contribute to // whether the parent command status is Success or Incomplete. This is a terminal // state. // - // * Terminated: The parent command exceeded its MaxErrors limit and + // * Terminated: The parent command exceeded its MaxErrors limit and // subsequent command invocations were canceled by the system. This is a terminal // state. StatusDetails *string @@ -1047,45 +1046,45 @@ type CommandPlugin struct { // in the AWS Systems Manager User Guide. 
StatusDetails can be one of the following // values: // - // * Pending: The command has not been sent to the instance. + // * Pending: The command has not been sent to the instance. // - // * In + // * In // Progress: The command has been sent to the instance but has not reached a // terminal state. // - // * Success: The execution of the command or plugin was + // * Success: The execution of the command or plugin was // successfully completed. This is a terminal state. // - // * Delivery Timed Out: The + // * Delivery Timed Out: The // command was not delivered to the instance before the delivery timeout expired. // Delivery timeouts do not count against the parent command's MaxErrors limit, but // they do contribute to whether the parent command status is Success or // Incomplete. This is a terminal state. // - // * Execution Timed Out: Command - // execution started on the instance, but the execution was not complete before the - // execution timeout expired. Execution timeouts count against the MaxErrors limit - // of the parent command. This is a terminal state. + // * Execution Timed Out: Command execution + // started on the instance, but the execution was not complete before the execution + // timeout expired. Execution timeouts count against the MaxErrors limit of the + // parent command. This is a terminal state. // - // * Failed: The command was - // not successful on the instance. For a plugin, this indicates that the result - // code was not zero. For a command invocation, this indicates that the result code - // for one or more plugins was not zero. Invocation failures count against the + // * Failed: The command was not + // successful on the instance. For a plugin, this indicates that the result code + // was not zero. For a command invocation, this indicates that the result code for + // one or more plugins was not zero. Invocation failures count against the // MaxErrors limit of the parent command. This is a terminal state. // - // * - // Canceled: The command was terminated before it was completed. This is a terminal - // state. + // * Canceled: + // The command was terminated before it was completed. This is a terminal state. // - // * Undeliverable: The command can't be delivered to the instance. The - // instance might not exist, or it might not be responding. Undeliverable - // invocations don't count against the parent command's MaxErrors limit, and they - // don't contribute to whether the parent command status is Success or Incomplete. - // This is a terminal state. + // * + // Undeliverable: The command can't be delivered to the instance. The instance + // might not exist, or it might not be responding. Undeliverable invocations don't + // count against the parent command's MaxErrors limit, and they don't contribute to + // whether the parent command status is Success or Incomplete. This is a terminal + // state. // - // * Terminated: The parent command exceeded its - // MaxErrors limit and subsequent command invocations were canceled by the system. - // This is a terminal state. + // * Terminated: The parent command exceeded its MaxErrors limit and + // subsequent command invocations were canceled by the system. This is a terminal + // state. StatusDetails *string } @@ -1471,55 +1470,54 @@ type DocumentIdentifier struct { // You can also use AWS-provided keys, some of which have specific allowed values. 
// These keys and their associated values are as follows: DocumentType // -// * +// * // ApplicationConfiguration // -// * ApplicationConfigurationSchema +// * ApplicationConfigurationSchema // -// * -// Automation +// * Automation // -// * ChangeCalendar +// * +// ChangeCalendar // -// * Command +// * Command // -// * DeploymentStrategy +// * DeploymentStrategy // -// * -// Package +// * Package // -// * Policy +// * Policy // -// * Session +// * +// Session // -// Owner Note that only one Owner can be -// specified in a request. For example: Key=Owner,Values=Self. +// Owner Note that only one Owner can be specified in a request. For +// example: Key=Owner,Values=Self. // -// * Amazon +// * Amazon // -// * -// Private +// * Private // -// * Public +// * Public // -// * Self +// * Self // -// * ThirdParty +// * +// ThirdParty // // PlatformTypes // -// * -// Linux +// * Linux // -// * Windows +// * Windows // -// Name is another AWS-provided key. If you use Name as a -// key, you can use a name prefix to return a list of documents. For example, in -// the AWS CLI, to return a list of all documents that begin with Te, run the -// following command: aws ssm list-documents --filters Key=Name,Values=Te You can -// also use the TargetType AWS-provided key. For a list of valid resource type -// values that can be used with this key, see AWS resource and property types -// reference +// Name is another AWS-provided key. +// If you use Name as a key, you can use a name prefix to return a list of +// documents. For example, in the AWS CLI, to return a list of all documents that +// begin with Te, run the following command: aws ssm list-documents --filters +// Key=Name,Values=Te You can also use the TargetType AWS-provided key. For a list +// of valid resource type values that can be used with this key, see AWS resource +// and property types reference // (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html) // in the AWS CloudFormation User Guide. If you specify more than two keys, only // documents that are identified by all the tags are returned in the results. If @@ -1946,14 +1944,14 @@ type InstancePatchState struct { // apply to Install operations only. Reboots are not attempted for Patch Manager // Scan operations. // - // * RebootIfNeeded: Patch Manager tries to reboot the - // instance if it installed any patches, or if any patches are detected with a - // status of InstalledPendingReboot. + // * RebootIfNeeded: Patch Manager tries to reboot the instance + // if it installed any patches, or if any patches are detected with a status of + // InstalledPendingReboot. // - // * NoReboot: Patch Manager attempts to - // install missing packages without trying to reboot the system. Patches installed - // with this option are assigned a status of InstalledPendingReboot. These patches - // might not be in effect until a reboot is performed. + // * NoReboot: Patch Manager attempts to install missing + // packages without trying to reboot the system. Patches installed with this option + // are assigned a status of InstalledPendingReboot. These patches might not be in + // effect until a reboot is performed. RebootOption RebootOption // The ID of the patch baseline snapshot used during the patching operation when @@ -3717,15 +3715,14 @@ type ServiceSetting struct { // The status of the service setting. The value can be Default, Customized or // PendingUpdate. 
// - // * Default: The current setting uses a default value - // provisioned by the AWS service team. + // * Default: The current setting uses a default value provisioned + // by the AWS service team. // - // * Customized: The current setting use - // a custom value specified by the customer. + // * Customized: The current setting use a custom value + // specified by the customer. // - // * PendingUpdate: The current - // setting uses a default or custom value, but a setting change request is pending - // approval. + // * PendingUpdate: The current setting uses a default + // or custom value, but a setting change request is pending approval. Status *string } @@ -3771,37 +3768,36 @@ type SessionFilter struct { // The filter value. Valid values for each filter key are as follows: // - // * + // * // InvokedAfter: Specify a timestamp to limit your results. For example, specify // 2018-08-29T00:00:00Z to see sessions that started August 29, 2018, and later. // - // - // * InvokedBefore: Specify a timestamp to limit your results. For example, specify + // * + // InvokedBefore: Specify a timestamp to limit your results. For example, specify // 2018-08-29T00:00:00Z to see sessions that started before August 29, 2018. // - // * + // * // Target: Specify an instance to which session connections have been made. // - // * + // * // Owner: Specify an AWS user account to see a list of sessions started by that // user. // - // * Status: Specify a valid session status to see a list of all - // sessions with that status. Status values you can specify include: + // * Status: Specify a valid session status to see a list of all sessions + // with that status. Status values you can specify include: // - // * - // Connected + // * Connected // - // * Connecting + // * + // Connecting // - // * Disconnected - // - // * Terminated + // * Disconnected // + // * Terminated // // * Terminating // - // * Failed + // * Failed // // This member is required. Value *string @@ -3967,59 +3963,58 @@ type Tag struct { // An array of search criteria that targets instances using a Key,Value combination // that you specify. Supported formats include the following. 
// -// * +// * // Key=InstanceIds,Values=instance-id-1,instance-id-2,instance-id-3 // -// * +// * // Key=tag:my-tag-key,Values=my-tag-value-1,my-tag-value-2 // -// * +// * // Key=tag-key,Values=my-tag-key-1,my-tag-key-2 // -// * Run Command and Maintenance +// * Run Command and Maintenance // window targets only: Key=resource-groups:Name,Values=resource-group-name // -// * +// * // Maintenance window targets only: // Key=resource-groups:ResourceTypeFilters,Values=resource-type-1,resource-type-2 // -// -// * Automation targets only: Key=ResourceGroup;Values=resource-group-name +// * +// Automation targets only: Key=ResourceGroup;Values=resource-group-name // // For // example: // -// * +// * // Key=InstanceIds,Values=i-02573cafcfEXAMPLE,i-0471e04240EXAMPLE,i-07782c72faEXAMPLE // +// * +// Key=tag:CostCenter,Values=CostCenter1,CostCenter2,CostCenter3 // -// * Key=tag:CostCenter,Values=CostCenter1,CostCenter2,CostCenter3 -// -// * +// * // Key=tag-key,Values=Name,Instance-Type,CostCenter // -// * Run Command and -// Maintenance window targets only: -// Key=resource-groups:Name,Values=ProductionResourceGroup This example -// demonstrates how to target all resources in the resource group +// * Run Command and Maintenance +// window targets only: Key=resource-groups:Name,Values=ProductionResourceGroup +// This example demonstrates how to target all resources in the resource group // ProductionResourceGroup in your maintenance window. // -// * Maintenance window +// * Maintenance window // targets only: // Key=resource-groups:ResourceTypeFilters,Values=AWS::EC2::INSTANCE,AWS::EC2::VPC // This example demonstrates how to target only EC2 instances and VPCs in your // maintenance window. // -// * Automation targets only: +// * Automation targets only: // Key=ResourceGroup,Values=MyResourceGroup // -// * State Manager association -// targets only: Key=InstanceIds,Values=* This example demonstrates how to target -// all managed instances in the AWS Region where the association was created. +// * State Manager association targets +// only: Key=InstanceIds,Values=* This example demonstrates how to target all +// managed instances in the AWS Region where the association was created. // -// For -// more information about how to send commands that target instances using -// Key,Value parameters, see Targeting multiple instances +// For more +// information about how to send commands that target instances using Key,Value +// parameters, see Targeting multiple instances // (https://docs.aws.amazon.com/systems-manager/latest/userguide/send-commands-multiple.html#send-commands-targeting) // in the AWS Systems Manager User Guide. type Target struct { diff --git a/service/ssoadmin/types/enums.go b/service/ssoadmin/types/enums.go index 39048f32b3d..28eb8efced3 100644 --- a/service/ssoadmin/types/enums.go +++ b/service/ssoadmin/types/enums.go @@ -24,8 +24,8 @@ type ProvisioningStatus string // Enum values for ProvisioningStatus const ( - ProvisioningStatusLatest_permission_set_provisioned ProvisioningStatus = "LATEST_PERMISSION_SET_PROVISIONED" - ProvisioningStatusLatest_permission_set_not_provisioned ProvisioningStatus = "LATEST_PERMISSION_SET_NOT_PROVISIONED" + ProvisioningStatusLatestPermissionSetProvisioned ProvisioningStatus = "LATEST_PERMISSION_SET_PROVISIONED" + ProvisioningStatusLatestPermissionSetNotProvisioned ProvisioningStatus = "LATEST_PERMISSION_SET_NOT_PROVISIONED" ) // Values returns all known values for ProvisioningStatus. 
Note that this can be @@ -42,8 +42,8 @@ type ProvisionTargetType string // Enum values for ProvisionTargetType const ( - ProvisionTargetTypeAws_account ProvisionTargetType = "AWS_ACCOUNT" - ProvisionTargetTypeAll_provisioned_accounts ProvisionTargetType = "ALL_PROVISIONED_ACCOUNTS" + ProvisionTargetTypeAwsAccount ProvisionTargetType = "AWS_ACCOUNT" + ProvisionTargetTypeAllProvisionedAccounts ProvisionTargetType = "ALL_PROVISIONED_ACCOUNTS" ) // Values returns all known values for ProvisionTargetType. Note that this can be @@ -60,9 +60,9 @@ type StatusValues string // Enum values for StatusValues const ( - StatusValuesIn_progress StatusValues = "IN_PROGRESS" - StatusValuesFailed StatusValues = "FAILED" - StatusValuesSucceeded StatusValues = "SUCCEEDED" + StatusValuesInProgress StatusValues = "IN_PROGRESS" + StatusValuesFailed StatusValues = "FAILED" + StatusValuesSucceeded StatusValues = "SUCCEEDED" ) // Values returns all known values for StatusValues. Note that this can be expanded @@ -80,7 +80,7 @@ type TargetType string // Enum values for TargetType const ( - TargetTypeAws_account TargetType = "AWS_ACCOUNT" + TargetTypeAwsAccount TargetType = "AWS_ACCOUNT" ) // Values returns all known values for TargetType. Note that this can be expanded diff --git a/service/storagegateway/api_op_ActivateGateway.go b/service/storagegateway/api_op_ActivateGateway.go index 54ec5905175..c7687f0f18a 100644 --- a/service/storagegateway/api_op_ActivateGateway.go +++ b/service/storagegateway/api_op_ActivateGateway.go @@ -35,23 +35,22 @@ func (c *Client) ActivateGateway(ctx context.Context, params *ActivateGatewayInp // A JSON object containing one or more of the following fields: // -// * +// * // ActivateGatewayInput$ActivationKey // -// * ActivateGatewayInput$GatewayName +// * ActivateGatewayInput$GatewayName // +// * +// ActivateGatewayInput$GatewayRegion // -// * ActivateGatewayInput$GatewayRegion -// -// * -// ActivateGatewayInput$GatewayTimezone -// -// * ActivateGatewayInput$GatewayType +// * ActivateGatewayInput$GatewayTimezone // +// * +// ActivateGatewayInput$GatewayType // // * ActivateGatewayInput$MediumChangerType // -// * +// * // ActivateGatewayInput$TapeDriveType type ActivateGatewayInput struct { diff --git a/service/storagegateway/api_op_AddTagsToResource.go b/service/storagegateway/api_op_AddTagsToResource.go index bbc325f5067..6ee160b5f16 100644 --- a/service/storagegateway/api_op_AddTagsToResource.go +++ b/service/storagegateway/api_op_AddTagsToResource.go @@ -17,18 +17,18 @@ import ( // of a key and a value, which you define. You can add tags to the following AWS // Storage Gateway resources: // -// * Storage gateways of all types +// * Storage gateways of all types // -// * Storage +// * Storage // volumes // -// * Virtual tapes +// * Virtual tapes // -// * NFS and SMB file shares +// * NFS and SMB file shares // -// You can create a -// maximum of 50 tags for each resource. Virtual tapes and storage volumes that are -// recovered to a new gateway maintain their tags. +// You can create a maximum of +// 50 tags for each resource. Virtual tapes and storage volumes that are recovered +// to a new gateway maintain their tags. 
func (c *Client) AddTagsToResource(ctx context.Context, params *AddTagsToResourceInput, optFns ...func(*Options)) (*AddTagsToResourceOutput, error) { if params == nil { params = &AddTagsToResourceInput{} diff --git a/service/storagegateway/api_op_AddWorkingStorage.go b/service/storagegateway/api_op_AddWorkingStorage.go index 277830a7170..85d6c1765f8 100644 --- a/service/storagegateway/api_op_AddWorkingStorage.go +++ b/service/storagegateway/api_op_AddWorkingStorage.go @@ -35,7 +35,7 @@ func (c *Client) AddWorkingStorage(ctx context.Context, params *AddWorkingStorag // A JSON object containing one or more of the following fields: // -// * +// * // AddWorkingStorageInput$DiskIds type AddWorkingStorageInput struct { diff --git a/service/storagegateway/api_op_CreateNFSFileShare.go b/service/storagegateway/api_op_CreateNFSFileShare.go index 75a8aac704b..6cd8eab54fb 100644 --- a/service/storagegateway/api_op_CreateNFSFileShare.go +++ b/service/storagegateway/api_op_CreateNFSFileShare.go @@ -118,14 +118,13 @@ type CreateNFSFileShareInput struct { // A value that maps a user to anonymous user. Valid values are the following: // + // * + // RootSquash: Only root is mapped to anonymous user. // - // * RootSquash: Only root is mapped to anonymous user. + // * NoSquash: No one is mapped + // to anonymous user. // - // * NoSquash: No one is - // mapped to anonymous user. - // - // * AllSquash: Everyone is mapped to anonymous - // user. + // * AllSquash: Everyone is mapped to anonymous user. Squash *string // A list of up to 50 tags that can be assigned to the NFS file share. Each tag is diff --git a/service/storagegateway/api_op_CreateSnapshot.go b/service/storagegateway/api_op_CreateSnapshot.go index 60696b9b882..3515437f86f 100644 --- a/service/storagegateway/api_op_CreateSnapshot.go +++ b/service/storagegateway/api_op_CreateSnapshot.go @@ -53,10 +53,10 @@ func (c *Client) CreateSnapshot(ctx context.Context, params *CreateSnapshotInput // A JSON object containing one or more of the following fields: // -// * +// * // CreateSnapshotInput$SnapshotDescription // -// * CreateSnapshotInput$VolumeARN +// * CreateSnapshotInput$VolumeARN type CreateSnapshotInput struct { // Textual description of the snapshot that appears in the Amazon EC2 console, diff --git a/service/storagegateway/api_op_CreateStorediSCSIVolume.go b/service/storagegateway/api_op_CreateStorediSCSIVolume.go index 0e04c67809a..750dd034e74 100644 --- a/service/storagegateway/api_op_CreateStorediSCSIVolume.go +++ b/service/storagegateway/api_op_CreateStorediSCSIVolume.go @@ -37,19 +37,19 @@ func (c *Client) CreateStorediSCSIVolume(ctx context.Context, params *CreateStor // A JSON object containing one or more of the following fields: // -// * +// * // CreateStorediSCSIVolumeInput$DiskId // -// * +// * // CreateStorediSCSIVolumeInput$NetworkInterfaceId // -// * +// * // CreateStorediSCSIVolumeInput$PreserveExistingData // -// * +// * // CreateStorediSCSIVolumeInput$SnapshotId // -// * +// * // CreateStorediSCSIVolumeInput$TargetName type CreateStorediSCSIVolumeInput struct { diff --git a/service/storagegateway/api_op_DeleteBandwidthRateLimit.go b/service/storagegateway/api_op_DeleteBandwidthRateLimit.go index ea8d67c6fcc..ccc5011812d 100644 --- a/service/storagegateway/api_op_DeleteBandwidthRateLimit.go +++ b/service/storagegateway/api_op_DeleteBandwidthRateLimit.go @@ -33,7 +33,7 @@ func (c *Client) DeleteBandwidthRateLimit(ctx context.Context, params *DeleteBan // A JSON object containing the following fields: // -// * +// * // 
DeleteBandwidthRateLimitInput$BandwidthType type DeleteBandwidthRateLimitInput struct { diff --git a/service/storagegateway/api_op_DeleteChapCredentials.go b/service/storagegateway/api_op_DeleteChapCredentials.go index 339cf0b0eeb..ea5a4bad5ed 100644 --- a/service/storagegateway/api_op_DeleteChapCredentials.go +++ b/service/storagegateway/api_op_DeleteChapCredentials.go @@ -30,11 +30,10 @@ func (c *Client) DeleteChapCredentials(ctx context.Context, params *DeleteChapCr // A JSON object containing one or more of the following fields: // -// * +// * // DeleteChapCredentialsInput$InitiatorName // -// * -// DeleteChapCredentialsInput$TargetARN +// * DeleteChapCredentialsInput$TargetARN type DeleteChapCredentialsInput struct { // The iSCSI initiator that connects to the target. diff --git a/service/storagegateway/api_op_DescribeChapCredentials.go b/service/storagegateway/api_op_DescribeChapCredentials.go index 15d5ec21481..32dab0d686b 100644 --- a/service/storagegateway/api_op_DescribeChapCredentials.go +++ b/service/storagegateway/api_op_DescribeChapCredentials.go @@ -50,19 +50,19 @@ type DescribeChapCredentialsOutput struct { // CHAP credentials are set, an empty array is returned. CHAP credential // information is provided in a JSON object with the following fields: // - // * + // * // InitiatorName: The iSCSI initiator that connects to the target. // - // * + // * // SecretToAuthenticateInitiator: The secret key that the initiator (for example, // the Windows client) must provide to participate in mutual CHAP with the // target. // - // * SecretToAuthenticateTarget: The secret key that the target must + // * SecretToAuthenticateTarget: The secret key that the target must // provide to participate in mutual CHAP with the initiator (e.g. Windows // client). // - // * TargetARN: The Amazon Resource Name (ARN) of the storage volume. + // * TargetARN: The Amazon Resource Name (ARN) of the storage volume. ChapCredentials []*types.ChapInfo // Metadata pertaining to the operation's result. diff --git a/service/storagegateway/api_op_DescribeMaintenanceStartTime.go b/service/storagegateway/api_op_DescribeMaintenanceStartTime.go index 87d8a8670de..90f234e0733 100644 --- a/service/storagegateway/api_op_DescribeMaintenanceStartTime.go +++ b/service/storagegateway/api_op_DescribeMaintenanceStartTime.go @@ -39,19 +39,19 @@ type DescribeMaintenanceStartTimeInput struct { // A JSON object containing the following fields: // -// * +// * // DescribeMaintenanceStartTimeOutput$DayOfMonth // -// * +// * // DescribeMaintenanceStartTimeOutput$DayOfWeek // -// * +// * // DescribeMaintenanceStartTimeOutput$HourOfDay // -// * +// * // DescribeMaintenanceStartTimeOutput$MinuteOfHour // -// * +// * // DescribeMaintenanceStartTimeOutput$Timezone type DescribeMaintenanceStartTimeOutput struct { diff --git a/service/storagegateway/api_op_DescribeSMBSettings.go b/service/storagegateway/api_op_DescribeSMBSettings.go index 37b5839b35f..b325c763027 100644 --- a/service/storagegateway/api_op_DescribeSMBSettings.go +++ b/service/storagegateway/api_op_DescribeSMBSettings.go @@ -42,27 +42,27 @@ type DescribeSMBSettingsOutput struct { // Indicates the status of a gateway that is a member of the Active Directory // domain. // - // * ACCESS_DENIED: Indicates that the JoinDomain operation failed due - // to an authentication error. + // * ACCESS_DENIED: Indicates that the JoinDomain operation failed due to + // an authentication error. // - // * DETACHED: Indicates that gateway is not - // joined to a domain. 
+ // * DETACHED: Indicates that gateway is not joined to a + // domain. // - // * JOINED: Indicates that the gateway has successfully - // joined a domain. + // * JOINED: Indicates that the gateway has successfully joined a + // domain. // - // * JOINING: Indicates that a JoinDomain operation is in - // progress. + // * JOINING: Indicates that a JoinDomain operation is in progress. // - // * NETWORK_ERROR: Indicates that JoinDomain operation failed due - // to a network or connectivity error. + // * + // NETWORK_ERROR: Indicates that JoinDomain operation failed due to a network or + // connectivity error. // - // * TIMEOUT: Indicates that the - // JoinDomain operation failed because the operation didn't complete within the - // allotted time. + // * TIMEOUT: Indicates that the JoinDomain operation failed + // because the operation didn't complete within the allotted time. // - // * UNKNOWN_ERROR: Indicates that the JoinDomain operation - // failed due to another type of error. + // * + // UNKNOWN_ERROR: Indicates that the JoinDomain operation failed due to another + // type of error. ActiveDirectoryStatus types.ActiveDirectoryStatus // The name of the domain that the gateway is joined to. @@ -78,17 +78,17 @@ type DescribeSMBSettingsOutput struct { // The type of security strategy that was specified for file gateway. // - // * + // * // ClientSpecified: If you use this option, requests are established based on what // is negotiated by the client. This option is recommended when you want to // maximize compatibility across different clients in your environment. // - // * + // * // MandatorySigning: If you use this option, file gateway only allows connections // from SMBv2 or SMBv3 clients that have signing enabled. This option works with // SMB clients on Microsoft Windows Vista, Windows Server 2008 or newer. // - // * + // * // MandatoryEncryption: If you use this option, file gateway only allows // connections from SMBv3 clients that have encryption enabled. This option is // highly recommended for environments that handle sensitive data. This option diff --git a/service/storagegateway/api_op_DescribeStorediSCSIVolumes.go b/service/storagegateway/api_op_DescribeStorediSCSIVolumes.go index 5689c88be07..fb226ea3d38 100644 --- a/service/storagegateway/api_op_DescribeStorediSCSIVolumes.go +++ b/service/storagegateway/api_op_DescribeStorediSCSIVolumes.go @@ -46,59 +46,59 @@ type DescribeStorediSCSIVolumesOutput struct { // Describes a single unit of output from DescribeStorediSCSIVolumes. The following // fields are returned: // - // * ChapEnabled: Indicates whether mutual CHAP is - // enabled for the iSCSI target. + // * ChapEnabled: Indicates whether mutual CHAP is enabled + // for the iSCSI target. // - // * LunNumber: The logical disk number. + // * LunNumber: The logical disk number. // - // * + // * // NetworkInterfaceId: The network interface ID of the stored volume that initiator // use to map the stored volume as an iSCSI target. // - // * NetworkInterfacePort: - // The port used to communicate with iSCSI targets. + // * NetworkInterfacePort: The + // port used to communicate with iSCSI targets. // - // * PreservedExistingData: - // Indicates when the stored volume was created, existing data on the underlying - // local disk was preserved. + // * PreservedExistingData: Indicates + // when the stored volume was created, existing data on the underlying local disk + // was preserved. 
// - // * SourceSnapshotId: If the stored volume was - // created from a snapshot, this field contains the snapshot ID used, e.g. - // snap-1122aabb. Otherwise, this field is not included. + // * SourceSnapshotId: If the stored volume was created from a + // snapshot, this field contains the snapshot ID used, e.g. snap-1122aabb. + // Otherwise, this field is not included. // - // * StorediSCSIVolumes: - // An array of StorediSCSIVolume objects where each object contains metadata about - // one stored volume. + // * StorediSCSIVolumes: An array of + // StorediSCSIVolume objects where each object contains metadata about one stored + // volume. // - // * TargetARN: The Amazon Resource Name (ARN) of the - // volume target. + // * TargetARN: The Amazon Resource Name (ARN) of the volume target. // - // * VolumeARN: The Amazon Resource Name (ARN) of the stored - // volume. + // * + // VolumeARN: The Amazon Resource Name (ARN) of the stored volume. // - // * VolumeDiskId: The disk ID of the local disk that was specified in - // the CreateStorediSCSIVolume operation. + // * VolumeDiskId: + // The disk ID of the local disk that was specified in the CreateStorediSCSIVolume + // operation. // - // * VolumeId: The unique identifier of - // the storage volume, e.g. vol-1122AABB. + // * VolumeId: The unique identifier of the storage volume, e.g. + // vol-1122AABB. // - // * VolumeiSCSIAttributes: An - // VolumeiSCSIAttributes object that represents a collection of iSCSI attributes - // for one stored volume. + // * VolumeiSCSIAttributes: An VolumeiSCSIAttributes object that + // represents a collection of iSCSI attributes for one stored volume. // - // * VolumeProgress: Represents the percentage complete - // if the volume is restoring or bootstrapping that represents the percent of data - // transferred. This field does not appear in the response if the stored volume is - // not restoring or bootstrapping. + // * + // VolumeProgress: Represents the percentage complete if the volume is restoring or + // bootstrapping that represents the percent of data transferred. This field does + // not appear in the response if the stored volume is not restoring or + // bootstrapping. // - // * VolumeSizeInBytes: The size of the volume - // in bytes. + // * VolumeSizeInBytes: The size of the volume in bytes. // - // * VolumeStatus: One of the VolumeStatus values that indicates the - // state of the volume. + // * + // VolumeStatus: One of the VolumeStatus values that indicates the state of the + // volume. // - // * VolumeType: One of the enumeration values describing - // the type of the volume. Currently, only STORED volumes are supported. + // * VolumeType: One of the enumeration values describing the type of the + // volume. Currently, only STORED volumes are supported. StorediSCSIVolumes []*types.StorediSCSIVolume // Metadata pertaining to the operation's result. diff --git a/service/storagegateway/api_op_JoinDomain.go b/service/storagegateway/api_op_JoinDomain.go index 405ca10454c..bf3edc437f2 100644 --- a/service/storagegateway/api_op_JoinDomain.go +++ b/service/storagegateway/api_op_JoinDomain.go @@ -77,27 +77,27 @@ type JoinDomainOutput struct { // Indicates the status of the gateway as a member of the Active Directory // domain. // - // * ACCESS_DENIED: Indicates that the JoinDomain operation failed due - // to an authentication error. + // * ACCESS_DENIED: Indicates that the JoinDomain operation failed due to + // an authentication error. 
// - // * DETACHED: Indicates that gateway is not - // joined to a domain. + // * DETACHED: Indicates that gateway is not joined to a + // domain. // - // * JOINED: Indicates that the gateway has successfully - // joined a domain. + // * JOINED: Indicates that the gateway has successfully joined a + // domain. // - // * JOINING: Indicates that a JoinDomain operation is in - // progress. + // * JOINING: Indicates that a JoinDomain operation is in progress. // - // * NETWORK_ERROR: Indicates that JoinDomain operation failed due - // to a network or connectivity error. + // * + // NETWORK_ERROR: Indicates that JoinDomain operation failed due to a network or + // connectivity error. // - // * TIMEOUT: Indicates that the - // JoinDomain operation failed because the operation didn't complete within the - // allotted time. + // * TIMEOUT: Indicates that the JoinDomain operation failed + // because the operation didn't complete within the allotted time. // - // * UNKNOWN_ERROR: Indicates that the JoinDomain operation - // failed due to another type of error. + // * + // UNKNOWN_ERROR: Indicates that the JoinDomain operation failed due to another + // type of error. ActiveDirectoryStatus types.ActiveDirectoryStatus // The unique Amazon Resource Name (ARN) of the gateway that joined the domain. diff --git a/service/storagegateway/api_op_ListGateways.go b/service/storagegateway/api_op_ListGateways.go index f39619f3513..bcafa999a97 100644 --- a/service/storagegateway/api_op_ListGateways.go +++ b/service/storagegateway/api_op_ListGateways.go @@ -36,10 +36,10 @@ func (c *Client) ListGateways(ctx context.Context, params *ListGatewaysInput, op // A JSON object containing zero or more of the following fields: // -// * +// * // ListGatewaysInput$Limit // -// * ListGatewaysInput$Marker +// * ListGatewaysInput$Marker type ListGatewaysInput struct { // Specifies that the list of gateways returned be limited to the specified number diff --git a/service/storagegateway/api_op_ListLocalDisks.go b/service/storagegateway/api_op_ListLocalDisks.go index a3fa071ec41..9bc4e7751d6 100644 --- a/service/storagegateway/api_op_ListLocalDisks.go +++ b/service/storagegateway/api_op_ListLocalDisks.go @@ -48,7 +48,7 @@ type ListLocalDisksOutput struct { // A JSON object containing the following fields: // - // * ListLocalDisksOutput$Disks + // * ListLocalDisksOutput$Disks Disks []*types.Disk // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to diff --git a/service/storagegateway/api_op_ListTapes.go b/service/storagegateway/api_op_ListTapes.go index 1edebbcfcca..2b78aed61f7 100644 --- a/service/storagegateway/api_op_ListTapes.go +++ b/service/storagegateway/api_op_ListTapes.go @@ -38,12 +38,12 @@ func (c *Client) ListTapes(ctx context.Context, params *ListTapesInput, optFns . // A JSON object that contains one or more of the following fields: // -// * +// * // ListTapesInput$Limit // -// * ListTapesInput$Marker +// * ListTapesInput$Marker // -// * ListTapesInput$TapeARNs +// * ListTapesInput$TapeARNs type ListTapesInput struct { // An optional number limit for the tapes in the list returned by this call. 
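// Illustrative sketch, not part of the generated diff above: a minimal example of
// driving the Limit/Marker pagination that the ListTapes doc comment describes.
// It assumes an already-configured *storagegateway.Client (named svc here); the
// pointer-style field and slice shapes follow the preview API visible elsewhere
// in this patch and may differ in later SDK releases.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/storagegateway"
	"github.com/aws/aws-sdk-go-v2/service/storagegateway/types"
)

// listAllTapes pages through ListTapes until the service stops returning a Marker.
func listAllTapes(ctx context.Context, svc *storagegateway.Client) ([]*types.TapeInfo, error) {
	var all []*types.TapeInfo
	var marker *string
	for {
		out, err := svc.ListTapes(ctx, &storagegateway.ListTapesInput{
			Limit:  aws.Int32(100), // page size for each call
			Marker: marker,         // resume point returned by the previous page
		})
		if err != nil {
			return nil, err
		}
		all = append(all, out.TapeInfos...)
		if out.Marker == nil { // a nil Marker means there are no more pages
			return all, nil
		}
		marker = out.Marker
	}
}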
@@ -60,10 +60,10 @@ type ListTapesInput struct { // A JSON object containing the following fields: // -// * ListTapesOutput$Marker +// * ListTapesOutput$Marker // -// -// * ListTapesOutput$VolumeInfos +// * +// ListTapesOutput$VolumeInfos type ListTapesOutput struct { // A string that indicates the position at which to begin returning the next list diff --git a/service/storagegateway/api_op_ListVolumes.go b/service/storagegateway/api_op_ListVolumes.go index d772f93ffb4..a142c7d5aec 100644 --- a/service/storagegateway/api_op_ListVolumes.go +++ b/service/storagegateway/api_op_ListVolumes.go @@ -38,10 +38,10 @@ func (c *Client) ListVolumes(ctx context.Context, params *ListVolumesInput, optF // A JSON object that contains one or more of the following fields: // -// * +// * // ListVolumesInput$Limit // -// * ListVolumesInput$Marker +// * ListVolumesInput$Marker type ListVolumesInput struct { // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to @@ -60,10 +60,10 @@ type ListVolumesInput struct { // A JSON object containing the following fields: // -// * ListVolumesOutput$Marker +// * ListVolumesOutput$Marker // -// -// * ListVolumesOutput$VolumeInfos +// * +// ListVolumesOutput$VolumeInfos type ListVolumesOutput struct { // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation to diff --git a/service/storagegateway/api_op_UpdateBandwidthRateLimit.go b/service/storagegateway/api_op_UpdateBandwidthRateLimit.go index 12290eb978a..6be02b4cb63 100644 --- a/service/storagegateway/api_op_UpdateBandwidthRateLimit.go +++ b/service/storagegateway/api_op_UpdateBandwidthRateLimit.go @@ -35,10 +35,10 @@ func (c *Client) UpdateBandwidthRateLimit(ctx context.Context, params *UpdateBan // A JSON object containing one or more of the following fields: // -// * +// * // UpdateBandwidthRateLimitInput$AverageDownloadRateLimitInBitsPerSec // -// * +// * // UpdateBandwidthRateLimitInput$AverageUploadRateLimitInBitsPerSec type UpdateBandwidthRateLimitInput struct { diff --git a/service/storagegateway/api_op_UpdateChapCredentials.go b/service/storagegateway/api_op_UpdateChapCredentials.go index 86dfafc362c..45dd3767248 100644 --- a/service/storagegateway/api_op_UpdateChapCredentials.go +++ b/service/storagegateway/api_op_UpdateChapCredentials.go @@ -33,16 +33,16 @@ func (c *Client) UpdateChapCredentials(ctx context.Context, params *UpdateChapCr // A JSON object containing one or more of the following fields: // -// * +// * // UpdateChapCredentialsInput$InitiatorName // -// * +// * // UpdateChapCredentialsInput$SecretToAuthenticateInitiator // -// * +// * // UpdateChapCredentialsInput$SecretToAuthenticateTarget // -// * +// * // UpdateChapCredentialsInput$TargetARN type UpdateChapCredentialsInput struct { diff --git a/service/storagegateway/api_op_UpdateMaintenanceStartTime.go b/service/storagegateway/api_op_UpdateMaintenanceStartTime.go index 6c6d7f078c9..bc6f0fcda2a 100644 --- a/service/storagegateway/api_op_UpdateMaintenanceStartTime.go +++ b/service/storagegateway/api_op_UpdateMaintenanceStartTime.go @@ -29,16 +29,16 @@ func (c *Client) UpdateMaintenanceStartTime(ctx context.Context, params *UpdateM // A JSON object containing the following fields: // -// * +// * // UpdateMaintenanceStartTimeInput$DayOfMonth // -// * +// * // UpdateMaintenanceStartTimeInput$DayOfWeek // -// * +// * // UpdateMaintenanceStartTimeInput$HourOfDay // -// * +// * // UpdateMaintenanceStartTimeInput$MinuteOfHour type UpdateMaintenanceStartTimeInput struct { diff --git 
a/service/storagegateway/api_op_UpdateNFSFileShare.go b/service/storagegateway/api_op_UpdateNFSFileShare.go index eb3f8b5e403..06d647807ec 100644 --- a/service/storagegateway/api_op_UpdateNFSFileShare.go +++ b/service/storagegateway/api_op_UpdateNFSFileShare.go @@ -15,21 +15,21 @@ import ( // in the file gateway type. To leave a file share field unchanged, set the // corresponding input field to null. Updates the following file share setting: // +// * +// Default storage class for your S3 bucket // -// * Default storage class for your S3 bucket -// -// * Metadata defaults for your S3 +// * Metadata defaults for your S3 // bucket // -// * Allowed NFS clients for your file share -// -// * Squash settings +// * Allowed NFS clients for your file share // +// * Squash settings // -// * Write status of your file share +// * Write +// status of your file share // -// To leave a file share field unchanged, set -// the corresponding input field to null. This operation is only supported in file +// To leave a file share field unchanged, set the +// corresponding input field to null. This operation is only supported in file // gateways. func (c *Client) UpdateNFSFileShare(ctx context.Context, params *UpdateNFSFileShareInput, optFns ...func(*Options)) (*UpdateNFSFileShareOutput, error) { if params == nil { @@ -107,14 +107,13 @@ type UpdateNFSFileShareInput struct { // The user mapped to anonymous user. Valid values are the following: // - // * + // * // RootSquash: Only root is mapped to anonymous user. // - // * NoSquash: No one is - // mapped to anonymous user. + // * NoSquash: No one is mapped + // to anonymous user. // - // * AllSquash: Everyone is mapped to anonymous - // user. + // * AllSquash: Everyone is mapped to anonymous user. Squash *string } diff --git a/service/storagegateway/api_op_UpdateSnapshotSchedule.go b/service/storagegateway/api_op_UpdateSnapshotSchedule.go index 8e0503fd4cf..6c6085f217a 100644 --- a/service/storagegateway/api_op_UpdateSnapshotSchedule.go +++ b/service/storagegateway/api_op_UpdateSnapshotSchedule.go @@ -36,16 +36,16 @@ func (c *Client) UpdateSnapshotSchedule(ctx context.Context, params *UpdateSnaps // A JSON object containing one or more of the following fields: // -// * +// * // UpdateSnapshotScheduleInput$Description // -// * +// * // UpdateSnapshotScheduleInput$RecurrenceInHours // -// * +// * // UpdateSnapshotScheduleInput$StartAt // -// * UpdateSnapshotScheduleInput$VolumeARN +// * UpdateSnapshotScheduleInput$VolumeARN type UpdateSnapshotScheduleInput struct { // Frequency of snapshots. Specify the number of hours between snapshots. diff --git a/service/storagegateway/doc.go b/service/storagegateway/doc.go index e5bbdb6af06..6549d7f69c4 100644 --- a/service/storagegateway/doc.go +++ b/service/storagegateway/doc.go @@ -11,28 +11,28 @@ // following links to get started using the AWS Storage Gateway Service API // Reference: // -// * AWS Storage Gateway required request headers +// * AWS Storage Gateway required request headers // (https://docs.aws.amazon.com/storagegateway/latest/userguide/AWSStorageGatewayAPI.html#AWSStorageGatewayHTTPRequestsHeaders): // Describes the required headers that you must send with every POST request to AWS // Storage Gateway. // -// * Signing requests +// * Signing requests // (https://docs.aws.amazon.com/storagegateway/latest/userguide/AWSStorageGatewayAPI.html#AWSStorageGatewaySigningRequests): // AWS Storage Gateway requires that you authenticate every request you send; this // topic describes how sign such a request. 
// -// * Error responses +// * Error responses // (https://docs.aws.amazon.com/storagegateway/latest/userguide/AWSStorageGatewayAPI.html#APIErrorResponses): // Provides reference information about AWS Storage Gateway errors. // -// * -// Operations in AWS Storage Gateway +// * Operations +// in AWS Storage Gateway // (https://docs.aws.amazon.com/storagegateway/latest/APIReference/API_Operations.html): // Contains detailed descriptions of all AWS Storage Gateway operations, their // request parameters, response elements, possible errors, and examples of requests // and responses. // -// * AWS Storage Gateway endpoints and quotas +// * AWS Storage Gateway endpoints and quotas // (https://docs.aws.amazon.com/general/latest/gr/sg.html): Provides a list of each // AWS Region and the endpoints available for use with AWS Storage Gateway. // diff --git a/service/storagegateway/types/enums.go b/service/storagegateway/types/enums.go index 7f46df4a545..0ec818707c4 100644 --- a/service/storagegateway/types/enums.go +++ b/service/storagegateway/types/enums.go @@ -6,13 +6,13 @@ type ActiveDirectoryStatus string // Enum values for ActiveDirectoryStatus const ( - ActiveDirectoryStatusAccess_denied ActiveDirectoryStatus = "ACCESS_DENIED" - ActiveDirectoryStatusDetached ActiveDirectoryStatus = "DETACHED" - ActiveDirectoryStatusJoined ActiveDirectoryStatus = "JOINED" - ActiveDirectoryStatusJoining ActiveDirectoryStatus = "JOINING" - ActiveDirectoryStatusNetwork_error ActiveDirectoryStatus = "NETWORK_ERROR" - ActiveDirectoryStatusTimeout ActiveDirectoryStatus = "TIMEOUT" - ActiveDirectoryStatusUnknown_error ActiveDirectoryStatus = "UNKNOWN_ERROR" + ActiveDirectoryStatusAccessDenied ActiveDirectoryStatus = "ACCESS_DENIED" + ActiveDirectoryStatusDetached ActiveDirectoryStatus = "DETACHED" + ActiveDirectoryStatusJoined ActiveDirectoryStatus = "JOINED" + ActiveDirectoryStatusJoining ActiveDirectoryStatus = "JOINING" + ActiveDirectoryStatusNetworkError ActiveDirectoryStatus = "NETWORK_ERROR" + ActiveDirectoryStatusTimeout ActiveDirectoryStatus = "TIMEOUT" + ActiveDirectoryStatusUnknownError ActiveDirectoryStatus = "UNKNOWN_ERROR" ) // Values returns all known values for ActiveDirectoryStatus. Note that this can be @@ -339,8 +339,8 @@ type TapeStorageClass string // Enum values for TapeStorageClass const ( - TapeStorageClassDeep_archive TapeStorageClass = "DEEP_ARCHIVE" - TapeStorageClassGlacier TapeStorageClass = "GLACIER" + TapeStorageClassDeepArchive TapeStorageClass = "DEEP_ARCHIVE" + TapeStorageClassGlacier TapeStorageClass = "GLACIER" ) // Values returns all known values for TapeStorageClass. Note that this can be diff --git a/service/storagegateway/types/types.go b/service/storagegateway/types/types.go index 356e6fd233b..729b70c461c 100644 --- a/service/storagegateway/types/types.go +++ b/service/storagegateway/types/types.go @@ -381,14 +381,13 @@ type NFSFileShareInfo struct { // The user mapped to anonymous user. Valid options are the following: // - // * + // * // RootSquash: Only root is mapped to anonymous user. // - // * NoSquash: No one is - // mapped to anonymous user. + // * NoSquash: No one is mapped + // to anonymous user. // - // * AllSquash: Everyone is mapped to anonymous - // user. + // * AllSquash: Everyone is mapped to anonymous user. 
Squash *string // A list of up to 50 tags assigned to the NFS file share, sorted alphabetically by diff --git a/service/sts/api_op_AssumeRole.go b/service/sts/api_op_AssumeRole.go index 7d2603f2ef1..5870aa236ad 100644 --- a/service/sts/api_op_AssumeRole.go +++ b/service/sts/api_op_AssumeRole.go @@ -70,14 +70,14 @@ import ( // other account. If the user is in the same account as the role, then you can do // either of the following: // -// * Attach a policy to the user (identical to the +// * Attach a policy to the user (identical to the // previous user in a different account). // -// * Add the user as a principal -// directly in the role's trust policy. +// * Add the user as a principal directly +// in the role's trust policy. // -// In this case, the trust policy acts as an -// IAM resource-based policy. Users in the same account as the role do not need +// In this case, the trust policy acts as an IAM +// resource-based policy. Users in the same account as the role do not need // explicit permission to assume the role. For more information about trust // policies and resource-based policies, see IAM Policies // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) in the diff --git a/service/sts/api_op_AssumeRoleWithSAML.go b/service/sts/api_op_AssumeRoleWithSAML.go index fdf94aaee1b..714d0743788 100644 --- a/service/sts/api_op_AssumeRoleWithSAML.go +++ b/service/sts/api_op_AssumeRoleWithSAML.go @@ -97,19 +97,19 @@ import ( // specifies this SAML provider in its trust policy. For more information, see the // following resources: // -// * About SAML 2.0-based Federation +// * About SAML 2.0-based Federation // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) // in the IAM User Guide. // -// * Creating SAML Identity Providers +// * Creating SAML Identity Providers // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) // in the IAM User Guide. // -// * Configuring a Relying Party and Claims +// * Configuring a Relying Party and Claims // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) // in the IAM User Guide. // -// * Creating a Role for SAML 2.0 Federation +// * Creating a Role for SAML 2.0 Federation // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) // in the IAM User Guide. func (c *Client) AssumeRoleWithSAML(ctx context.Context, params *AssumeRoleWithSAMLInput, optFns ...func(*Options)) (*AssumeRoleWithSAMLOutput, error) { diff --git a/service/sts/api_op_AssumeRoleWithWebIdentity.go b/service/sts/api_op_AssumeRoleWithWebIdentity.go index b6ef95aa465..ffcfd62c4a7 100644 --- a/service/sts/api_op_AssumeRoleWithWebIdentity.go +++ b/service/sts/api_op_AssumeRoleWithWebIdentity.go @@ -109,27 +109,27 @@ import ( // information about how to use web identity federation and the // AssumeRoleWithWebIdentity API, see the following resources: // -// * Using Web +// * Using Web // Identity Federation API Operations for Mobile Apps // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html) // and Federation Through a Web-based Identity Provider // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). // -// -// * Web Identity Federation Playground +// * +// Web Identity Federation Playground // (https://aws.amazon.com/blogs/aws/the-aws-web-identity-federation-playground/). 
// Walk through the process of authenticating through Login with Amazon, Facebook, // or Google, getting temporary security credentials, and then using those // credentials to make a request to AWS. // -// * AWS SDK for iOS Developer Guide +// * AWS SDK for iOS Developer Guide // (http://aws.amazon.com/sdkforios/) and AWS SDK for Android Developer Guide // (http://aws.amazon.com/sdkforandroid/). These toolkits contain sample apps that // show how to invoke the identity providers. The toolkits then show how to use the // information from these providers to get and use temporary security // credentials. // -// * Web Identity Federation with Mobile Applications +// * Web Identity Federation with Mobile Applications // (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications). // This article discusses web identity federation and shows an example of how to // use web identity federation to get access to content in Amazon S3. diff --git a/service/sts/api_op_DecodeAuthorizationMessage.go b/service/sts/api_op_DecodeAuthorizationMessage.go index 45775e92cdf..5f30fa3dddc 100644 --- a/service/sts/api_op_DecodeAuthorizationMessage.go +++ b/service/sts/api_op_DecodeAuthorizationMessage.go @@ -25,21 +25,21 @@ import ( // DecodeAuthorizationMessage (sts:DecodeAuthorizationMessage) action. The decoded // message includes the following type of information: // -// * Whether the request -// was denied due to an explicit deny or due to the absence of an explicit allow. -// For more information, see Determining Whether a Request is Allowed or Denied +// * Whether the request was +// denied due to an explicit deny or due to the absence of an explicit allow. For +// more information, see Determining Whether a Request is Allowed or Denied // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow) // in the IAM User Guide. // -// * The principal who made the request. +// * The principal who made the request. // -// * The -// requested action. +// * The requested +// action. // -// * The requested resource. +// * The requested resource. // -// * The values of condition -// keys in the context of the user's request. +// * The values of condition keys in the +// context of the user's request. func (c *Client) DecodeAuthorizationMessage(ctx context.Context, params *DecodeAuthorizationMessageInput, optFns ...func(*Options)) (*DecodeAuthorizationMessageOutput, error) { if params == nil { params = &DecodeAuthorizationMessageInput{} diff --git a/service/sts/api_op_GetFederationToken.go b/service/sts/api_op_GetFederationToken.go index c82dc0970b2..48c607c0730 100644 --- a/service/sts/api_op_GetFederationToken.go +++ b/service/sts/api_op_GetFederationToken.go @@ -45,10 +45,10 @@ import ( // can use the temporary credentials created by GetFederationToken in any AWS // service except the following: // -// * You cannot call any IAM operations using -// the AWS CLI or the AWS API. +// * You cannot call any IAM operations using the +// AWS CLI or the AWS API. // -// * You cannot call any STS operations except +// * You cannot call any STS operations except // GetCallerIdentity. 
// // You must pass an inline or managed session policy diff --git a/service/sts/api_op_GetSessionToken.go b/service/sts/api_op_GetSessionToken.go index ef5dbf42da3..a0b1c42138d 100644 --- a/service/sts/api_op_GetSessionToken.go +++ b/service/sts/api_op_GetSessionToken.go @@ -35,11 +35,11 @@ import ( // The temporary security credentials created by GetSessionToken can be used to // make API calls to any AWS service with the following exceptions: // -// * You -// cannot call any IAM API operations unless MFA authentication information is -// included in the request. +// * You cannot +// call any IAM API operations unless MFA authentication information is included in +// the request. // -// * You cannot call any STS API except AssumeRole or +// * You cannot call any STS API except AssumeRole or // GetCallerIdentity. // // We recommend that you do not call GetSessionToken with AWS diff --git a/service/support/api_op_AddAttachmentsToSet.go b/service/support/api_op_AddAttachmentsToSet.go index 1718cd078d8..6d52aea25ff 100644 --- a/service/support/api_op_AddAttachmentsToSet.go +++ b/service/support/api_op_AddAttachmentsToSet.go @@ -16,13 +16,13 @@ import ( // communication. The set is available for 1 hour after it's created. The // expiryTime returned in the response is when the set expires. // -// * You must -// have a Business or Enterprise support plan to use the AWS Support API. +// * You must have a +// Business or Enterprise support plan to use the AWS Support API. // -// * If -// you call the AWS Support API from an account that does not have a Business or -// Enterprise support plan, the SubscriptionRequiredException error message -// appears. For information about changing your support plan, see AWS Support +// * If you call +// the AWS Support API from an account that does not have a Business or Enterprise +// support plan, the SubscriptionRequiredException error message appears. For +// information about changing your support plan, see AWS Support // (http://aws.amazon.com/premiumsupport/). func (c *Client) AddAttachmentsToSet(ctx context.Context, params *AddAttachmentsToSetInput, optFns ...func(*Options)) (*AddAttachmentsToSetOutput, error) { if params == nil { diff --git a/service/support/api_op_AddCommunicationToCase.go b/service/support/api_op_AddCommunicationToCase.go index 83cdeb038b7..ae8662b41f8 100644 --- a/service/support/api_op_AddCommunicationToCase.go +++ b/service/support/api_op_AddCommunicationToCase.go @@ -13,16 +13,17 @@ import ( // Adds additional customer communication to an AWS Support case. Use the caseId // parameter to identify the case to which to add communication. You can list a set // of email addresses to copy on the communication by using the ccEmailAddresses -// parameter. The communicationBody value contains the text of the communication. +// parameter. The communicationBody value contains the text of the +// communication. // +// * You must have a Business or Enterprise support plan to use the +// AWS Support API. // -// * You must have a Business or Enterprise support plan to use the AWS Support -// API. -// -// * If you call the AWS Support API from an account that does not have a -// Business or Enterprise support plan, the SubscriptionRequiredException error -// message appears. For information about changing your support plan, see AWS -// Support (http://aws.amazon.com/premiumsupport/). 
+// * If you call the AWS Support API from an account that does +// not have a Business or Enterprise support plan, the +// SubscriptionRequiredException error message appears. For information about +// changing your support plan, see AWS Support +// (http://aws.amazon.com/premiumsupport/). func (c *Client) AddCommunicationToCase(ctx context.Context, params *AddCommunicationToCaseInput, optFns ...func(*Options)) (*AddCommunicationToCaseOutput, error) { if params == nil { params = &AddCommunicationToCaseInput{} diff --git a/service/support/api_op_CreateCase.go b/service/support/api_op_CreateCase.go index e21f4df7161..d8abc14f83d 100644 --- a/service/support/api_op_CreateCase.go +++ b/service/support/api_op_CreateCase.go @@ -16,12 +16,11 @@ import ( // API doesn't support requesting service limit increases. You can submit a service // limit increase in the following ways: // -// * Submit a request from the AWS -// Support Center Create Case -// (https://console.aws.amazon.com/support/home#/case/create) page. +// * Submit a request from the AWS Support +// Center Create Case (https://console.aws.amazon.com/support/home#/case/create) +// page. // -// * Use the -// Service Quotas RequestServiceQuotaIncrease +// * Use the Service Quotas RequestServiceQuotaIncrease // (https://docs.aws.amazon.com/servicequotas/2019-06-24/apireference/API_RequestServiceQuotaIncrease.html) // operation. // @@ -33,10 +32,10 @@ import ( // the AWS Support Center (https://console.aws.amazon.com/support). Use the // DescribeCases operation to get the displayId. // -// * You must have a Business or +// * You must have a Business or // Enterprise support plan to use the AWS Support API. // -// * If you call the AWS +// * If you call the AWS // Support API from an account that does not have a Business or Enterprise support // plan, the SubscriptionRequiredException error message appears. For information // about changing your support plan, see AWS Support diff --git a/service/support/api_op_DescribeAttachment.go b/service/support/api_op_DescribeAttachment.go index 7c4b21197f0..731736b4a94 100644 --- a/service/support/api_op_DescribeAttachment.go +++ b/service/support/api_op_DescribeAttachment.go @@ -17,12 +17,12 @@ import ( // or case communication. Attachment IDs are returned in the AttachmentDetails // objects that are returned by the DescribeCommunications operation. // -// * You -// must have a Business or Enterprise support plan to use the AWS Support API. +// * You must +// have a Business or Enterprise support plan to use the AWS Support API. // -// -// * If you call the AWS Support API from an account that does not have a Business -// or Enterprise support plan, the SubscriptionRequiredException error message +// * If you +// call the AWS Support API from an account that does not have a Business or +// Enterprise support plan, the SubscriptionRequiredException error message // appears. For information about changing your support plan, see AWS Support // (http://aws.amazon.com/premiumsupport/). func (c *Client) DescribeAttachment(ctx context.Context, params *DescribeAttachmentInput, optFns ...func(*Options)) (*DescribeAttachmentOutput, error) { diff --git a/service/support/api_op_DescribeCases.go b/service/support/api_op_DescribeCases.go index ccc70e8ced0..5695ed97378 100644 --- a/service/support/api_op_DescribeCases.go +++ b/service/support/api_op_DescribeCases.go @@ -17,24 +17,24 @@ import ( // to specify how much information to return. 
The response returns the following in // JSON format: // -// * One or more CaseDetails +// * One or more CaseDetails // (https://docs.aws.amazon.com/awssupport/latest/APIReference/API_CaseDetails.html) // data types. // -// * One or more nextToken values, which specify where to paginate -// the returned records represented by the CaseDetails objects. +// * One or more nextToken values, which specify where to paginate the +// returned records represented by the CaseDetails objects. // -// Case data is -// available for 12 months after creation. If a case was created more than 12 -// months ago, a request might return an error. +// Case data is available +// for 12 months after creation. If a case was created more than 12 months ago, a +// request might return an error. // -// * You must have a Business or -// Enterprise support plan to use the AWS Support API. +// * You must have a Business or Enterprise support +// plan to use the AWS Support API. // -// * If you call the AWS -// Support API from an account that does not have a Business or Enterprise support -// plan, the SubscriptionRequiredException error message appears. For information -// about changing your support plan, see AWS Support +// * If you call the AWS Support API from an +// account that does not have a Business or Enterprise support plan, the +// SubscriptionRequiredException error message appears. For information about +// changing your support plan, see AWS Support // (http://aws.amazon.com/premiumsupport/). func (c *Client) DescribeCases(ctx context.Context, params *DescribeCasesInput, optFns ...func(*Options)) (*DescribeCasesOutput, error) { if params == nil { diff --git a/service/support/api_op_DescribeCommunications.go b/service/support/api_op_DescribeCommunications.go index fb07197ad9c..012715a8d97 100644 --- a/service/support/api_op_DescribeCommunications.go +++ b/service/support/api_op_DescribeCommunications.go @@ -20,13 +20,13 @@ import ( // number of cases that you want to display on each page, and use nextToken to // specify the resumption of pagination. // -// * You must have a Business or -// Enterprise support plan to use the AWS Support API. +// * You must have a Business or Enterprise +// support plan to use the AWS Support API. // -// * If you call the AWS -// Support API from an account that does not have a Business or Enterprise support -// plan, the SubscriptionRequiredException error message appears. For information -// about changing your support plan, see AWS Support +// * If you call the AWS Support API from +// an account that does not have a Business or Enterprise support plan, the +// SubscriptionRequiredException error message appears. For information about +// changing your support plan, see AWS Support // (http://aws.amazon.com/premiumsupport/). func (c *Client) DescribeCommunications(ctx context.Context, params *DescribeCommunicationsInput, optFns ...func(*Options)) (*DescribeCommunicationsOutput, error) { if params == nil { diff --git a/service/support/api_op_DescribeServices.go b/service/support/api_op_DescribeServices.go index 85cfb976f68..9252179e401 100644 --- a/service/support/api_op_DescribeServices.go +++ b/service/support/api_op_DescribeServices.go @@ -22,11 +22,11 @@ import ( // that the DescribeServices operation returns, so that you have the most recent // set of service and category codes. // -// * You must have a Business or Enterprise +// * You must have a Business or Enterprise // support plan to use the AWS Support API. 
// -// * If you call the AWS Support API -// from an account that does not have a Business or Enterprise support plan, the +// * If you call the AWS Support API from +// an account that does not have a Business or Enterprise support plan, the // SubscriptionRequiredException error message appears. For information about // changing your support plan, see AWS Support // (http://aws.amazon.com/premiumsupport/). diff --git a/service/support/api_op_DescribeSeverityLevels.go b/service/support/api_op_DescribeSeverityLevels.go index 065b9be8b8e..03186535df0 100644 --- a/service/support/api_op_DescribeSeverityLevels.go +++ b/service/support/api_op_DescribeSeverityLevels.go @@ -15,13 +15,13 @@ import ( // The severity level for a case is also a field in the CaseDetails data type that // you include for a CreateCase request. // -// * You must have a Business or -// Enterprise support plan to use the AWS Support API. +// * You must have a Business or Enterprise +// support plan to use the AWS Support API. // -// * If you call the AWS -// Support API from an account that does not have a Business or Enterprise support -// plan, the SubscriptionRequiredException error message appears. For information -// about changing your support plan, see AWS Support +// * If you call the AWS Support API from +// an account that does not have a Business or Enterprise support plan, the +// SubscriptionRequiredException error message appears. For information about +// changing your support plan, see AWS Support // (http://aws.amazon.com/premiumsupport/). func (c *Client) DescribeSeverityLevels(ctx context.Context, params *DescribeSeverityLevelsInput, optFns ...func(*Options)) (*DescribeSeverityLevelsOutput, error) { if params == nil { diff --git a/service/support/api_op_DescribeTrustedAdvisorCheckRefreshStatuses.go b/service/support/api_op_DescribeTrustedAdvisorCheckRefreshStatuses.go index 8b6ec93e1e4..9f1a4a77362 100644 --- a/service/support/api_op_DescribeTrustedAdvisorCheckRefreshStatuses.go +++ b/service/support/api_op_DescribeTrustedAdvisorCheckRefreshStatuses.go @@ -18,13 +18,13 @@ import ( // DescribeTrustedAdvisorCheckRefreshStatuses operation. If you call this operation // for these checks, you might see an InvalidParameterValue error. // -// * You must -// have a Business or Enterprise support plan to use the AWS Support API. +// * You must have +// a Business or Enterprise support plan to use the AWS Support API. // -// * If -// you call the AWS Support API from an account that does not have a Business or -// Enterprise support plan, the SubscriptionRequiredException error message -// appears. For information about changing your support plan, see AWS Support +// * If you call +// the AWS Support API from an account that does not have a Business or Enterprise +// support plan, the SubscriptionRequiredException error message appears. For +// information about changing your support plan, see AWS Support // (http://aws.amazon.com/premiumsupport/). 
func (c *Client) DescribeTrustedAdvisorCheckRefreshStatuses(ctx context.Context, params *DescribeTrustedAdvisorCheckRefreshStatusesInput, optFns ...func(*Options)) (*DescribeTrustedAdvisorCheckRefreshStatusesOutput, error) { if params == nil { diff --git a/service/support/api_op_DescribeTrustedAdvisorCheckResult.go b/service/support/api_op_DescribeTrustedAdvisorCheckResult.go index 25369d3c453..c514293a2ae 100644 --- a/service/support/api_op_DescribeTrustedAdvisorCheckResult.go +++ b/service/support/api_op_DescribeTrustedAdvisorCheckResult.go @@ -16,32 +16,32 @@ import ( // operation. The response contains a TrustedAdvisorCheckResult object, which // contains these three objects: // -// * TrustedAdvisorCategorySpecificSummary +// * TrustedAdvisorCategorySpecificSummary // +// * +// TrustedAdvisorResourceDetail // -// * TrustedAdvisorResourceDetail +// * TrustedAdvisorResourcesSummary // -// * TrustedAdvisorResourcesSummary +// In addition, the +// response contains these fields: // -// In -// addition, the response contains these fields: +// * status - The alert status of the check: "ok" +// (green), "warning" (yellow), "error" (red), or "not_available". // -// * status - The alert status -// of the check: "ok" (green), "warning" (yellow), "error" (red), or -// "not_available". +// * timestamp - +// The time of the last refresh of the check. // -// * timestamp - The time of the last refresh of the check. +// * checkId - The unique identifier +// for the check. // +// * You must have a Business or Enterprise support plan to use the +// AWS Support API. // -// * checkId - The unique identifier for the check. -// -// * You must have a Business -// or Enterprise support plan to use the AWS Support API. -// -// * If you call the -// AWS Support API from an account that does not have a Business or Enterprise -// support plan, the SubscriptionRequiredException error message appears. For -// information about changing your support plan, see AWS Support +// * If you call the AWS Support API from an account that does +// not have a Business or Enterprise support plan, the +// SubscriptionRequiredException error message appears. For information about +// changing your support plan, see AWS Support // (http://aws.amazon.com/premiumsupport/). func (c *Client) DescribeTrustedAdvisorCheckResult(ctx context.Context, params *DescribeTrustedAdvisorCheckResultInput, optFns ...func(*Options)) (*DescribeTrustedAdvisorCheckResultOutput, error) { if params == nil { diff --git a/service/support/api_op_DescribeTrustedAdvisorCheckSummaries.go b/service/support/api_op_DescribeTrustedAdvisorCheckSummaries.go index 0128b744e88..e7488da7f4a 100644 --- a/service/support/api_op_DescribeTrustedAdvisorCheckSummaries.go +++ b/service/support/api_op_DescribeTrustedAdvisorCheckSummaries.go @@ -16,13 +16,13 @@ import ( // DescribeTrustedAdvisorChecks operation. The response contains an array of // TrustedAdvisorCheckSummary objects. // -// * You must have a Business or -// Enterprise support plan to use the AWS Support API. +// * You must have a Business or Enterprise +// support plan to use the AWS Support API. // -// * If you call the AWS -// Support API from an account that does not have a Business or Enterprise support -// plan, the SubscriptionRequiredException error message appears. 
For information -// about changing your support plan, see AWS Support +// * If you call the AWS Support API from +// an account that does not have a Business or Enterprise support plan, the +// SubscriptionRequiredException error message appears. For information about +// changing your support plan, see AWS Support // (http://aws.amazon.com/premiumsupport/). func (c *Client) DescribeTrustedAdvisorCheckSummaries(ctx context.Context, params *DescribeTrustedAdvisorCheckSummariesInput, optFns ...func(*Options)) (*DescribeTrustedAdvisorCheckSummariesOutput, error) { if params == nil { diff --git a/service/support/api_op_DescribeTrustedAdvisorChecks.go b/service/support/api_op_DescribeTrustedAdvisorChecks.go index 61b69b76cb2..0d6f50a2757 100644 --- a/service/support/api_op_DescribeTrustedAdvisorChecks.go +++ b/service/support/api_op_DescribeTrustedAdvisorChecks.go @@ -17,10 +17,10 @@ import ( // The response contains a TrustedAdvisorCheckDescription object for each check. // You must set the AWS Region to us-east-1. // -// * You must have a Business or +// * You must have a Business or // Enterprise support plan to use the AWS Support API. // -// * If you call the AWS +// * If you call the AWS // Support API from an account that does not have a Business or Enterprise support // plan, the SubscriptionRequiredException error message appears. For information // about changing your support plan, see AWS Support diff --git a/service/support/api_op_RefreshTrustedAdvisorCheck.go b/service/support/api_op_RefreshTrustedAdvisorCheck.go index 2162c1f2348..3da22d4d849 100644 --- a/service/support/api_op_RefreshTrustedAdvisorCheck.go +++ b/service/support/api_op_RefreshTrustedAdvisorCheck.go @@ -18,10 +18,10 @@ import ( // InvalidParameterValue error. The response contains a // TrustedAdvisorCheckRefreshStatus object. // -// * You must have a Business or +// * You must have a Business or // Enterprise support plan to use the AWS Support API. // -// * If you call the AWS +// * If you call the AWS // Support API from an account that does not have a Business or Enterprise support // plan, the SubscriptionRequiredException error message appears. For information // about changing your support plan, see AWS Support diff --git a/service/support/api_op_ResolveCase.go b/service/support/api_op_ResolveCase.go index af491280be0..f1a2079176a 100644 --- a/service/support/api_op_ResolveCase.go +++ b/service/support/api_op_ResolveCase.go @@ -13,11 +13,11 @@ import ( // Resolves a support case. This operation takes a caseId and returns the initial // and final state of the case. // -// * You must have a Business or Enterprise -// support plan to use the AWS Support API. +// * You must have a Business or Enterprise support +// plan to use the AWS Support API. // -// * If you call the AWS Support API -// from an account that does not have a Business or Enterprise support plan, the +// * If you call the AWS Support API from an +// account that does not have a Business or Enterprise support plan, the // SubscriptionRequiredException error message appears. For information about // changing your support plan, see AWS Support // (http://aws.amazon.com/premiumsupport/). diff --git a/service/support/doc.go b/service/support/doc.go index 11732a572fb..ebc99f74133 100644 --- a/service/support/doc.go +++ b/service/support/doc.go @@ -8,13 +8,13 @@ // service enables you to manage your AWS Support cases programmatically. It uses // HTTP methods that return results in JSON format. 
// -// * You must have a Business -// or Enterprise support plan to use the AWS Support API. +// * You must have a Business or +// Enterprise support plan to use the AWS Support API. // -// * If you call the -// AWS Support API from an account that does not have a Business or Enterprise -// support plan, the SubscriptionRequiredException error message appears. For -// information about changing your support plan, see AWS Support +// * If you call the AWS +// Support API from an account that does not have a Business or Enterprise support +// plan, the SubscriptionRequiredException error message appears. For information +// about changing your support plan, see AWS Support // (http://aws.amazon.com/premiumsupport/). // // The AWS Support service also exposes a @@ -24,18 +24,18 @@ // checks to refresh, and get the refresh status of checks. The following list // describes the AWS Support case management operations: // -// * Service names, -// issue categories, and available severity levels. The DescribeServices and +// * Service names, issue +// categories, and available severity levels. The DescribeServices and // DescribeSeverityLevels operations return AWS service names, service codes, // service categories, and problem severity levels. You use these values when you // call the CreateCase operation. // -// * Case creation, case details, and case +// * Case creation, case details, and case // resolution. The CreateCase, DescribeCases, DescribeAttachment, and ResolveCase // operations create AWS Support cases, retrieve information about cases, and // resolve cases. // -// * Case communication. The DescribeCommunications, +// * Case communication. The DescribeCommunications, // AddCommunicationToCase, and AddAttachmentsToSet operations retrieve and add // communications and attachments to AWS Support cases. // @@ -43,26 +43,25 @@ // describes the operations available from the AWS Support service for Trusted // Advisor: // -// * DescribeTrustedAdvisorChecks returns the list of checks that run +// * DescribeTrustedAdvisorChecks returns the list of checks that run // against your AWS resources. // -// * Using the checkId for a specific check -// returned by DescribeTrustedAdvisorChecks, you can call -// DescribeTrustedAdvisorCheckResult to obtain the results for the check that you -// specified. +// * Using the checkId for a specific check returned +// by DescribeTrustedAdvisorChecks, you can call DescribeTrustedAdvisorCheckResult +// to obtain the results for the check that you specified. // -// * DescribeTrustedAdvisorCheckSummaries returns summarized -// results for one or more Trusted Advisor checks. +// * +// DescribeTrustedAdvisorCheckSummaries returns summarized results for one or more +// Trusted Advisor checks. // -// * -// RefreshTrustedAdvisorCheck requests that Trusted Advisor rerun a specified -// check. +// * RefreshTrustedAdvisorCheck requests that Trusted +// Advisor rerun a specified check. // -// * DescribeTrustedAdvisorCheckRefreshStatuses reports the refresh -// status of one or more checks. +// * DescribeTrustedAdvisorCheckRefreshStatuses +// reports the refresh status of one or more checks. // -// For authentication of requests, AWS Support uses -// Signature Version 4 Signing Process +// For authentication of +// requests, AWS Support uses Signature Version 4 Signing Process // (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). 
See // About the AWS Support API // (https://docs.aws.amazon.com/awssupport/latest/user/Welcome.html) in the AWS diff --git a/service/support/types/types.go b/service/support/types/types.go index 7a1171f6978..062f85a9421 100644 --- a/service/support/types/types.go +++ b/service/support/types/types.go @@ -28,61 +28,60 @@ type AttachmentDetails struct { // contained in the response from a DescribeCases request. CaseDetails contains the // following fields: // -// * caseId. The AWS Support case ID requested or returned -// in the call. The case ID is an alphanumeric string formatted as shown in this +// * caseId. The AWS Support case ID requested or returned in +// the call. The case ID is an alphanumeric string formatted as shown in this // example: case-12345678910-2013-c4c1d2bf33c5cf47. // -// * categoryCode. The -// category of problem for the AWS Support case. Corresponds to the CategoryCode -// values returned by a call to DescribeServices. +// * categoryCode. The category +// of problem for the AWS Support case. Corresponds to the CategoryCode values +// returned by a call to DescribeServices. // -// * displayId. The identifier -// for the case on pages in the AWS Support Center. +// * displayId. The identifier for the +// case on pages in the AWS Support Center. // -// * language. The ISO 639-1 -// code for the language in which AWS provides support. AWS Support currently -// supports English ("en") and Japanese ("ja"). Language parameters must be passed -// explicitly for operations that take them. +// * language. The ISO 639-1 code for the +// language in which AWS provides support. AWS Support currently supports English +// ("en") and Japanese ("ja"). Language parameters must be passed explicitly for +// operations that take them. // -// * nextToken. A resumption point -// for pagination. +// * nextToken. A resumption point for pagination. // -// * recentCommunications. One or more Communication objects. -// Fields of these objects are attachments, body, caseId, submittedBy, and -// timeCreated. +// * +// recentCommunications. One or more Communication objects. Fields of these objects +// are attachments, body, caseId, submittedBy, and timeCreated. // -// * serviceCode. The identifier for the AWS service that -// corresponds to the service code defined in the call to DescribeServices. +// * serviceCode. The +// identifier for the AWS service that corresponds to the service code defined in +// the call to DescribeServices. // -// * -// severityCode. The severity code assigned to the case. Contains one of the values -// returned by the call to DescribeSeverityLevels. The possible values are: low, -// normal, high, urgent, and critical. +// * severityCode. The severity code assigned to the +// case. Contains one of the values returned by the call to DescribeSeverityLevels. +// The possible values are: low, normal, high, urgent, and critical. // -// * status. The status of the case in the -// AWS Support Center. Valid values: +// * status. The +// status of the case in the AWS Support Center. Valid values: // -// * opened +// * opened // -// * +// * // pending-customer-action // -// * reopened +// * reopened // -// * resolved +// * resolved // -// * -// unassigned +// * unassigned // -// * work-in-progress +// * +// work-in-progress // -// * subject. The subject line of the -// case. +// * subject. The subject line of the case. // -// * submittedBy. The email address of the account that submitted the -// case. +// * submittedBy. 
The +// email address of the account that submitted the case. // -// * timeCreated. The time the case was created, in ISO-8601 format. +// * timeCreated. The time +// the case was created, in ISO-8601 format. type CaseDetails struct { // The AWS Support case ID requested or returned in the call. The case ID is an @@ -119,19 +118,18 @@ type CaseDetails struct { // The status of the case. Valid values: // - // * opened + // * opened // - // * - // pending-customer-action + // * pending-customer-action // - // * reopened + // * + // reopened // - // * resolved + // * resolved // - // * unassigned + // * unassigned // - // * - // work-in-progress + // * work-in-progress Status *string // The subject line for the case in the AWS Support Center. @@ -226,20 +224,19 @@ type SeverityLevel struct { // name in the Support Center is "General guidance". These are the Support Center // code/name mappings: // - // * low: General guidance + // * low: General guidance // - // * normal: System - // impaired + // * normal: System impaired // - // * high: Production system impaired + // * high: + // Production system impaired // - // * urgent: Production - // system down + // * urgent: Production system down // - // * critical: Business-critical system down + // * critical: + // Business-critical system down // - // For more - // information, see Choosing a severity + // For more information, see Choosing a severity // (https://docs.aws.amazon.com/awssupport/latest/user/case-management.html#choosing-severity) // in the AWS Support User Guide. Name *string @@ -305,19 +302,19 @@ type TrustedAdvisorCheckRefreshStatus struct { // The status of the Trusted Advisor check for which a refresh has been // requested: // - // * none: The check is not refreshed or the non-success status - // exceeds the timeout + // * none: The check is not refreshed or the non-success status exceeds + // the timeout // - // * enqueued: The check refresh requests has entered the - // refresh queue + // * enqueued: The check refresh requests has entered the refresh + // queue // - // * processing: The check refresh request is picked up by the - // rule processing engine + // * processing: The check refresh request is picked up by the rule + // processing engine // - // * success: The check is successfully refreshed + // * success: The check is successfully refreshed // - // - // * abandoned: The check refresh has failed + // * abandoned: + // The check refresh has failed // // This member is required. Status *string diff --git a/service/swf/api_op_CountClosedWorkflowExecutions.go b/service/swf/api_op_CountClosedWorkflowExecutions.go index b678c2c7056..bd785a104c3 100644 --- a/service/swf/api_op_CountClosedWorkflowExecutions.go +++ b/service/swf/api_op_CountClosedWorkflowExecutions.go @@ -17,22 +17,22 @@ import ( // changes. Access Control You can use IAM policies to control this action's access // to Amazon SWF resources as follows: // -// * Use a Resource element with the -// domain name to limit the action to only specified domains. +// * Use a Resource element with the domain +// name to limit the action to only specified domains. // -// * Use an Action -// element to allow or deny permission to call this action. +// * Use an Action element to +// allow or deny permission to call this action. // -// * Constrain the -// following parameters by using a Condition element with the appropriate keys. +// * Constrain the following +// parameters by using a Condition element with the appropriate keys. 
// +// * +// tagFilter.tag: String constraint. The key is swf:tagFilter.tag. // -// * tagFilter.tag: String constraint. The key is swf:tagFilter.tag. -// -// * +// * // typeFilter.name: String constraint. The key is swf:typeFilter.name. // -// * +// * // typeFilter.version: String constraint. The key is swf:typeFilter.version. // // If diff --git a/service/swf/api_op_CountOpenWorkflowExecutions.go b/service/swf/api_op_CountOpenWorkflowExecutions.go index 143f4e200d0..07167e0bae8 100644 --- a/service/swf/api_op_CountOpenWorkflowExecutions.go +++ b/service/swf/api_op_CountOpenWorkflowExecutions.go @@ -17,30 +17,29 @@ import ( // Access Control You can use IAM policies to control this action's access to // Amazon SWF resources as follows: // -// * Use a Resource element with the domain -// name to limit the action to only specified domains. +// * Use a Resource element with the domain name +// to limit the action to only specified domains. // -// * Use an Action element -// to allow or deny permission to call this action. +// * Use an Action element to allow +// or deny permission to call this action. // -// * Constrain the following -// parameters by using a Condition element with the appropriate keys. +// * Constrain the following parameters by +// using a Condition element with the appropriate keys. // -// * -// tagFilter.tag: String constraint. The key is swf:tagFilter.tag. +// * tagFilter.tag: String +// constraint. The key is swf:tagFilter.tag. // -// * -// typeFilter.name: String constraint. The key is swf:typeFilter.name. +// * typeFilter.name: String constraint. +// The key is swf:typeFilter.name. // -// * -// typeFilter.version: String constraint. The key is swf:typeFilter.version. +// * typeFilter.version: String constraint. The +// key is swf:typeFilter.version. // -// If -// the caller doesn't have sufficient permissions to invoke the action, or the -// parameter values fall outside the specified constraints, the action fails. The -// associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. -// For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows +// If the caller doesn't have sufficient +// permissions to invoke the action, or the parameter values fall outside the +// specified constraints, the action fails. The associated event attribute's cause +// parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM +// policies, see Using IAM to Manage Access to Amazon SWF Workflows // (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. func (c *Client) CountOpenWorkflowExecutions(ctx context.Context, params *CountOpenWorkflowExecutionsInput, optFns ...func(*Options)) (*CountOpenWorkflowExecutionsOutput, error) { diff --git a/service/swf/api_op_CountPendingActivityTasks.go b/service/swf/api_op_CountPendingActivityTasks.go index d37e1c9bbac..f8976206e10 100644 --- a/service/swf/api_op_CountPendingActivityTasks.go +++ b/service/swf/api_op_CountPendingActivityTasks.go @@ -17,22 +17,21 @@ import ( // returned. Access Control You can use IAM policies to control this action's // access to Amazon SWF resources as follows: // -// * Use a Resource element with -// the domain name to limit the action to only specified domains. +// * Use a Resource element with the +// domain name to limit the action to only specified domains. // -// * Use an -// Action element to allow or deny permission to call this action. 
+// * Use an Action +// element to allow or deny permission to call this action. // -// * Constrain -// the taskList.name parameter by using a Condition element with the -// swf:taskList.name key to allow the action to access only certain task lists. +// * Constrain the +// taskList.name parameter by using a Condition element with the swf:taskList.name +// key to allow the action to access only certain task lists. // -// If -// the caller doesn't have sufficient permissions to invoke the action, or the -// parameter values fall outside the specified constraints, the action fails. The -// associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. -// For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows +// If the caller +// doesn't have sufficient permissions to invoke the action, or the parameter +// values fall outside the specified constraints, the action fails. The associated +// event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details +// and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows // (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. func (c *Client) CountPendingActivityTasks(ctx context.Context, params *CountPendingActivityTasksInput, optFns ...func(*Options)) (*CountPendingActivityTasksOutput, error) { diff --git a/service/swf/api_op_CountPendingDecisionTasks.go b/service/swf/api_op_CountPendingDecisionTasks.go index c24d258aa1a..35fe6f816f4 100644 --- a/service/swf/api_op_CountPendingDecisionTasks.go +++ b/service/swf/api_op_CountPendingDecisionTasks.go @@ -17,22 +17,21 @@ import ( // returned. Access Control You can use IAM policies to control this action's // access to Amazon SWF resources as follows: // -// * Use a Resource element with -// the domain name to limit the action to only specified domains. +// * Use a Resource element with the +// domain name to limit the action to only specified domains. // -// * Use an -// Action element to allow or deny permission to call this action. +// * Use an Action +// element to allow or deny permission to call this action. // -// * Constrain -// the taskList.name parameter by using a Condition element with the -// swf:taskList.name key to allow the action to access only certain task lists. +// * Constrain the +// taskList.name parameter by using a Condition element with the swf:taskList.name +// key to allow the action to access only certain task lists. // -// If -// the caller doesn't have sufficient permissions to invoke the action, or the -// parameter values fall outside the specified constraints, the action fails. The -// associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. -// For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows +// If the caller +// doesn't have sufficient permissions to invoke the action, or the parameter +// values fall outside the specified constraints, the action fails. The associated +// event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details +// and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows // (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. 
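The four Count* operations above repeat the same access-control guidance: scope the call with a Resource element, allow or deny it with an Action element, and narrow requests with Condition keys such as swf:tagFilter.tag, swf:typeFilter.name, swf:typeFilter.version, and swf:taskList.name. The sketch below is not part of the generated change; it shows one way such a policy could look, wrapped in a Go constant so it compiles alongside SDK code. The region, account ID, ARN format, domain name, tag, and task-list values are illustrative placeholders.

package main

import "fmt"

// countPolicy is a hedged example of the documented pattern: one statement
// constrains the workflow-execution counters by tag, the other constrains the
// pending-task counters by task list. All identifiers below are placeholders.
const countPolicy = `{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "swf:CountClosedWorkflowExecutions",
        "swf:CountOpenWorkflowExecutions"
      ],
      "Resource": "arn:aws:swf:us-east-1:123456789012:/domain/ExampleDomain",
      "Condition": {
        "StringEquals": { "swf:tagFilter.tag": "example-tag" }
      }
    },
    {
      "Effect": "Allow",
      "Action": [
        "swf:CountPendingActivityTasks",
        "swf:CountPendingDecisionTasks"
      ],
      "Resource": "arn:aws:swf:us-east-1:123456789012:/domain/ExampleDomain",
      "Condition": {
        "StringEquals": { "swf:taskList.name": "example-task-list" }
      }
    }
  ]
}`

func main() {
	fmt.Println(countPolicy)
}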
func (c *Client) CountPendingDecisionTasks(ctx context.Context, params *CountPendingDecisionTasksInput, optFns ...func(*Options)) (*CountPendingDecisionTasksOutput, error) { diff --git a/service/swf/api_op_DeprecateActivityType.go b/service/swf/api_op_DeprecateActivityType.go index 252ed9e635b..a2cb337a117 100644 --- a/service/swf/api_op_DeprecateActivityType.go +++ b/service/swf/api_op_DeprecateActivityType.go @@ -18,27 +18,27 @@ import ( // exactly reflect recent updates and changes. Access Control You can use IAM // policies to control this action's access to Amazon SWF resources as follows: // -// -// * Use a Resource element with the domain name to limit the action to only +// * +// Use a Resource element with the domain name to limit the action to only // specified domains. // -// * Use an Action element to allow or deny permission to -// call this action. +// * Use an Action element to allow or deny permission to call +// this action. // -// * Constrain the following parameters by using a Condition -// element with the appropriate keys. +// * Constrain the following parameters by using a Condition element +// with the appropriate keys. // -// * activityType.name: String -// constraint. The key is swf:activityType.name. +// * activityType.name: String constraint. The key is +// swf:activityType.name. // -// * activityType.version: -// String constraint. The key is swf:activityType.version. +// * activityType.version: String constraint. The key is +// swf:activityType.version. // -// If the caller doesn't -// have sufficient permissions to invoke the action, or the parameter values fall -// outside the specified constraints, the action fails. The associated event -// attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and -// example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows +// If the caller doesn't have sufficient permissions to +// invoke the action, or the parameter values fall outside the specified +// constraints, the action fails. The associated event attribute's cause parameter +// is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see +// Using IAM to Manage Access to Amazon SWF Workflows // (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. func (c *Client) DeprecateActivityType(ctx context.Context, params *DeprecateActivityTypeInput, optFns ...func(*Options)) (*DeprecateActivityTypeOutput, error) { diff --git a/service/swf/api_op_DeprecateDomain.go b/service/swf/api_op_DeprecateDomain.go index ebfe9c324e4..7a7ffb694da 100644 --- a/service/swf/api_op_DeprecateDomain.go +++ b/service/swf/api_op_DeprecateDomain.go @@ -19,14 +19,14 @@ import ( // exactly reflect recent updates and changes. Access Control You can use IAM // policies to control this action's access to Amazon SWF resources as follows: // -// -// * Use a Resource element with the domain name to limit the action to only +// * +// Use a Resource element with the domain name to limit the action to only // specified domains. // -// * Use an Action element to allow or deny permission to -// call this action. +// * Use an Action element to allow or deny permission to call +// this action. // -// * You cannot use an IAM policy to constrain this action's +// * You cannot use an IAM policy to constrain this action's // parameters. 
// // If the caller doesn't have sufficient permissions to invoke the diff --git a/service/swf/api_op_DeprecateWorkflowType.go b/service/swf/api_op_DeprecateWorkflowType.go index 4a4e13c5103..8fbb47c2052 100644 --- a/service/swf/api_op_DeprecateWorkflowType.go +++ b/service/swf/api_op_DeprecateWorkflowType.go @@ -19,27 +19,27 @@ import ( // recent updates and changes. Access Control You can use IAM policies to control // this action's access to Amazon SWF resources as follows: // -// * Use a Resource +// * Use a Resource // element with the domain name to limit the action to only specified domains. // +// * +// Use an Action element to allow or deny permission to call this action. // -// * Use an Action element to allow or deny permission to call this action. -// -// * +// * // Constrain the following parameters by using a Condition element with the // appropriate keys. // -// * workflowType.name: String constraint. The key is +// * workflowType.name: String constraint. The key is // swf:workflowType.name. // -// * workflowType.version: String constraint. The -// key is swf:workflowType.version. +// * workflowType.version: String constraint. The key is +// swf:workflowType.version. // -// If the caller doesn't have sufficient -// permissions to invoke the action, or the parameter values fall outside the -// specified constraints, the action fails. The associated event attribute's cause -// parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM -// policies, see Using IAM to Manage Access to Amazon SWF Workflows +// If the caller doesn't have sufficient permissions to +// invoke the action, or the parameter values fall outside the specified +// constraints, the action fails. The associated event attribute's cause parameter +// is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see +// Using IAM to Manage Access to Amazon SWF Workflows // (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. func (c *Client) DeprecateWorkflowType(ctx context.Context, params *DeprecateWorkflowTypeInput, optFns ...func(*Options)) (*DeprecateWorkflowTypeOutput, error) { diff --git a/service/swf/api_op_DescribeActivityType.go b/service/swf/api_op_DescribeActivityType.go index f09835158de..c764c7f8f4e 100644 --- a/service/swf/api_op_DescribeActivityType.go +++ b/service/swf/api_op_DescribeActivityType.go @@ -16,27 +16,27 @@ import ( // information about the type. Access Control You can use IAM policies to control // this action's access to Amazon SWF resources as follows: // -// * Use a Resource +// * Use a Resource // element with the domain name to limit the action to only specified domains. // +// * +// Use an Action element to allow or deny permission to call this action. // -// * Use an Action element to allow or deny permission to call this action. -// -// * +// * // Constrain the following parameters by using a Condition element with the // appropriate keys. // -// * activityType.name: String constraint. The key is +// * activityType.name: String constraint. The key is // swf:activityType.name. // -// * activityType.version: String constraint. The -// key is swf:activityType.version. +// * activityType.version: String constraint. The key is +// swf:activityType.version. // -// If the caller doesn't have sufficient -// permissions to invoke the action, or the parameter values fall outside the -// specified constraints, the action fails. 
The associated event attribute's cause -// parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM -// policies, see Using IAM to Manage Access to Amazon SWF Workflows +// If the caller doesn't have sufficient permissions to +// invoke the action, or the parameter values fall outside the specified +// constraints, the action fails. The associated event attribute's cause parameter +// is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see +// Using IAM to Manage Access to Amazon SWF Workflows // (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. func (c *Client) DescribeActivityType(ctx context.Context, params *DescribeActivityTypeInput, optFns ...func(*Options)) (*DescribeActivityTypeOutput, error) { @@ -79,11 +79,11 @@ type DescribeActivityTypeOutput struct { // General information about the activity type. The status of activity type // (returned in the ActivityTypeInfo structure) can be one of the following. // - // * + // * // REGISTERED – The type is registered and available. Workers supporting this type // should be running. // - // * DEPRECATED – The type was deprecated using + // * DEPRECATED – The type was deprecated using // DeprecateActivityType, but is still in use. You should keep workers supporting // this type running. You cannot create new tasks of this type. // diff --git a/service/swf/api_op_DescribeDomain.go b/service/swf/api_op_DescribeDomain.go index 86dff990bbd..0026864debf 100644 --- a/service/swf/api_op_DescribeDomain.go +++ b/service/swf/api_op_DescribeDomain.go @@ -15,20 +15,20 @@ import ( // status. Access Control You can use IAM policies to control this action's access // to Amazon SWF resources as follows: // -// * Use a Resource element with the -// domain name to limit the action to only specified domains. +// * Use a Resource element with the domain +// name to limit the action to only specified domains. // -// * Use an Action -// element to allow or deny permission to call this action. +// * Use an Action element to +// allow or deny permission to call this action. // -// * You cannot use -// an IAM policy to constrain this action's parameters. +// * You cannot use an IAM policy to +// constrain this action's parameters. // -// If the caller doesn't have -// sufficient permissions to invoke the action, or the parameter values fall -// outside the specified constraints, the action fails. The associated event -// attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and -// example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows +// If the caller doesn't have sufficient +// permissions to invoke the action, or the parameter values fall outside the +// specified constraints, the action fails. The associated event attribute's cause +// parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM +// policies, see Using IAM to Manage Access to Amazon SWF Workflows // (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. 
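The DescribeActivityType doc comment above explains the REGISTERED and DEPRECATED states returned in the ActivityTypeInfo structure. A minimal caller might look like the sketch below; it is not part of this change, the domain, name, and version are placeholders, and the output field and enum names (TypeInfo, Status, RegistrationStatusRegistered, RegistrationStatusDeprecated) are assumptions based on the shapes this package generates for the service API.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/swf"
	"github.com/aws/aws-sdk-go-v2/service/swf/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := swf.NewFromConfig(cfg)

	// Placeholder domain and activity type; field names are assumptions.
	out, err := client.DescribeActivityType(ctx, &swf.DescribeActivityTypeInput{
		Domain: aws.String("ExampleDomain"),
		ActivityType: &types.ActivityType{
			Name:    aws.String("ExampleActivity"),
			Version: aws.String("1.0"),
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// The status values mirror the REGISTERED / DEPRECATED states described
	// in the doc comment above.
	switch out.TypeInfo.Status {
	case types.RegistrationStatusRegistered:
		fmt.Println("registered: workers supporting this type should be running")
	case types.RegistrationStatusDeprecated:
		fmt.Println("deprecated: still in use, but no new tasks can be created")
	}
}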
func (c *Client) DescribeDomain(ctx context.Context, params *DescribeDomainInput, optFns ...func(*Options)) (*DescribeDomainOutput, error) { diff --git a/service/swf/api_op_DescribeWorkflowExecution.go b/service/swf/api_op_DescribeWorkflowExecution.go index f7900da22c4..255d417be01 100644 --- a/service/swf/api_op_DescribeWorkflowExecution.go +++ b/service/swf/api_op_DescribeWorkflowExecution.go @@ -18,20 +18,20 @@ import ( // Control You can use IAM policies to control this action's access to Amazon SWF // resources as follows: // -// * Use a Resource element with the domain name to -// limit the action to only specified domains. +// * Use a Resource element with the domain name to limit +// the action to only specified domains. // -// * Use an Action element to -// allow or deny permission to call this action. +// * Use an Action element to allow or deny +// permission to call this action. // -// * You cannot use an IAM -// policy to constrain this action's parameters. +// * You cannot use an IAM policy to constrain +// this action's parameters. // -// If the caller doesn't have -// sufficient permissions to invoke the action, or the parameter values fall -// outside the specified constraints, the action fails. The associated event -// attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and -// example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows +// If the caller doesn't have sufficient permissions to +// invoke the action, or the parameter values fall outside the specified +// constraints, the action fails. The associated event attribute's cause parameter +// is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see +// Using IAM to Manage Access to Amazon SWF Workflows // (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. func (c *Client) DescribeWorkflowExecution(ctx context.Context, params *DescribeWorkflowExecutionInput, optFns ...func(*Options)) (*DescribeWorkflowExecutionOutput, error) { diff --git a/service/swf/api_op_DescribeWorkflowType.go b/service/swf/api_op_DescribeWorkflowType.go index 4d92c11c8e8..204aed0a663 100644 --- a/service/swf/api_op_DescribeWorkflowType.go +++ b/service/swf/api_op_DescribeWorkflowType.go @@ -17,27 +17,26 @@ import ( // use IAM policies to control this action's access to Amazon SWF resources as // follows: // -// * Use a Resource element with the domain name to limit the action -// to only specified domains. +// * Use a Resource element with the domain name to limit the action to +// only specified domains. // -// * Use an Action element to allow or deny -// permission to call this action. +// * Use an Action element to allow or deny permission to +// call this action. // -// * Constrain the following parameters by -// using a Condition element with the appropriate keys. +// * Constrain the following parameters by using a Condition +// element with the appropriate keys. // -// * -// workflowType.name: String constraint. The key is swf:workflowType.name. +// * workflowType.name: String constraint. The +// key is swf:workflowType.name. // +// * workflowType.version: String constraint. The +// key is swf:workflowType.version. // -// * workflowType.version: String constraint. The key is -// swf:workflowType.version. -// -// If the caller doesn't have sufficient permissions to -// invoke the action, or the parameter values fall outside the specified -// constraints, the action fails. 
The associated event attribute's cause parameter -// is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see -// Using IAM to Manage Access to Amazon SWF Workflows +// If the caller doesn't have sufficient +// permissions to invoke the action, or the parameter values fall outside the +// specified constraints, the action fails. The associated event attribute's cause +// parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM +// policies, see Using IAM to Manage Access to Amazon SWF Workflows // (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. func (c *Client) DescribeWorkflowType(ctx context.Context, params *DescribeWorkflowTypeInput, optFns ...func(*Options)) (*DescribeWorkflowTypeOutput, error) { @@ -80,11 +79,11 @@ type DescribeWorkflowTypeOutput struct { // General information about the workflow type. The status of the workflow type // (returned in the WorkflowTypeInfo structure) can be one of the following. // - // * + // * // REGISTERED – The type is registered and available. Workers supporting this type // should be running. // - // * DEPRECATED – The type was deprecated using + // * DEPRECATED – The type was deprecated using // DeprecateWorkflowType, but is still in use. You should keep workers supporting // this type running. You cannot create new workflow executions of this type. // diff --git a/service/swf/api_op_GetWorkflowExecutionHistory.go b/service/swf/api_op_GetWorkflowExecutionHistory.go index 4f8e37d8293..5174ad94951 100644 --- a/service/swf/api_op_GetWorkflowExecutionHistory.go +++ b/service/swf/api_op_GetWorkflowExecutionHistory.go @@ -18,21 +18,20 @@ import ( // recent updates and changes. Access Control You can use IAM policies to control // this action's access to Amazon SWF resources as follows: // -// * Use a Resource +// * Use a Resource // element with the domain name to limit the action to only specified domains. // +// * +// Use an Action element to allow or deny permission to call this action. // -// * Use an Action element to allow or deny permission to call this action. +// * You +// cannot use an IAM policy to constrain this action's parameters. // -// * -// You cannot use an IAM policy to constrain this action's parameters. -// -// If the -// caller doesn't have sufficient permissions to invoke the action, or the -// parameter values fall outside the specified constraints, the action fails. The -// associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. -// For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows +// If the caller +// doesn't have sufficient permissions to invoke the action, or the parameter +// values fall outside the specified constraints, the action fails. The associated +// event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details +// and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows // (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. 
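GetWorkflowExecutionHistory, whose doc comment ends above, pages its results through nextPageToken. The hedged sketch below walks the full history by re-issuing the call until the token is exhausted; the domain, workflow ID, and run ID are placeholders, and the Events and NextPageToken field names are assumptions based on the service API.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/swf"
	"github.com/aws/aws-sdk-go-v2/service/swf/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := swf.NewFromConfig(cfg)

	input := &swf.GetWorkflowExecutionHistoryInput{
		Domain: aws.String("ExampleDomain"),
		Execution: &types.WorkflowExecution{
			WorkflowId: aws.String("example-workflow-id"),
			RunId:      aws.String("example-run-id"),
		},
	}

	// Follow NextPageToken until the whole event history has been read.
	for {
		out, err := client.GetWorkflowExecutionHistory(ctx, input)
		if err != nil {
			log.Fatal(err)
		}
		for _, event := range out.Events {
			fmt.Println(event.EventType)
		}
		if out.NextPageToken == nil {
			break
		}
		input.NextPageToken = out.NextPageToken
	}
}

Manual token handling is shown here only to make the pagination contract explicit; in practice a generated paginator, where available, would serve the same purpose.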
func (c *Client) GetWorkflowExecutionHistory(ctx context.Context, params *GetWorkflowExecutionHistoryInput, optFns ...func(*Options)) (*GetWorkflowExecutionHistoryOutput, error) { diff --git a/service/swf/api_op_ListActivityTypes.go b/service/swf/api_op_ListActivityTypes.go index bceb159d29d..10ecd2ced02 100644 --- a/service/swf/api_op_ListActivityTypes.go +++ b/service/swf/api_op_ListActivityTypes.go @@ -19,20 +19,20 @@ import ( // can use IAM policies to control this action's access to Amazon SWF resources as // follows: // -// * Use a Resource element with the domain name to limit the action -// to only specified domains. +// * Use a Resource element with the domain name to limit the action to +// only specified domains. // -// * Use an Action element to allow or deny -// permission to call this action. +// * Use an Action element to allow or deny permission to +// call this action. // -// * You cannot use an IAM policy to constrain -// this action's parameters. +// * You cannot use an IAM policy to constrain this action's +// parameters. // -// If the caller doesn't have sufficient permissions to -// invoke the action, or the parameter values fall outside the specified -// constraints, the action fails. The associated event attribute's cause parameter -// is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see -// Using IAM to Manage Access to Amazon SWF Workflows +// If the caller doesn't have sufficient permissions to invoke the +// action, or the parameter values fall outside the specified constraints, the +// action fails. The associated event attribute's cause parameter is set to +// OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to +// Manage Access to Amazon SWF Workflows // (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. func (c *Client) ListActivityTypes(ctx context.Context, params *ListActivityTypesInput, optFns ...func(*Options)) (*ListActivityTypesOutput, error) { diff --git a/service/swf/api_op_ListClosedWorkflowExecutions.go b/service/swf/api_op_ListClosedWorkflowExecutions.go index 2ea3df3dd01..fda594c28f5 100644 --- a/service/swf/api_op_ListClosedWorkflowExecutions.go +++ b/service/swf/api_op_ListClosedWorkflowExecutions.go @@ -19,30 +19,29 @@ import ( // Control You can use IAM policies to control this action's access to Amazon SWF // resources as follows: // -// * Use a Resource element with the domain name to -// limit the action to only specified domains. +// * Use a Resource element with the domain name to limit +// the action to only specified domains. // -// * Use an Action element to -// allow or deny permission to call this action. +// * Use an Action element to allow or deny +// permission to call this action. // -// * Constrain the following -// parameters by using a Condition element with the appropriate keys. +// * Constrain the following parameters by using a +// Condition element with the appropriate keys. // -// * -// tagFilter.tag: String constraint. The key is swf:tagFilter.tag. +// * tagFilter.tag: String +// constraint. The key is swf:tagFilter.tag. // -// * -// typeFilter.name: String constraint. The key is swf:typeFilter.name. +// * typeFilter.name: String constraint. +// The key is swf:typeFilter.name. // -// * -// typeFilter.version: String constraint. The key is swf:typeFilter.version. +// * typeFilter.version: String constraint. The +// key is swf:typeFilter.version. 
// -// If -// the caller doesn't have sufficient permissions to invoke the action, or the -// parameter values fall outside the specified constraints, the action fails. The -// associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. -// For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows +// If the caller doesn't have sufficient +// permissions to invoke the action, or the parameter values fall outside the +// specified constraints, the action fails. The associated event attribute's cause +// parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM +// policies, see Using IAM to Manage Access to Amazon SWF Workflows // (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. func (c *Client) ListClosedWorkflowExecutions(ctx context.Context, params *ListClosedWorkflowExecutionsInput, optFns ...func(*Options)) (*ListClosedWorkflowExecutionsOutput, error) { diff --git a/service/swf/api_op_ListDomains.go b/service/swf/api_op_ListDomains.go index 25cc6dc905d..e3d237aa333 100644 --- a/service/swf/api_op_ListDomains.go +++ b/service/swf/api_op_ListDomains.go @@ -18,22 +18,22 @@ import ( // updates and changes. Access Control You can use IAM policies to control this // action's access to Amazon SWF resources as follows: // -// * Use a Resource -// element with the domain name to limit the action to only specified domains. The -// element must be set to arn:aws:swf::AccountID:domain/*, where AccountID is the -// account ID, with no dashes. +// * Use a Resource element +// with the domain name to limit the action to only specified domains. The element +// must be set to arn:aws:swf::AccountID:domain/*, where AccountID is the account +// ID, with no dashes. // -// * Use an Action element to allow or deny -// permission to call this action. +// * Use an Action element to allow or deny permission to call +// this action. // -// * You cannot use an IAM policy to constrain -// this action's parameters. +// * You cannot use an IAM policy to constrain this action's +// parameters. // -// If the caller doesn't have sufficient permissions to -// invoke the action, or the parameter values fall outside the specified -// constraints, the action fails. The associated event attribute's cause parameter -// is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see -// Using IAM to Manage Access to Amazon SWF Workflows +// If the caller doesn't have sufficient permissions to invoke the +// action, or the parameter values fall outside the specified constraints, the +// action fails. The associated event attribute's cause parameter is set to +// OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to +// Manage Access to Amazon SWF Workflows // (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. 
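ListDomains, described above, filters on registration status and is the one operation whose IAM Resource element must use the arn:aws:swf::AccountID:domain/* form. The sketch below, not part of the generated change, lists registered domains; the RegistrationStatusRegistered constant and the DomainInfos and Name field names are assumptions based on the generated types.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/swf"
	"github.com/aws/aws-sdk-go-v2/service/swf/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := swf.NewFromConfig(cfg)

	// List only domains that are currently REGISTERED.
	out, err := client.ListDomains(ctx, &swf.ListDomainsInput{
		RegistrationStatus: types.RegistrationStatusRegistered,
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, d := range out.DomainInfos {
		fmt.Println(*d.Name)
	}
}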
func (c *Client) ListDomains(ctx context.Context, params *ListDomainsInput, optFns ...func(*Options)) (*ListDomainsOutput, error) { diff --git a/service/swf/api_op_ListOpenWorkflowExecutions.go b/service/swf/api_op_ListOpenWorkflowExecutions.go index 0a2df2e9c69..1b2ad299087 100644 --- a/service/swf/api_op_ListOpenWorkflowExecutions.go +++ b/service/swf/api_op_ListOpenWorkflowExecutions.go @@ -19,30 +19,29 @@ import ( // You can use IAM policies to control this action's access to Amazon SWF resources // as follows: // -// * Use a Resource element with the domain name to limit the -// action to only specified domains. +// * Use a Resource element with the domain name to limit the action +// to only specified domains. // -// * Use an Action element to allow or deny -// permission to call this action. +// * Use an Action element to allow or deny permission +// to call this action. // -// * Constrain the following parameters by -// using a Condition element with the appropriate keys. +// * Constrain the following parameters by using a Condition +// element with the appropriate keys. // -// * tagFilter.tag: -// String constraint. The key is swf:tagFilter.tag. +// * tagFilter.tag: String constraint. The key +// is swf:tagFilter.tag. // -// * typeFilter.name: -// String constraint. The key is swf:typeFilter.name. +// * typeFilter.name: String constraint. The key is +// swf:typeFilter.name. // -// * -// typeFilter.version: String constraint. The key is swf:typeFilter.version. +// * typeFilter.version: String constraint. The key is +// swf:typeFilter.version. // -// If -// the caller doesn't have sufficient permissions to invoke the action, or the -// parameter values fall outside the specified constraints, the action fails. The -// associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. -// For details and example IAM policies, see Using IAM to Manage Access to Amazon -// SWF Workflows +// If the caller doesn't have sufficient permissions to +// invoke the action, or the parameter values fall outside the specified +// constraints, the action fails. The associated event attribute's cause parameter +// is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see +// Using IAM to Manage Access to Amazon SWF Workflows // (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. func (c *Client) ListOpenWorkflowExecutions(ctx context.Context, params *ListOpenWorkflowExecutionsInput, optFns ...func(*Options)) (*ListOpenWorkflowExecutionsOutput, error) { diff --git a/service/swf/api_op_ListWorkflowTypes.go b/service/swf/api_op_ListWorkflowTypes.go index f90596bb7ec..5ceabdcb6c5 100644 --- a/service/swf/api_op_ListWorkflowTypes.go +++ b/service/swf/api_op_ListWorkflowTypes.go @@ -16,20 +16,20 @@ import ( // repeatedly. Access Control You can use IAM policies to control this action's // access to Amazon SWF resources as follows: // -// * Use a Resource element with -// the domain name to limit the action to only specified domains. +// * Use a Resource element with the +// domain name to limit the action to only specified domains. // -// * Use an -// Action element to allow or deny permission to call this action. +// * Use an Action +// element to allow or deny permission to call this action. // -// * You -// cannot use an IAM policy to constrain this action's parameters. +// * You cannot use an +// IAM policy to constrain this action's parameters. 
// -// If the caller -// doesn't have sufficient permissions to invoke the action, or the parameter -// values fall outside the specified constraints, the action fails. The associated -// event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details -// and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows +// If the caller doesn't have +// sufficient permissions to invoke the action, or the parameter values fall +// outside the specified constraints, the action fails. The associated event +// attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and +// example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows // (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. func (c *Client) ListWorkflowTypes(ctx context.Context, params *ListWorkflowTypesInput, optFns ...func(*Options)) (*ListWorkflowTypesOutput, error) { diff --git a/service/swf/api_op_PollForActivityTask.go b/service/swf/api_op_PollForActivityTask.go index 06502905608..1017ced240b 100644 --- a/service/swf/api_op_PollForActivityTask.go +++ b/service/swf/api_op_PollForActivityTask.go @@ -23,13 +23,13 @@ import ( // may hold the poll request). Access Control You can use IAM policies to control // this action's access to Amazon SWF resources as follows: // -// * Use a Resource +// * Use a Resource // element with the domain name to limit the action to only specified domains. // +// * +// Use an Action element to allow or deny permission to call this action. // -// * Use an Action element to allow or deny permission to call this action. -// -// * +// * // Constrain the taskList.name parameter by using a Condition element with the // swf:taskList.name key to allow the action to access only certain task lists. // diff --git a/service/swf/api_op_PollForDecisionTask.go b/service/swf/api_op_PollForDecisionTask.go index 3b9f72e299c..b39ca7d9c21 100644 --- a/service/swf/api_op_PollForDecisionTask.go +++ b/service/swf/api_op_PollForDecisionTask.go @@ -30,20 +30,20 @@ import ( // Instead, call PollForDecisionTask again. Access Control You can use IAM policies // to control this action's access to Amazon SWF resources as follows: // -// * Use a +// * Use a // Resource element with the domain name to limit the action to only specified // domains. // -// * Use an Action element to allow or deny permission to call this +// * Use an Action element to allow or deny permission to call this // action. // -// * Constrain the taskList.name parameter by using a Condition -// element with the swf:taskList.name key to allow the action to access only -// certain task lists. +// * Constrain the taskList.name parameter by using a Condition element +// with the swf:taskList.name key to allow the action to access only certain task +// lists. // -// If the caller doesn't have sufficient permissions to invoke -// the action, or the parameter values fall outside the specified constraints, the -// action fails. The associated event attribute's cause parameter is set to +// If the caller doesn't have sufficient permissions to invoke the action, +// or the parameter values fall outside the specified constraints, the action +// fails. The associated event attribute's cause parameter is set to // OPERATION_NOT_PERMITTED. 
For details and example IAM policies, see Using IAM to // Manage Access to Amazon SWF Workflows // (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) diff --git a/service/swf/api_op_RecordActivityTaskHeartbeat.go b/service/swf/api_op_RecordActivityTaskHeartbeat.go index e5d302955ae..a0fbfbc5226 100644 --- a/service/swf/api_op_RecordActivityTaskHeartbeat.go +++ b/service/swf/api_op_RecordActivityTaskHeartbeat.go @@ -33,20 +33,20 @@ import ( // request. Access Control You can use IAM policies to control this action's access // to Amazon SWF resources as follows: // -// * Use a Resource element with the -// domain name to limit the action to only specified domains. +// * Use a Resource element with the domain +// name to limit the action to only specified domains. // -// * Use an Action -// element to allow or deny permission to call this action. +// * Use an Action element to +// allow or deny permission to call this action. // -// * You cannot use -// an IAM policy to constrain this action's parameters. +// * You cannot use an IAM policy to +// constrain this action's parameters. // -// If the caller doesn't have -// sufficient permissions to invoke the action, or the parameter values fall -// outside the specified constraints, the action fails. The associated event -// attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and -// example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows +// If the caller doesn't have sufficient +// permissions to invoke the action, or the parameter values fall outside the +// specified constraints, the action fails. The associated event attribute's cause +// parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM +// policies, see Using IAM to Manage Access to Amazon SWF Workflows // (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. func (c *Client) RecordActivityTaskHeartbeat(ctx context.Context, params *RecordActivityTaskHeartbeatInput, optFns ...func(*Options)) (*RecordActivityTaskHeartbeatOutput, error) { diff --git a/service/swf/api_op_RegisterActivityType.go b/service/swf/api_op_RegisterActivityType.go index c983f676a35..7e96d2e19ce 100644 --- a/service/swf/api_op_RegisterActivityType.go +++ b/service/swf/api_op_RegisterActivityType.go @@ -18,29 +18,29 @@ import ( // Control You can use IAM policies to control this action's access to Amazon SWF // resources as follows: // -// * Use a Resource element with the domain name to -// limit the action to only specified domains. +// * Use a Resource element with the domain name to limit +// the action to only specified domains. // -// * Use an Action element to -// allow or deny permission to call this action. +// * Use an Action element to allow or deny +// permission to call this action. // -// * Constrain the following -// parameters by using a Condition element with the appropriate keys. +// * Constrain the following parameters by using a +// Condition element with the appropriate keys. // -// * -// defaultTaskList.name: String constraint. The key is swf:defaultTaskList.name. +// * defaultTaskList.name: String +// constraint. The key is swf:defaultTaskList.name. // +// * name: String constraint. The +// key is swf:name. // -// * name: String constraint. The key is swf:name. +// * version: String constraint. The key is swf:version. // -// * version: String -// constraint. The key is swf:version. 
-// -// If the caller doesn't have sufficient -// permissions to invoke the action, or the parameter values fall outside the -// specified constraints, the action fails. The associated event attribute's cause -// parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM -// policies, see Using IAM to Manage Access to Amazon SWF Workflows +// If the +// caller doesn't have sufficient permissions to invoke the action, or the +// parameter values fall outside the specified constraints, the action fails. The +// associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. +// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows // (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. func (c *Client) RegisterActivityType(ctx context.Context, params *RegisterActivityTypeInput, optFns ...func(*Options)) (*RegisterActivityTypeOutput, error) { diff --git a/service/swf/api_op_RegisterDomain.go b/service/swf/api_op_RegisterDomain.go index c60018358ff..1831d20ac7c 100644 --- a/service/swf/api_op_RegisterDomain.go +++ b/service/swf/api_op_RegisterDomain.go @@ -14,21 +14,21 @@ import ( // Registers a new domain. Access Control You can use IAM policies to control this // action's access to Amazon SWF resources as follows: // -// * You cannot use an IAM +// * You cannot use an IAM // policy to control domain access for this action. The name of the domain being // registered is available as the resource of this action. // -// * Use an Action -// element to allow or deny permission to call this action. +// * Use an Action element +// to allow or deny permission to call this action. // -// * You cannot use -// an IAM policy to constrain this action's parameters. +// * You cannot use an IAM policy +// to constrain this action's parameters. // -// If the caller doesn't have -// sufficient permissions to invoke the action, or the parameter values fall -// outside the specified constraints, the action fails. The associated event -// attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and -// example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows +// If the caller doesn't have sufficient +// permissions to invoke the action, or the parameter values fall outside the +// specified constraints, the action fails. The associated event attribute's cause +// parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM +// policies, see Using IAM to Manage Access to Amazon SWF Workflows // (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. func (c *Client) RegisterDomain(ctx context.Context, params *RegisterDomainInput, optFns ...func(*Options)) (*RegisterDomainOutput, error) { diff --git a/service/swf/api_op_RegisterWorkflowType.go b/service/swf/api_op_RegisterWorkflowType.go index f3b1cabc9fd..c586750f768 100644 --- a/service/swf/api_op_RegisterWorkflowType.go +++ b/service/swf/api_op_RegisterWorkflowType.go @@ -19,29 +19,29 @@ import ( // Control You can use IAM policies to control this action's access to Amazon SWF // resources as follows: // -// * Use a Resource element with the domain name to -// limit the action to only specified domains. +// * Use a Resource element with the domain name to limit +// the action to only specified domains. // -// * Use an Action element to -// allow or deny permission to call this action. 
+// * Use an Action element to allow or deny +// permission to call this action. // -// * Constrain the following -// parameters by using a Condition element with the appropriate keys. +// * Constrain the following parameters by using a +// Condition element with the appropriate keys. // -// * -// defaultTaskList.name: String constraint. The key is swf:defaultTaskList.name. +// * defaultTaskList.name: String +// constraint. The key is swf:defaultTaskList.name. // +// * name: String constraint. The +// key is swf:name. // -// * name: String constraint. The key is swf:name. +// * version: String constraint. The key is swf:version. // -// * version: String -// constraint. The key is swf:version. -// -// If the caller doesn't have sufficient -// permissions to invoke the action, or the parameter values fall outside the -// specified constraints, the action fails. The associated event attribute's cause -// parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM -// policies, see Using IAM to Manage Access to Amazon SWF Workflows +// If the +// caller doesn't have sufficient permissions to invoke the action, or the +// parameter values fall outside the specified constraints, the action fails. The +// associated event attribute's cause parameter is set to OPERATION_NOT_PERMITTED. +// For details and example IAM policies, see Using IAM to Manage Access to Amazon +// SWF Workflows // (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. func (c *Client) RegisterWorkflowType(ctx context.Context, params *RegisterWorkflowTypeInput, optFns ...func(*Options)) (*RegisterWorkflowTypeOutput, error) { @@ -91,16 +91,16 @@ type RegisterWorkflowTypeInput struct { // StartWorkflowExecution action or the StartChildWorkflowExecutionDecision. The // supported child policies are: // - // * TERMINATE – The child executions are + // * TERMINATE – The child executions are // terminated. // - // * REQUEST_CANCEL – A request to cancel is attempted for each - // child execution by recording a WorkflowExecutionCancelRequested event in its - // history. It is up to the decider to take appropriate actions when it receives an + // * REQUEST_CANCEL – A request to cancel is attempted for each child + // execution by recording a WorkflowExecutionCancelRequested event in its history. + // It is up to the decider to take appropriate actions when it receives an // execution history with this event. // - // * ABANDON – No action is taken. The - // child executions continue to run. + // * ABANDON – No action is taken. The child + // executions continue to run. DefaultChildPolicy types.ChildPolicy // If set, specifies the default maximum duration for executions of this workflow diff --git a/service/swf/api_op_RequestCancelWorkflowExecution.go b/service/swf/api_op_RequestCancelWorkflowExecution.go index 074cd2be394..3c644ff081f 100644 --- a/service/swf/api_op_RequestCancelWorkflowExecution.go +++ b/service/swf/api_op_RequestCancelWorkflowExecution.go @@ -22,20 +22,20 @@ import ( // Control You can use IAM policies to control this action's access to Amazon SWF // resources as follows: // -// * Use a Resource element with the domain name to -// limit the action to only specified domains. +// * Use a Resource element with the domain name to limit +// the action to only specified domains. // -// * Use an Action element to -// allow or deny permission to call this action. +// * Use an Action element to allow or deny +// permission to call this action. 
// -// * You cannot use an IAM -// policy to constrain this action's parameters. +// * You cannot use an IAM policy to constrain +// this action's parameters. // -// If the caller doesn't have -// sufficient permissions to invoke the action, or the parameter values fall -// outside the specified constraints, the action fails. The associated event -// attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and -// example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows +// If the caller doesn't have sufficient permissions to +// invoke the action, or the parameter values fall outside the specified +// constraints, the action fails. The associated event attribute's cause parameter +// is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see +// Using IAM to Manage Access to Amazon SWF Workflows // (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. func (c *Client) RequestCancelWorkflowExecution(ctx context.Context, params *RequestCancelWorkflowExecutionInput, optFns ...func(*Options)) (*RequestCancelWorkflowExecutionOutput, error) { diff --git a/service/swf/api_op_RespondActivityTaskCanceled.go b/service/swf/api_op_RespondActivityTaskCanceled.go index c884d299f54..85d76bf4d07 100644 --- a/service/swf/api_op_RespondActivityTaskCanceled.go +++ b/service/swf/api_op_RespondActivityTaskCanceled.go @@ -25,20 +25,20 @@ import ( // Access Control You can use IAM policies to control this action's access to // Amazon SWF resources as follows: // -// * Use a Resource element with the domain -// name to limit the action to only specified domains. +// * Use a Resource element with the domain name +// to limit the action to only specified domains. // -// * Use an Action element -// to allow or deny permission to call this action. +// * Use an Action element to allow +// or deny permission to call this action. // -// * You cannot use an IAM -// policy to constrain this action's parameters. +// * You cannot use an IAM policy to +// constrain this action's parameters. // -// If the caller doesn't have -// sufficient permissions to invoke the action, or the parameter values fall -// outside the specified constraints, the action fails. The associated event -// attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and -// example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows +// If the caller doesn't have sufficient +// permissions to invoke the action, or the parameter values fall outside the +// specified constraints, the action fails. The associated event attribute's cause +// parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM +// policies, see Using IAM to Manage Access to Amazon SWF Workflows // (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. 
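The polling and response operations above (PollForActivityTask, RecordActivityTaskHeartbeat, and the RespondActivityTask* calls whose doc comments follow) combine into a typical activity-worker loop, sketched below. This is illustrative only: the domain, task list, identity, and payload strings are placeholders, and the input and output field names are assumptions based on the service API. PollForActivityTask long-polls for up to a minute and returns an empty task token when no task is available, which the loop treats as a signal to poll again.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/swf"
	"github.com/aws/aws-sdk-go-v2/service/swf/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := swf.NewFromConfig(cfg)

	for {
		// Long-poll the task list; an empty TaskToken means the poll timed
		// out without a task becoming available.
		task, err := client.PollForActivityTask(ctx, &swf.PollForActivityTaskInput{
			Domain:   aws.String("ExampleDomain"),
			TaskList: &types.TaskList{Name: aws.String("example-task-list")},
			Identity: aws.String("example-worker"),
		})
		if err != nil {
			log.Fatal(err)
		}
		if task.TaskToken == nil || *task.TaskToken == "" {
			continue
		}

		// Report progress so the task's heartbeat timeout is not exceeded.
		_, err = client.RecordActivityTaskHeartbeat(ctx, &swf.RecordActivityTaskHeartbeatInput{
			TaskToken: task.TaskToken,
			Details:   aws.String("halfway done"),
		})
		if err != nil {
			log.Fatal(err)
		}

		// Mark the task as completed once the work has finished.
		_, err = client.RespondActivityTaskCompleted(ctx, &swf.RespondActivityTaskCompletedInput{
			TaskToken: task.TaskToken,
			Result:    aws.String("done"),
		})
		if err != nil {
			log.Fatal(err)
		}
	}
}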
func (c *Client) RespondActivityTaskCanceled(ctx context.Context, params *RespondActivityTaskCanceledInput, optFns ...func(*Options)) (*RespondActivityTaskCanceledOutput, error) { diff --git a/service/swf/api_op_RespondActivityTaskCompleted.go b/service/swf/api_op_RespondActivityTaskCompleted.go index 10a73a21920..6d87d8db0d8 100644 --- a/service/swf/api_op_RespondActivityTaskCompleted.go +++ b/service/swf/api_op_RespondActivityTaskCompleted.go @@ -25,20 +25,20 @@ import ( // Access Control You can use IAM policies to control this action's access to // Amazon SWF resources as follows: // -// * Use a Resource element with the domain -// name to limit the action to only specified domains. +// * Use a Resource element with the domain name +// to limit the action to only specified domains. // -// * Use an Action element -// to allow or deny permission to call this action. +// * Use an Action element to allow +// or deny permission to call this action. // -// * You cannot use an IAM -// policy to constrain this action's parameters. +// * You cannot use an IAM policy to +// constrain this action's parameters. // -// If the caller doesn't have -// sufficient permissions to invoke the action, or the parameter values fall -// outside the specified constraints, the action fails. The associated event -// attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and -// example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows +// If the caller doesn't have sufficient +// permissions to invoke the action, or the parameter values fall outside the +// specified constraints, the action fails. The associated event attribute's cause +// parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM +// policies, see Using IAM to Manage Access to Amazon SWF Workflows // (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. func (c *Client) RespondActivityTaskCompleted(ctx context.Context, params *RespondActivityTaskCompletedInput, optFns ...func(*Options)) (*RespondActivityTaskCompletedOutput, error) { diff --git a/service/swf/api_op_RespondActivityTaskFailed.go b/service/swf/api_op_RespondActivityTaskFailed.go index d45297bfb67..84cec144b70 100644 --- a/service/swf/api_op_RespondActivityTaskFailed.go +++ b/service/swf/api_op_RespondActivityTaskFailed.go @@ -22,20 +22,20 @@ import ( // Access Control You can use IAM policies to control this action's access to // Amazon SWF resources as follows: // -// * Use a Resource element with the domain -// name to limit the action to only specified domains. +// * Use a Resource element with the domain name +// to limit the action to only specified domains. // -// * Use an Action element -// to allow or deny permission to call this action. +// * Use an Action element to allow +// or deny permission to call this action. // -// * You cannot use an IAM -// policy to constrain this action's parameters. +// * You cannot use an IAM policy to +// constrain this action's parameters. // -// If the caller doesn't have -// sufficient permissions to invoke the action, or the parameter values fall -// outside the specified constraints, the action fails. The associated event -// attribute's cause parameter is set to OPERATION_NOT_PERMITTED. 
For details and -// example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows +// If the caller doesn't have sufficient +// permissions to invoke the action, or the parameter values fall outside the +// specified constraints, the action fails. The associated event attribute's cause +// parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM +// policies, see Using IAM to Manage Access to Amazon SWF Workflows // (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. func (c *Client) RespondActivityTaskFailed(ctx context.Context, params *RespondActivityTaskFailedInput, optFns ...func(*Options)) (*RespondActivityTaskFailedOutput, error) { diff --git a/service/swf/api_op_SignalWorkflowExecution.go b/service/swf/api_op_SignalWorkflowExecution.go index 8ab50276ab8..47d282fd0cc 100644 --- a/service/swf/api_op_SignalWorkflowExecution.go +++ b/service/swf/api_op_SignalWorkflowExecution.go @@ -20,20 +20,20 @@ import ( // can use IAM policies to control this action's access to Amazon SWF resources as // follows: // -// * Use a Resource element with the domain name to limit the action -// to only specified domains. +// * Use a Resource element with the domain name to limit the action to +// only specified domains. // -// * Use an Action element to allow or deny -// permission to call this action. +// * Use an Action element to allow or deny permission to +// call this action. // -// * You cannot use an IAM policy to constrain -// this action's parameters. +// * You cannot use an IAM policy to constrain this action's +// parameters. // -// If the caller doesn't have sufficient permissions to -// invoke the action, or the parameter values fall outside the specified -// constraints, the action fails. The associated event attribute's cause parameter -// is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see -// Using IAM to Manage Access to Amazon SWF Workflows +// If the caller doesn't have sufficient permissions to invoke the +// action, or the parameter values fall outside the specified constraints, the +// action fails. The associated event attribute's cause parameter is set to +// OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to +// Manage Access to Amazon SWF Workflows // (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. func (c *Client) SignalWorkflowExecution(ctx context.Context, params *SignalWorkflowExecutionInput, optFns ...func(*Options)) (*SignalWorkflowExecutionOutput, error) { diff --git a/service/swf/api_op_StartWorkflowExecution.go b/service/swf/api_op_StartWorkflowExecution.go index 9577c0ee155..7e28971d61c 100644 --- a/service/swf/api_op_StartWorkflowExecution.go +++ b/service/swf/api_op_StartWorkflowExecution.go @@ -16,45 +16,43 @@ import ( // workflow execution. Access Control You can use IAM policies to control this // action's access to Amazon SWF resources as follows: // -// * Use a Resource -// element with the domain name to limit the action to only specified domains. +// * Use a Resource element +// with the domain name to limit the action to only specified domains. // +// * Use an +// Action element to allow or deny permission to call this action. // -// * Use an Action element to allow or deny permission to call this action. +// * Constrain the +// following parameters by using a Condition element with the appropriate keys. 
// -// * -// Constrain the following parameters by using a Condition element with the -// appropriate keys. +// * +// tagList.member.0: The key is swf:tagList.member.0. // -// * tagList.member.0: The key is -// swf:tagList.member.0. +// * tagList.member.1: The key +// is swf:tagList.member.1. // -// * tagList.member.1: The key is -// swf:tagList.member.1. -// -// * tagList.member.2: The key is +// * tagList.member.2: The key is // swf:tagList.member.2. // -// * tagList.member.3: The key is -// swf:tagList.member.3. +// * tagList.member.3: The key is swf:tagList.member.3. // -// * tagList.member.4: The key is -// swf:tagList.member.4. +// * +// tagList.member.4: The key is swf:tagList.member.4. // -// * taskList: String constraint. The key is -// swf:taskList.name. +// * taskList: String +// constraint. The key is swf:taskList.name. // -// * workflowType.name: String constraint. The key is -// swf:workflowType.name. +// * workflowType.name: String +// constraint. The key is swf:workflowType.name. // -// * workflowType.version: String constraint. The -// key is swf:workflowType.version. +// * workflowType.version: String +// constraint. The key is swf:workflowType.version. // -// If the caller doesn't have sufficient -// permissions to invoke the action, or the parameter values fall outside the -// specified constraints, the action fails. The associated event attribute's cause -// parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM -// policies, see Using IAM to Manage Access to Amazon SWF Workflows +// If the caller doesn't have +// sufficient permissions to invoke the action, or the parameter values fall +// outside the specified constraints, the action fails. The associated event +// attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and +// example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows // (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. func (c *Client) StartWorkflowExecution(ctx context.Context, params *StartWorkflowExecutionInput, optFns ...func(*Options)) (*StartWorkflowExecutionOutput, error) { @@ -102,22 +100,22 @@ type StartWorkflowExecutionInput struct { // policy overrides the default child policy specified when registering the // workflow type using RegisterWorkflowType. The supported child policies are: // + // * + // TERMINATE – The child executions are terminated. // - // * TERMINATE – The child executions are terminated. - // - // * REQUEST_CANCEL – A - // request to cancel is attempted for each child execution by recording a + // * REQUEST_CANCEL – A request + // to cancel is attempted for each child execution by recording a // WorkflowExecutionCancelRequested event in its history. It is up to the decider // to take appropriate actions when it receives an execution history with this // event. // - // * ABANDON – No action is taken. The child executions continue to - // run. + // * ABANDON – No action is taken. The child executions continue to run. // - // A child policy for this workflow execution must be specified either as a - // default for the workflow type or through this parameter. If neither this - // parameter is set nor a default child policy was specified at registration time - // then a fault is returned. + // A + // child policy for this workflow execution must be specified either as a default + // for the workflow type or through this parameter. 
If neither this parameter is + // set nor a default child policy was specified at registration time then a fault + // is returned. ChildPolicy types.ChildPolicy // The total duration for this workflow execution. This overrides the diff --git a/service/swf/api_op_TerminateWorkflowExecution.go b/service/swf/api_op_TerminateWorkflowExecution.go index 2985093e93b..709f9617573 100644 --- a/service/swf/api_op_TerminateWorkflowExecution.go +++ b/service/swf/api_op_TerminateWorkflowExecution.go @@ -24,20 +24,20 @@ import ( // can use IAM policies to control this action's access to Amazon SWF resources as // follows: // -// * Use a Resource element with the domain name to limit the action -// to only specified domains. +// * Use a Resource element with the domain name to limit the action to +// only specified domains. // -// * Use an Action element to allow or deny -// permission to call this action. +// * Use an Action element to allow or deny permission to +// call this action. // -// * You cannot use an IAM policy to constrain -// this action's parameters. +// * You cannot use an IAM policy to constrain this action's +// parameters. // -// If the caller doesn't have sufficient permissions to -// invoke the action, or the parameter values fall outside the specified -// constraints, the action fails. The associated event attribute's cause parameter -// is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see -// Using IAM to Manage Access to Amazon SWF Workflows +// If the caller doesn't have sufficient permissions to invoke the +// action, or the parameter values fall outside the specified constraints, the +// action fails. The associated event attribute's cause parameter is set to +// OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to +// Manage Access to Amazon SWF Workflows // (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. func (c *Client) TerminateWorkflowExecution(ctx context.Context, params *TerminateWorkflowExecutionInput, optFns ...func(*Options)) (*TerminateWorkflowExecutionOutput, error) { @@ -72,22 +72,21 @@ type TerminateWorkflowExecutionInput struct { // specified for the workflow execution at registration time or when starting the // execution. The supported child policies are: // - // * TERMINATE – The child - // executions are terminated. + // * TERMINATE – The child executions + // are terminated. // - // * REQUEST_CANCEL – A request to cancel is - // attempted for each child execution by recording a - // WorkflowExecutionCancelRequested event in its history. It is up to the decider - // to take appropriate actions when it receives an execution history with this - // event. + // * REQUEST_CANCEL – A request to cancel is attempted for each + // child execution by recording a WorkflowExecutionCancelRequested event in its + // history. It is up to the decider to take appropriate actions when it receives an + // execution history with this event. // - // * ABANDON – No action is taken. The child executions continue to - // run. + // * ABANDON – No action is taken. The child + // executions continue to run. // - // A child policy for this workflow execution must be specified either as a - // default for the workflow type or through this parameter. If neither this - // parameter is set nor a default child policy was specified at registration time - // then a fault is returned. 
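The reflowed ChildPolicy documentation above belongs to StartWorkflowExecutionInput. A minimal sketch of starting an execution with ChildPolicy set explicitly follows, so the "neither this parameter nor a registered default" fault cannot occur; the domain, workflow ID, and workflow type are placeholders, and an already-constructed *swf.Client is assumed.

// startWithChildPolicy is a minimal sketch of StartWorkflowExecution with an
// explicit ChildPolicy. Domain, workflow ID, type, and input are placeholders.
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/swf"
	"github.com/aws/aws-sdk-go-v2/service/swf/types"
)

func startWithChildPolicy(ctx context.Context, client *swf.Client) (string, error) {
	out, err := client.StartWorkflowExecution(ctx, &swf.StartWorkflowExecutionInput{
		Domain:     aws.String("example-domain"),
		WorkflowId: aws.String("order-12345"),
		WorkflowType: &types.WorkflowType{
			Name:    aws.String("ProcessOrder"),
			Version: aws.String("1.0"),
		},
		// REQUEST_CANCEL asks each open child execution to cancel if this
		// execution is closed, per the child policies described above.
		ChildPolicy: types.ChildPolicyRequestCancel,
		Input:       aws.String(`{"orderId":"12345"}`),
	})
	if err != nil {
		return "", err
	}
	return aws.ToString(out.RunId), nil
}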
+ // A child policy for this workflow execution must be + // specified either as a default for the workflow type or through this parameter. + // If neither this parameter is set nor a default child policy was specified at + // registration time then a fault is returned. ChildPolicy types.ChildPolicy // Details for terminating the workflow execution. diff --git a/service/swf/api_op_UndeprecateActivityType.go b/service/swf/api_op_UndeprecateActivityType.go index f29daa64a7e..9c49cc05470 100644 --- a/service/swf/api_op_UndeprecateActivityType.go +++ b/service/swf/api_op_UndeprecateActivityType.go @@ -17,27 +17,27 @@ import ( // exactly reflect recent updates and changes. Access Control You can use IAM // policies to control this action's access to Amazon SWF resources as follows: // -// -// * Use a Resource element with the domain name to limit the action to only +// * +// Use a Resource element with the domain name to limit the action to only // specified domains. // -// * Use an Action element to allow or deny permission to -// call this action. +// * Use an Action element to allow or deny permission to call +// this action. // -// * Constrain the following parameters by using a Condition -// element with the appropriate keys. +// * Constrain the following parameters by using a Condition element +// with the appropriate keys. // -// * activityType.name: String -// constraint. The key is swf:activityType.name. +// * activityType.name: String constraint. The key is +// swf:activityType.name. // -// * activityType.version: -// String constraint. The key is swf:activityType.version. +// * activityType.version: String constraint. The key is +// swf:activityType.version. // -// If the caller doesn't -// have sufficient permissions to invoke the action, or the parameter values fall -// outside the specified constraints, the action fails. The associated event -// attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and -// example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows +// If the caller doesn't have sufficient permissions to +// invoke the action, or the parameter values fall outside the specified +// constraints, the action fails. The associated event attribute's cause parameter +// is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see +// Using IAM to Manage Access to Amazon SWF Workflows // (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. func (c *Client) UndeprecateActivityType(ctx context.Context, params *UndeprecateActivityTypeInput, optFns ...func(*Options)) (*UndeprecateActivityTypeOutput, error) { diff --git a/service/swf/api_op_UndeprecateDomain.go b/service/swf/api_op_UndeprecateDomain.go index 7f300f59a66..9926d25c57b 100644 --- a/service/swf/api_op_UndeprecateDomain.go +++ b/service/swf/api_op_UndeprecateDomain.go @@ -17,20 +17,20 @@ import ( // IAM policies to control this action's access to Amazon SWF resources as // follows: // -// * Use a Resource element with the domain name to limit the action -// to only specified domains. +// * Use a Resource element with the domain name to limit the action to +// only specified domains. // -// * Use an Action element to allow or deny -// permission to call this action. +// * Use an Action element to allow or deny permission to +// call this action. // -// * You cannot use an IAM policy to constrain -// this action's parameters. 
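TerminateWorkflowExecution accepts the same ChildPolicy override described in the hunk above. A short sketch, again with placeholder identifiers and an assumed, already-constructed *swf.Client:

// terminateWithOverride is a minimal sketch of TerminateWorkflowExecution that
// overrides the registered child policy for this one call. All identifiers are
// placeholders.
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/swf"
	"github.com/aws/aws-sdk-go-v2/service/swf/types"
)

func terminateWithOverride(ctx context.Context, client *swf.Client, workflowID, runID string) error {
	_, err := client.TerminateWorkflowExecution(ctx, &swf.TerminateWorkflowExecutionInput{
		Domain:     aws.String("example-domain"),
		WorkflowId: aws.String(workflowID),
		RunId:      aws.String(runID),
		Reason:     aws.String("operator requested shutdown"),
		// ABANDON leaves any child executions running, as described in the
		// supported child policies above.
		ChildPolicy: types.ChildPolicyAbandon,
	})
	return err
}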
+// * You cannot use an IAM policy to constrain this action's +// parameters. // -// If the caller doesn't have sufficient permissions to -// invoke the action, or the parameter values fall outside the specified -// constraints, the action fails. The associated event attribute's cause parameter -// is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see -// Using IAM to Manage Access to Amazon SWF Workflows +// If the caller doesn't have sufficient permissions to invoke the +// action, or the parameter values fall outside the specified constraints, the +// action fails. The associated event attribute's cause parameter is set to +// OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to +// Manage Access to Amazon SWF Workflows // (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. func (c *Client) UndeprecateDomain(ctx context.Context, params *UndeprecateDomainInput, optFns ...func(*Options)) (*UndeprecateDomainOutput, error) { diff --git a/service/swf/api_op_UndeprecateWorkflowType.go b/service/swf/api_op_UndeprecateWorkflowType.go index 9a6483aae24..3e4b18fa34a 100644 --- a/service/swf/api_op_UndeprecateWorkflowType.go +++ b/service/swf/api_op_UndeprecateWorkflowType.go @@ -17,27 +17,27 @@ import ( // recent updates and changes. Access Control You can use IAM policies to control // this action's access to Amazon SWF resources as follows: // -// * Use a Resource +// * Use a Resource // element with the domain name to limit the action to only specified domains. // +// * +// Use an Action element to allow or deny permission to call this action. // -// * Use an Action element to allow or deny permission to call this action. -// -// * +// * // Constrain the following parameters by using a Condition element with the // appropriate keys. // -// * workflowType.name: String constraint. The key is +// * workflowType.name: String constraint. The key is // swf:workflowType.name. // -// * workflowType.version: String constraint. The -// key is swf:workflowType.version. +// * workflowType.version: String constraint. The key is +// swf:workflowType.version. // -// If the caller doesn't have sufficient -// permissions to invoke the action, or the parameter values fall outside the -// specified constraints, the action fails. The associated event attribute's cause -// parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM -// policies, see Using IAM to Manage Access to Amazon SWF Workflows +// If the caller doesn't have sufficient permissions to +// invoke the action, or the parameter values fall outside the specified +// constraints, the action fails. The associated event attribute's cause parameter +// is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see +// Using IAM to Manage Access to Amazon SWF Workflows // (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. 
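The access-control notes repeated through these hunks (a Resource element scoped to a domain, an Action element, and Condition keys such as swf:workflowType.name) translate into an IAM policy along the following lines. This is an illustrative sketch only: the account ID, region, domain ARN format, and workflow type name are assumptions, not taken from this patch.

// examplePolicy is an illustrative IAM policy document of the shape described
// in the access-control notes above: the Resource element scopes the actions
// to one domain, and a Condition key constrains swf:workflowType.name. The
// account ID, region, domain ARN, and type name are placeholder assumptions.
package example

const examplePolicy = `{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "swf:StartWorkflowExecution",
        "swf:UndeprecateWorkflowType"
      ],
      "Resource": "arn:aws:swf:us-east-1:123456789012:/domain/example-domain",
      "Condition": {
        "StringEquals": {"swf:workflowType.name": "ProcessOrder"}
      }
    }
  ]
}`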
func (c *Client) UndeprecateWorkflowType(ctx context.Context, params *UndeprecateWorkflowTypeInput, optFns ...func(*Options)) (*UndeprecateWorkflowTypeOutput, error) { diff --git a/service/swf/types/enums.go b/service/swf/types/enums.go index fe9cbcb6c7a..86afca87eac 100644 --- a/service/swf/types/enums.go +++ b/service/swf/types/enums.go @@ -6,10 +6,10 @@ type ActivityTaskTimeoutType string // Enum values for ActivityTaskTimeoutType const ( - ActivityTaskTimeoutTypeStart_to_close ActivityTaskTimeoutType = "START_TO_CLOSE" - ActivityTaskTimeoutTypeSchedule_to_start ActivityTaskTimeoutType = "SCHEDULE_TO_START" - ActivityTaskTimeoutTypeSchedule_to_close ActivityTaskTimeoutType = "SCHEDULE_TO_CLOSE" - ActivityTaskTimeoutTypeHeartbeat ActivityTaskTimeoutType = "HEARTBEAT" + ActivityTaskTimeoutTypeStartToClose ActivityTaskTimeoutType = "START_TO_CLOSE" + ActivityTaskTimeoutTypeScheduleToStart ActivityTaskTimeoutType = "SCHEDULE_TO_START" + ActivityTaskTimeoutTypeScheduleToClose ActivityTaskTimeoutType = "SCHEDULE_TO_CLOSE" + ActivityTaskTimeoutTypeHeartbeat ActivityTaskTimeoutType = "HEARTBEAT" ) // Values returns all known values for ActivityTaskTimeoutType. Note that this can @@ -28,8 +28,8 @@ type CancelTimerFailedCause string // Enum values for CancelTimerFailedCause const ( - CancelTimerFailedCauseTimer_id_unknown CancelTimerFailedCause = "TIMER_ID_UNKNOWN" - CancelTimerFailedCauseOperation_not_permitted CancelTimerFailedCause = "OPERATION_NOT_PERMITTED" + CancelTimerFailedCauseTimerIdUnknown CancelTimerFailedCause = "TIMER_ID_UNKNOWN" + CancelTimerFailedCauseOperationNotPermitted CancelTimerFailedCause = "OPERATION_NOT_PERMITTED" ) // Values returns all known values for CancelTimerFailedCause. Note that this can @@ -46,8 +46,8 @@ type CancelWorkflowExecutionFailedCause string // Enum values for CancelWorkflowExecutionFailedCause const ( - CancelWorkflowExecutionFailedCauseUnhandled_decision CancelWorkflowExecutionFailedCause = "UNHANDLED_DECISION" - CancelWorkflowExecutionFailedCauseOperation_not_permitted CancelWorkflowExecutionFailedCause = "OPERATION_NOT_PERMITTED" + CancelWorkflowExecutionFailedCauseUnhandledDecision CancelWorkflowExecutionFailedCause = "UNHANDLED_DECISION" + CancelWorkflowExecutionFailedCauseOperationNotPermitted CancelWorkflowExecutionFailedCause = "OPERATION_NOT_PERMITTED" ) // Values returns all known values for CancelWorkflowExecutionFailedCause. Note @@ -65,9 +65,9 @@ type ChildPolicy string // Enum values for ChildPolicy const ( - ChildPolicyTerminate ChildPolicy = "TERMINATE" - ChildPolicyRequest_cancel ChildPolicy = "REQUEST_CANCEL" - ChildPolicyAbandon ChildPolicy = "ABANDON" + ChildPolicyTerminate ChildPolicy = "TERMINATE" + ChildPolicyRequestCancel ChildPolicy = "REQUEST_CANCEL" + ChildPolicyAbandon ChildPolicy = "ABANDON" ) // Values returns all known values for ChildPolicy. 
Note that this can be expanded @@ -85,12 +85,12 @@ type CloseStatus string // Enum values for CloseStatus const ( - CloseStatusCompleted CloseStatus = "COMPLETED" - CloseStatusFailed CloseStatus = "FAILED" - CloseStatusCanceled CloseStatus = "CANCELED" - CloseStatusTerminated CloseStatus = "TERMINATED" - CloseStatusContinued_as_new CloseStatus = "CONTINUED_AS_NEW" - CloseStatusTimed_out CloseStatus = "TIMED_OUT" + CloseStatusCompleted CloseStatus = "COMPLETED" + CloseStatusFailed CloseStatus = "FAILED" + CloseStatusCanceled CloseStatus = "CANCELED" + CloseStatusTerminated CloseStatus = "TERMINATED" + CloseStatusContinuedAsNew CloseStatus = "CONTINUED_AS_NEW" + CloseStatusTimedOut CloseStatus = "TIMED_OUT" ) // Values returns all known values for CloseStatus. Note that this can be expanded @@ -111,8 +111,8 @@ type CompleteWorkflowExecutionFailedCause string // Enum values for CompleteWorkflowExecutionFailedCause const ( - CompleteWorkflowExecutionFailedCauseUnhandled_decision CompleteWorkflowExecutionFailedCause = "UNHANDLED_DECISION" - CompleteWorkflowExecutionFailedCauseOperation_not_permitted CompleteWorkflowExecutionFailedCause = "OPERATION_NOT_PERMITTED" + CompleteWorkflowExecutionFailedCauseUnhandledDecision CompleteWorkflowExecutionFailedCause = "UNHANDLED_DECISION" + CompleteWorkflowExecutionFailedCauseOperationNotPermitted CompleteWorkflowExecutionFailedCause = "OPERATION_NOT_PERMITTED" ) // Values returns all known values for CompleteWorkflowExecutionFailedCause. Note @@ -130,15 +130,15 @@ type ContinueAsNewWorkflowExecutionFailedCause string // Enum values for ContinueAsNewWorkflowExecutionFailedCause const ( - ContinueAsNewWorkflowExecutionFailedCauseUnhandled_decision ContinueAsNewWorkflowExecutionFailedCause = "UNHANDLED_DECISION" - ContinueAsNewWorkflowExecutionFailedCauseWorkflow_type_deprecated ContinueAsNewWorkflowExecutionFailedCause = "WORKFLOW_TYPE_DEPRECATED" - ContinueAsNewWorkflowExecutionFailedCauseWorkflow_type_does_not_exist ContinueAsNewWorkflowExecutionFailedCause = "WORKFLOW_TYPE_DOES_NOT_EXIST" - ContinueAsNewWorkflowExecutionFailedCauseDefault_execution_start_to_close_timeout_undefined ContinueAsNewWorkflowExecutionFailedCause = "DEFAULT_EXECUTION_START_TO_CLOSE_TIMEOUT_UNDEFINED" - ContinueAsNewWorkflowExecutionFailedCauseDefault_task_start_to_close_timeout_undefined ContinueAsNewWorkflowExecutionFailedCause = "DEFAULT_TASK_START_TO_CLOSE_TIMEOUT_UNDEFINED" - ContinueAsNewWorkflowExecutionFailedCauseDefault_task_list_undefined ContinueAsNewWorkflowExecutionFailedCause = "DEFAULT_TASK_LIST_UNDEFINED" - ContinueAsNewWorkflowExecutionFailedCauseDefault_child_policy_undefined ContinueAsNewWorkflowExecutionFailedCause = "DEFAULT_CHILD_POLICY_UNDEFINED" - ContinueAsNewWorkflowExecutionFailedCauseContinue_as_new_workflow_execution_rate_exceeded ContinueAsNewWorkflowExecutionFailedCause = "CONTINUE_AS_NEW_WORKFLOW_EXECUTION_RATE_EXCEEDED" - ContinueAsNewWorkflowExecutionFailedCauseOperation_not_permitted ContinueAsNewWorkflowExecutionFailedCause = "OPERATION_NOT_PERMITTED" + ContinueAsNewWorkflowExecutionFailedCauseUnhandledDecision ContinueAsNewWorkflowExecutionFailedCause = "UNHANDLED_DECISION" + ContinueAsNewWorkflowExecutionFailedCauseWorkflowTypeDeprecated ContinueAsNewWorkflowExecutionFailedCause = "WORKFLOW_TYPE_DEPRECATED" + ContinueAsNewWorkflowExecutionFailedCauseWorkflowTypeDoesNotExist ContinueAsNewWorkflowExecutionFailedCause = "WORKFLOW_TYPE_DOES_NOT_EXIST" + ContinueAsNewWorkflowExecutionFailedCauseDefaultExecutionStartToCloseTimeoutUndefined 
ContinueAsNewWorkflowExecutionFailedCause = "DEFAULT_EXECUTION_START_TO_CLOSE_TIMEOUT_UNDEFINED" + ContinueAsNewWorkflowExecutionFailedCauseDefaultTaskStartToCloseTimeoutUndefined ContinueAsNewWorkflowExecutionFailedCause = "DEFAULT_TASK_START_TO_CLOSE_TIMEOUT_UNDEFINED" + ContinueAsNewWorkflowExecutionFailedCauseDefaultTaskListUndefined ContinueAsNewWorkflowExecutionFailedCause = "DEFAULT_TASK_LIST_UNDEFINED" + ContinueAsNewWorkflowExecutionFailedCauseDefaultChildPolicyUndefined ContinueAsNewWorkflowExecutionFailedCause = "DEFAULT_CHILD_POLICY_UNDEFINED" + ContinueAsNewWorkflowExecutionFailedCauseContinueAsNewWorkflowExecutionRateExceeded ContinueAsNewWorkflowExecutionFailedCause = "CONTINUE_AS_NEW_WORKFLOW_EXECUTION_RATE_EXCEEDED" + ContinueAsNewWorkflowExecutionFailedCauseOperationNotPermitted ContinueAsNewWorkflowExecutionFailedCause = "OPERATION_NOT_PERMITTED" ) // Values returns all known values for ContinueAsNewWorkflowExecutionFailedCause. @@ -163,7 +163,7 @@ type DecisionTaskTimeoutType string // Enum values for DecisionTaskTimeoutType const ( - DecisionTaskTimeoutTypeStart_to_close DecisionTaskTimeoutType = "START_TO_CLOSE" + DecisionTaskTimeoutTypeStartToClose DecisionTaskTimeoutType = "START_TO_CLOSE" ) // Values returns all known values for DecisionTaskTimeoutType. Note that this can @@ -359,8 +359,8 @@ type FailWorkflowExecutionFailedCause string // Enum values for FailWorkflowExecutionFailedCause const ( - FailWorkflowExecutionFailedCauseUnhandled_decision FailWorkflowExecutionFailedCause = "UNHANDLED_DECISION" - FailWorkflowExecutionFailedCauseOperation_not_permitted FailWorkflowExecutionFailedCause = "OPERATION_NOT_PERMITTED" + FailWorkflowExecutionFailedCauseUnhandledDecision FailWorkflowExecutionFailedCause = "UNHANDLED_DECISION" + FailWorkflowExecutionFailedCauseOperationNotPermitted FailWorkflowExecutionFailedCause = "OPERATION_NOT_PERMITTED" ) // Values returns all known values for FailWorkflowExecutionFailedCause. Note that @@ -378,7 +378,7 @@ type LambdaFunctionTimeoutType string // Enum values for LambdaFunctionTimeoutType const ( - LambdaFunctionTimeoutTypeStart_to_close LambdaFunctionTimeoutType = "START_TO_CLOSE" + LambdaFunctionTimeoutTypeStartToClose LambdaFunctionTimeoutType = "START_TO_CLOSE" ) // Values returns all known values for LambdaFunctionTimeoutType. Note that this @@ -394,7 +394,7 @@ type RecordMarkerFailedCause string // Enum values for RecordMarkerFailedCause const ( - RecordMarkerFailedCauseOperation_not_permitted RecordMarkerFailedCause = "OPERATION_NOT_PERMITTED" + RecordMarkerFailedCauseOperationNotPermitted RecordMarkerFailedCause = "OPERATION_NOT_PERMITTED" ) // Values returns all known values for RecordMarkerFailedCause. Note that this can @@ -428,8 +428,8 @@ type RequestCancelActivityTaskFailedCause string // Enum values for RequestCancelActivityTaskFailedCause const ( - RequestCancelActivityTaskFailedCauseActivity_id_unknown RequestCancelActivityTaskFailedCause = "ACTIVITY_ID_UNKNOWN" - RequestCancelActivityTaskFailedCauseOperation_not_permitted RequestCancelActivityTaskFailedCause = "OPERATION_NOT_PERMITTED" + RequestCancelActivityTaskFailedCauseActivityIdUnknown RequestCancelActivityTaskFailedCause = "ACTIVITY_ID_UNKNOWN" + RequestCancelActivityTaskFailedCauseOperationNotPermitted RequestCancelActivityTaskFailedCause = "OPERATION_NOT_PERMITTED" ) // Values returns all known values for RequestCancelActivityTaskFailedCause. 
Note @@ -447,9 +447,9 @@ type RequestCancelExternalWorkflowExecutionFailedCause string // Enum values for RequestCancelExternalWorkflowExecutionFailedCause const ( - RequestCancelExternalWorkflowExecutionFailedCauseUnknown_external_workflow_execution RequestCancelExternalWorkflowExecutionFailedCause = "UNKNOWN_EXTERNAL_WORKFLOW_EXECUTION" - RequestCancelExternalWorkflowExecutionFailedCauseRequest_cancel_external_workflow_execution_rate_exceeded RequestCancelExternalWorkflowExecutionFailedCause = "REQUEST_CANCEL_EXTERNAL_WORKFLOW_EXECUTION_RATE_EXCEEDED" - RequestCancelExternalWorkflowExecutionFailedCauseOperation_not_permitted RequestCancelExternalWorkflowExecutionFailedCause = "OPERATION_NOT_PERMITTED" + RequestCancelExternalWorkflowExecutionFailedCauseUnknownExternalWorkflowExecution RequestCancelExternalWorkflowExecutionFailedCause = "UNKNOWN_EXTERNAL_WORKFLOW_EXECUTION" + RequestCancelExternalWorkflowExecutionFailedCauseRequestCancelExternalWorkflowExecutionRateExceeded RequestCancelExternalWorkflowExecutionFailedCause = "REQUEST_CANCEL_EXTERNAL_WORKFLOW_EXECUTION_RATE_EXCEEDED" + RequestCancelExternalWorkflowExecutionFailedCauseOperationNotPermitted RequestCancelExternalWorkflowExecutionFailedCause = "OPERATION_NOT_PERMITTED" ) // Values returns all known values for @@ -468,17 +468,17 @@ type ScheduleActivityTaskFailedCause string // Enum values for ScheduleActivityTaskFailedCause const ( - ScheduleActivityTaskFailedCauseActivity_type_deprecated ScheduleActivityTaskFailedCause = "ACTIVITY_TYPE_DEPRECATED" - ScheduleActivityTaskFailedCauseActivity_type_does_not_exist ScheduleActivityTaskFailedCause = "ACTIVITY_TYPE_DOES_NOT_EXIST" - ScheduleActivityTaskFailedCauseActivity_id_already_in_use ScheduleActivityTaskFailedCause = "ACTIVITY_ID_ALREADY_IN_USE" - ScheduleActivityTaskFailedCauseOpen_activities_limit_exceeded ScheduleActivityTaskFailedCause = "OPEN_ACTIVITIES_LIMIT_EXCEEDED" - ScheduleActivityTaskFailedCauseActivity_creation_rate_exceeded ScheduleActivityTaskFailedCause = "ACTIVITY_CREATION_RATE_EXCEEDED" - ScheduleActivityTaskFailedCauseDefault_schedule_to_close_timeout_undefined ScheduleActivityTaskFailedCause = "DEFAULT_SCHEDULE_TO_CLOSE_TIMEOUT_UNDEFINED" - ScheduleActivityTaskFailedCauseDefault_task_list_undefined ScheduleActivityTaskFailedCause = "DEFAULT_TASK_LIST_UNDEFINED" - ScheduleActivityTaskFailedCauseDefault_schedule_to_start_timeout_undefined ScheduleActivityTaskFailedCause = "DEFAULT_SCHEDULE_TO_START_TIMEOUT_UNDEFINED" - ScheduleActivityTaskFailedCauseDefault_start_to_close_timeout_undefined ScheduleActivityTaskFailedCause = "DEFAULT_START_TO_CLOSE_TIMEOUT_UNDEFINED" - ScheduleActivityTaskFailedCauseDefault_heartbeat_timeout_undefined ScheduleActivityTaskFailedCause = "DEFAULT_HEARTBEAT_TIMEOUT_UNDEFINED" - ScheduleActivityTaskFailedCauseOperation_not_permitted ScheduleActivityTaskFailedCause = "OPERATION_NOT_PERMITTED" + ScheduleActivityTaskFailedCauseActivityTypeDeprecated ScheduleActivityTaskFailedCause = "ACTIVITY_TYPE_DEPRECATED" + ScheduleActivityTaskFailedCauseActivityTypeDoesNotExist ScheduleActivityTaskFailedCause = "ACTIVITY_TYPE_DOES_NOT_EXIST" + ScheduleActivityTaskFailedCauseActivityIdAlreadyInUse ScheduleActivityTaskFailedCause = "ACTIVITY_ID_ALREADY_IN_USE" + ScheduleActivityTaskFailedCauseOpenActivitiesLimitExceeded ScheduleActivityTaskFailedCause = "OPEN_ACTIVITIES_LIMIT_EXCEEDED" + ScheduleActivityTaskFailedCauseActivityCreationRateExceeded ScheduleActivityTaskFailedCause = "ACTIVITY_CREATION_RATE_EXCEEDED" + 
ScheduleActivityTaskFailedCauseDefaultScheduleToCloseTimeoutUndefined ScheduleActivityTaskFailedCause = "DEFAULT_SCHEDULE_TO_CLOSE_TIMEOUT_UNDEFINED" + ScheduleActivityTaskFailedCauseDefaultTaskListUndefined ScheduleActivityTaskFailedCause = "DEFAULT_TASK_LIST_UNDEFINED" + ScheduleActivityTaskFailedCauseDefaultScheduleToStartTimeoutUndefined ScheduleActivityTaskFailedCause = "DEFAULT_SCHEDULE_TO_START_TIMEOUT_UNDEFINED" + ScheduleActivityTaskFailedCauseDefaultStartToCloseTimeoutUndefined ScheduleActivityTaskFailedCause = "DEFAULT_START_TO_CLOSE_TIMEOUT_UNDEFINED" + ScheduleActivityTaskFailedCauseDefaultHeartbeatTimeoutUndefined ScheduleActivityTaskFailedCause = "DEFAULT_HEARTBEAT_TIMEOUT_UNDEFINED" + ScheduleActivityTaskFailedCauseOperationNotPermitted ScheduleActivityTaskFailedCause = "OPERATION_NOT_PERMITTED" ) // Values returns all known values for ScheduleActivityTaskFailedCause. Note that @@ -505,10 +505,10 @@ type ScheduleLambdaFunctionFailedCause string // Enum values for ScheduleLambdaFunctionFailedCause const ( - ScheduleLambdaFunctionFailedCauseId_already_in_use ScheduleLambdaFunctionFailedCause = "ID_ALREADY_IN_USE" - ScheduleLambdaFunctionFailedCauseOpen_lambda_functions_limit_exceeded ScheduleLambdaFunctionFailedCause = "OPEN_LAMBDA_FUNCTIONS_LIMIT_EXCEEDED" - ScheduleLambdaFunctionFailedCauseLambda_function_creation_rate_exceeded ScheduleLambdaFunctionFailedCause = "LAMBDA_FUNCTION_CREATION_RATE_EXCEEDED" - ScheduleLambdaFunctionFailedCauseLambda_service_not_available_in_region ScheduleLambdaFunctionFailedCause = "LAMBDA_SERVICE_NOT_AVAILABLE_IN_REGION" + ScheduleLambdaFunctionFailedCauseIdAlreadyInUse ScheduleLambdaFunctionFailedCause = "ID_ALREADY_IN_USE" + ScheduleLambdaFunctionFailedCauseOpenLambdaFunctionsLimitExceeded ScheduleLambdaFunctionFailedCause = "OPEN_LAMBDA_FUNCTIONS_LIMIT_EXCEEDED" + ScheduleLambdaFunctionFailedCauseLambdaFunctionCreationRateExceeded ScheduleLambdaFunctionFailedCause = "LAMBDA_FUNCTION_CREATION_RATE_EXCEEDED" + ScheduleLambdaFunctionFailedCauseLambdaServiceNotAvailableInRegion ScheduleLambdaFunctionFailedCause = "LAMBDA_SERVICE_NOT_AVAILABLE_IN_REGION" ) // Values returns all known values for ScheduleLambdaFunctionFailedCause. Note that @@ -528,9 +528,9 @@ type SignalExternalWorkflowExecutionFailedCause string // Enum values for SignalExternalWorkflowExecutionFailedCause const ( - SignalExternalWorkflowExecutionFailedCauseUnknown_external_workflow_execution SignalExternalWorkflowExecutionFailedCause = "UNKNOWN_EXTERNAL_WORKFLOW_EXECUTION" - SignalExternalWorkflowExecutionFailedCauseSignal_external_workflow_execution_rate_exceeded SignalExternalWorkflowExecutionFailedCause = "SIGNAL_EXTERNAL_WORKFLOW_EXECUTION_RATE_EXCEEDED" - SignalExternalWorkflowExecutionFailedCauseOperation_not_permitted SignalExternalWorkflowExecutionFailedCause = "OPERATION_NOT_PERMITTED" + SignalExternalWorkflowExecutionFailedCauseUnknownExternalWorkflowExecution SignalExternalWorkflowExecutionFailedCause = "UNKNOWN_EXTERNAL_WORKFLOW_EXECUTION" + SignalExternalWorkflowExecutionFailedCauseSignalExternalWorkflowExecutionRateExceeded SignalExternalWorkflowExecutionFailedCause = "SIGNAL_EXTERNAL_WORKFLOW_EXECUTION_RATE_EXCEEDED" + SignalExternalWorkflowExecutionFailedCauseOperationNotPermitted SignalExternalWorkflowExecutionFailedCause = "OPERATION_NOT_PERMITTED" ) // Values returns all known values for SignalExternalWorkflowExecutionFailedCause. 
@@ -549,17 +549,17 @@ type StartChildWorkflowExecutionFailedCause string // Enum values for StartChildWorkflowExecutionFailedCause const ( - StartChildWorkflowExecutionFailedCauseWorkflow_type_does_not_exist StartChildWorkflowExecutionFailedCause = "WORKFLOW_TYPE_DOES_NOT_EXIST" - StartChildWorkflowExecutionFailedCauseWorkflow_type_deprecated StartChildWorkflowExecutionFailedCause = "WORKFLOW_TYPE_DEPRECATED" - StartChildWorkflowExecutionFailedCauseOpen_children_limit_exceeded StartChildWorkflowExecutionFailedCause = "OPEN_CHILDREN_LIMIT_EXCEEDED" - StartChildWorkflowExecutionFailedCauseOpen_workflows_limit_exceeded StartChildWorkflowExecutionFailedCause = "OPEN_WORKFLOWS_LIMIT_EXCEEDED" - StartChildWorkflowExecutionFailedCauseChild_creation_rate_exceeded StartChildWorkflowExecutionFailedCause = "CHILD_CREATION_RATE_EXCEEDED" - StartChildWorkflowExecutionFailedCauseWorkflow_already_running StartChildWorkflowExecutionFailedCause = "WORKFLOW_ALREADY_RUNNING" - StartChildWorkflowExecutionFailedCauseDefault_execution_start_to_close_timeout_undefined StartChildWorkflowExecutionFailedCause = "DEFAULT_EXECUTION_START_TO_CLOSE_TIMEOUT_UNDEFINED" - StartChildWorkflowExecutionFailedCauseDefault_task_list_undefined StartChildWorkflowExecutionFailedCause = "DEFAULT_TASK_LIST_UNDEFINED" - StartChildWorkflowExecutionFailedCauseDefault_task_start_to_close_timeout_undefined StartChildWorkflowExecutionFailedCause = "DEFAULT_TASK_START_TO_CLOSE_TIMEOUT_UNDEFINED" - StartChildWorkflowExecutionFailedCauseDefault_child_policy_undefined StartChildWorkflowExecutionFailedCause = "DEFAULT_CHILD_POLICY_UNDEFINED" - StartChildWorkflowExecutionFailedCauseOperation_not_permitted StartChildWorkflowExecutionFailedCause = "OPERATION_NOT_PERMITTED" + StartChildWorkflowExecutionFailedCauseWorkflowTypeDoesNotExist StartChildWorkflowExecutionFailedCause = "WORKFLOW_TYPE_DOES_NOT_EXIST" + StartChildWorkflowExecutionFailedCauseWorkflowTypeDeprecated StartChildWorkflowExecutionFailedCause = "WORKFLOW_TYPE_DEPRECATED" + StartChildWorkflowExecutionFailedCauseOpenChildrenLimitExceeded StartChildWorkflowExecutionFailedCause = "OPEN_CHILDREN_LIMIT_EXCEEDED" + StartChildWorkflowExecutionFailedCauseOpenWorkflowsLimitExceeded StartChildWorkflowExecutionFailedCause = "OPEN_WORKFLOWS_LIMIT_EXCEEDED" + StartChildWorkflowExecutionFailedCauseChildCreationRateExceeded StartChildWorkflowExecutionFailedCause = "CHILD_CREATION_RATE_EXCEEDED" + StartChildWorkflowExecutionFailedCauseWorkflowAlreadyRunning StartChildWorkflowExecutionFailedCause = "WORKFLOW_ALREADY_RUNNING" + StartChildWorkflowExecutionFailedCauseDefaultExecutionStartToCloseTimeoutUndefined StartChildWorkflowExecutionFailedCause = "DEFAULT_EXECUTION_START_TO_CLOSE_TIMEOUT_UNDEFINED" + StartChildWorkflowExecutionFailedCauseDefaultTaskListUndefined StartChildWorkflowExecutionFailedCause = "DEFAULT_TASK_LIST_UNDEFINED" + StartChildWorkflowExecutionFailedCauseDefaultTaskStartToCloseTimeoutUndefined StartChildWorkflowExecutionFailedCause = "DEFAULT_TASK_START_TO_CLOSE_TIMEOUT_UNDEFINED" + StartChildWorkflowExecutionFailedCauseDefaultChildPolicyUndefined StartChildWorkflowExecutionFailedCause = "DEFAULT_CHILD_POLICY_UNDEFINED" + StartChildWorkflowExecutionFailedCauseOperationNotPermitted StartChildWorkflowExecutionFailedCause = "OPERATION_NOT_PERMITTED" ) // Values returns all known values for StartChildWorkflowExecutionFailedCause. 
Note @@ -586,7 +586,7 @@ type StartLambdaFunctionFailedCause string // Enum values for StartLambdaFunctionFailedCause const ( - StartLambdaFunctionFailedCauseAssume_role_failed StartLambdaFunctionFailedCause = "ASSUME_ROLE_FAILED" + StartLambdaFunctionFailedCauseAssumeRoleFailed StartLambdaFunctionFailedCause = "ASSUME_ROLE_FAILED" ) // Values returns all known values for StartLambdaFunctionFailedCause. Note that @@ -603,10 +603,10 @@ type StartTimerFailedCause string // Enum values for StartTimerFailedCause const ( - StartTimerFailedCauseTimer_id_already_in_use StartTimerFailedCause = "TIMER_ID_ALREADY_IN_USE" - StartTimerFailedCauseOpen_timers_limit_exceeded StartTimerFailedCause = "OPEN_TIMERS_LIMIT_EXCEEDED" - StartTimerFailedCauseTimer_creation_rate_exceeded StartTimerFailedCause = "TIMER_CREATION_RATE_EXCEEDED" - StartTimerFailedCauseOperation_not_permitted StartTimerFailedCause = "OPERATION_NOT_PERMITTED" + StartTimerFailedCauseTimerIdAlreadyInUse StartTimerFailedCause = "TIMER_ID_ALREADY_IN_USE" + StartTimerFailedCauseOpenTimersLimitExceeded StartTimerFailedCause = "OPEN_TIMERS_LIMIT_EXCEEDED" + StartTimerFailedCauseTimerCreationRateExceeded StartTimerFailedCause = "TIMER_CREATION_RATE_EXCEEDED" + StartTimerFailedCauseOperationNotPermitted StartTimerFailedCause = "OPERATION_NOT_PERMITTED" ) // Values returns all known values for StartTimerFailedCause. Note that this can be @@ -625,7 +625,7 @@ type WorkflowExecutionCancelRequestedCause string // Enum values for WorkflowExecutionCancelRequestedCause const ( - WorkflowExecutionCancelRequestedCauseChild_policy_applied WorkflowExecutionCancelRequestedCause = "CHILD_POLICY_APPLIED" + WorkflowExecutionCancelRequestedCauseChildPolicyApplied WorkflowExecutionCancelRequestedCause = "CHILD_POLICY_APPLIED" ) // Values returns all known values for WorkflowExecutionCancelRequestedCause. Note @@ -642,9 +642,9 @@ type WorkflowExecutionTerminatedCause string // Enum values for WorkflowExecutionTerminatedCause const ( - WorkflowExecutionTerminatedCauseChild_policy_applied WorkflowExecutionTerminatedCause = "CHILD_POLICY_APPLIED" - WorkflowExecutionTerminatedCauseEvent_limit_exceeded WorkflowExecutionTerminatedCause = "EVENT_LIMIT_EXCEEDED" - WorkflowExecutionTerminatedCauseOperator_initiated WorkflowExecutionTerminatedCause = "OPERATOR_INITIATED" + WorkflowExecutionTerminatedCauseChildPolicyApplied WorkflowExecutionTerminatedCause = "CHILD_POLICY_APPLIED" + WorkflowExecutionTerminatedCauseEventLimitExceeded WorkflowExecutionTerminatedCause = "EVENT_LIMIT_EXCEEDED" + WorkflowExecutionTerminatedCauseOperatorInitiated WorkflowExecutionTerminatedCause = "OPERATOR_INITIATED" ) // Values returns all known values for WorkflowExecutionTerminatedCause. Note that @@ -663,7 +663,7 @@ type WorkflowExecutionTimeoutType string // Enum values for WorkflowExecutionTimeoutType const ( - WorkflowExecutionTimeoutTypeStart_to_close WorkflowExecutionTimeoutType = "START_TO_CLOSE" + WorkflowExecutionTimeoutTypeStartToClose WorkflowExecutionTimeoutType = "START_TO_CLOSE" ) // Values returns all known values for WorkflowExecutionTimeoutType. Note that this diff --git a/service/swf/types/types.go b/service/swf/types/types.go index de009a27de8..1bdcd55ca94 100644 --- a/service/swf/types/types.go +++ b/service/swf/types/types.go @@ -291,16 +291,16 @@ type ActivityTypeInfo struct { } // Provides the details of the CancelTimer decision. 
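The enums.go hunks above rename the generated constants from underscore style to CamelCase (for example, ChildPolicyRequest_cancel becomes ChildPolicyRequestCancel) while leaving the wire values untouched, so callers only need to update identifiers. A small sketch written against the renamed constants, including the generated Values helper:

// describeCloseStatus switches over the renamed CloseStatus constants from the
// enums.go hunks above; the string values sent on the wire are unchanged by
// the rename. closeStatusNames lists every known value via the generated
// Values method on the enum type.
package example

import (
	"github.com/aws/aws-sdk-go-v2/service/swf/types"
)

func describeCloseStatus(s types.CloseStatus) string {
	switch s {
	case types.CloseStatusCompleted:
		return "workflow completed"
	case types.CloseStatusFailed:
		return "workflow failed"
	case types.CloseStatusCanceled:
		return "workflow canceled"
	case types.CloseStatusTerminated:
		return "workflow terminated"
	case types.CloseStatusContinuedAsNew: // previously CloseStatusContinued_as_new
		return "continued as a new run"
	case types.CloseStatusTimedOut: // previously CloseStatusTimed_out
		return "workflow timed out"
	default:
		return "unknown close status: " + string(s)
	}
}

// closeStatusNames returns the string form of every known CloseStatus value.
func closeStatusNames() []string {
	var names []string
	for _, v := range types.CloseStatus("").Values() {
		names = append(names, string(v))
	}
	return names
}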
Access Control You can use IAM -// policies to control this decision's access to Amazon SWF resources as follows: -// +// policies to control this decision's access to Amazon SWF resources as +// follows: // -// * Use a Resource element with the domain name to limit the action to only -// specified domains. +// * Use a Resource element with the domain name to limit the action to +// only specified domains. // -// * Use an Action element to allow or deny permission to +// * Use an Action element to allow or deny permission to // call this action. // -// * You cannot use an IAM policy to constrain this action's +// * You cannot use an IAM policy to constrain this action's // parameters. // // If the caller doesn't have sufficient permissions to invoke the @@ -349,20 +349,20 @@ type CancelTimerFailedEventAttributes struct { // can use IAM policies to control this decision's access to Amazon SWF resources // as follows: // -// * Use a Resource element with the domain name to limit the -// action to only specified domains. +// * Use a Resource element with the domain name to limit the action +// to only specified domains. // -// * Use an Action element to allow or deny -// permission to call this action. +// * Use an Action element to allow or deny permission +// to call this action. // -// * You cannot use an IAM policy to constrain -// this action's parameters. +// * You cannot use an IAM policy to constrain this action's +// parameters. // -// If the caller doesn't have sufficient permissions to -// invoke the action, or the parameter values fall outside the specified -// constraints, the action fails. The associated event attribute's cause parameter -// is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see -// Using IAM to Manage Access to Amazon SWF Workflows +// If the caller doesn't have sufficient permissions to invoke the +// action, or the parameter values fall outside the specified constraints, the +// action fails. The associated event attribute's cause parameter is set to +// OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to +// Manage Access to Amazon SWF Workflows // (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. type CancelWorkflowExecutionDecisionAttributes struct { @@ -592,20 +592,20 @@ type CloseStatusFilter struct { // You can use IAM policies to control this decision's access to Amazon SWF // resources as follows: // -// * Use a Resource element with the domain name to -// limit the action to only specified domains. +// * Use a Resource element with the domain name to limit +// the action to only specified domains. // -// * Use an Action element to -// allow or deny permission to call this action. +// * Use an Action element to allow or deny +// permission to call this action. // -// * You cannot use an IAM -// policy to constrain this action's parameters. +// * You cannot use an IAM policy to constrain +// this action's parameters. // -// If the caller doesn't have -// sufficient permissions to invoke the action, or the parameter values fall -// outside the specified constraints, the action fails. The associated event -// attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and -// example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows +// If the caller doesn't have sufficient permissions to +// invoke the action, or the parameter values fall outside the specified +// constraints, the action fails. 
The associated event attribute's cause parameter +// is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see +// Using IAM to Manage Access to Amazon SWF Workflows // (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. type CompleteWorkflowExecutionDecisionAttributes struct { @@ -641,29 +641,29 @@ type CompleteWorkflowExecutionFailedEventAttributes struct { // Control You can use IAM policies to control this decision's access to Amazon SWF // resources as follows: // -// * Use a Resource element with the domain name to -// limit the action to only specified domains. +// * Use a Resource element with the domain name to limit +// the action to only specified domains. // -// * Use an Action element to -// allow or deny permission to call this action. +// * Use an Action element to allow or deny +// permission to call this action. // -// * Constrain the following -// parameters by using a Condition element with the appropriate keys. +// * Constrain the following parameters by using a +// Condition element with the appropriate keys. // -// * -// tag – A tag used to identify the workflow execution +// * tag – A tag used to identify the +// workflow execution // -// * taskList – String -// constraint. The key is swf:taskList.name. +// * taskList – String constraint. The key is +// swf:taskList.name. // -// * workflowType.version – -// String constraint. The key is swf:workflowType.version. +// * workflowType.version – String constraint. The key is +// swf:workflowType.version. // -// If the caller doesn't -// have sufficient permissions to invoke the action, or the parameter values fall -// outside the specified constraints, the action fails. The associated event -// attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and -// example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows +// If the caller doesn't have sufficient permissions to +// invoke the action, or the parameter values fall outside the specified +// constraints, the action fails. The associated event attribute's cause parameter +// is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see +// Using IAM to Manage Access to Amazon SWF Workflows // (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. type ContinueAsNewWorkflowExecutionDecisionAttributes struct { @@ -674,21 +674,21 @@ type ContinueAsNewWorkflowExecutionDecisionAttributes struct { // policy specified when registering the workflow type using RegisterWorkflowType. // The supported child policies are: // - // * TERMINATE – The child executions are + // * TERMINATE – The child executions are // terminated. // - // * REQUEST_CANCEL – A request to cancel is attempted for each - // child execution by recording a WorkflowExecutionCancelRequested event in its - // history. It is up to the decider to take appropriate actions when it receives an + // * REQUEST_CANCEL – A request to cancel is attempted for each child + // execution by recording a WorkflowExecutionCancelRequested event in its history. + // It is up to the decider to take appropriate actions when it receives an // execution history with this event. // - // * ABANDON – No action is taken. The - // child executions continue to run. + // * ABANDON – No action is taken. The child + // executions continue to run. 
// - // A child policy for this workflow execution - // must be specified either as a default for the workflow type or through this - // parameter. If neither this parameter is set nor a default child policy was - // specified at registration time then a fault is returned. + // A child policy for this workflow execution must be + // specified either as a default for the workflow type or through this parameter. + // If neither this parameter is set nor a default child policy was specified at + // registration time then a fault is returned. ChildPolicy ChildPolicy // If set, specifies the total duration for this workflow execution. This overrides @@ -767,131 +767,131 @@ type ContinueAsNewWorkflowExecutionFailedEventAttributes struct { // Specifies a decision made by the decider. A decision can be one of these // types: // -// * CancelTimer – Cancels a previously started timer and records a +// * CancelTimer – Cancels a previously started timer and records a // TimerCanceled event in the history. // -// * CancelWorkflowExecution – Closes the +// * CancelWorkflowExecution – Closes the // workflow execution and records a WorkflowExecutionCanceled event in the // history. // -// * CompleteWorkflowExecution – Closes the workflow execution and +// * CompleteWorkflowExecution – Closes the workflow execution and // records a WorkflowExecutionCompleted event in the history . // -// * +// * // ContinueAsNewWorkflowExecution – Closes the workflow execution and starts a new // workflow execution of the same type using the same workflow ID and a unique run // Id. A WorkflowExecutionContinuedAsNew event is recorded in the history. // -// * +// * // FailWorkflowExecution – Closes the workflow execution and records a // WorkflowExecutionFailed event in the history. // -// * RecordMarker – Records a +// * RecordMarker – Records a // MarkerRecorded event in the history. Markers can be used for adding custom // information in the history for instance to let deciders know that they don't // need to look at the history beyond the marker event. // -// * +// * // RequestCancelActivityTask – Attempts to cancel a previously scheduled activity // task. If the activity task was scheduled but has not been assigned to a worker, // then it is canceled. If the activity task was already assigned to a worker, then // the worker is informed that cancellation has been requested in the response to // RecordActivityTaskHeartbeat. // -// * RequestCancelExternalWorkflowExecution – +// * RequestCancelExternalWorkflowExecution – // Requests that a request be made to cancel the specified external workflow // execution and records a RequestCancelExternalWorkflowExecutionInitiated event in // the history. // -// * ScheduleActivityTask – Schedules an activity task. +// * ScheduleActivityTask – Schedules an activity task. // -// * +// * // SignalExternalWorkflowExecution – Requests a signal to be delivered to the // specified external workflow execution and records a // SignalExternalWorkflowExecutionInitiated event in the history. // -// * +// * // StartChildWorkflowExecution – Requests that a child workflow execution be // started and records a StartChildWorkflowExecutionInitiated event in the history. // The child workflow execution is a separate workflow execution with its own // history. // -// * StartTimer – Starts a timer for this workflow execution and -// records a TimerStarted event in the history. This timer fires after the -// specified delay and record a TimerFired event. 
+// * StartTimer – Starts a timer for this workflow execution and records +// a TimerStarted event in the history. This timer fires after the specified delay +// and record a TimerFired event. // -// Access Control If you grant -// permission to use RespondDecisionTaskCompleted, you can use IAM policies to -// express permissions for the list of decisions returned by this action as if they -// were members of the API. Treating decisions as a pseudo API maintains a uniform -// conceptual model and helps keep policies readable. For details and example IAM -// policies, see Using IAM to Manage Access to Amazon SWF Workflows +// Access Control If you grant permission to use +// RespondDecisionTaskCompleted, you can use IAM policies to express permissions +// for the list of decisions returned by this action as if they were members of the +// API. Treating decisions as a pseudo API maintains a uniform conceptual model and +// helps keep policies readable. For details and example IAM policies, see Using +// IAM to Manage Access to Amazon SWF Workflows // (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. Decision Failure Decisions can fail for // several reasons // -// * The ordering of decisions should follow a logical flow. -// Some decisions might not make sense in the current context of the workflow -// execution and therefore fails. +// * The ordering of decisions should follow a logical flow. Some +// decisions might not make sense in the current context of the workflow execution +// and therefore fails. // -// * A limit on your account was reached. +// * A limit on your account was reached. // +// * The decision +// lacks sufficient permissions. // -// * The decision lacks sufficient permissions. -// -// One of the following events might -// be added to the history to indicate an error. The event attribute's cause -// parameter indicates the cause. If cause is set to OPERATION_NOT_PERMITTED, the -// decision failed because it lacked sufficient permissions. For details and -// example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows +// One of the following events might be added to the +// history to indicate an error. The event attribute's cause parameter indicates +// the cause. If cause is set to OPERATION_NOT_PERMITTED, the decision failed +// because it lacked sufficient permissions. For details and example IAM policies, +// see Using IAM to Manage Access to Amazon SWF Workflows // (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. // -// * ScheduleActivityTaskFailed – A +// * ScheduleActivityTaskFailed – A // ScheduleActivityTask decision failed. This could happen if the activity type // specified in the decision isn't registered, is in a deprecated state, or the // decision isn't properly configured. // -// * RequestCancelActivityTaskFailed – A +// * RequestCancelActivityTaskFailed – A // RequestCancelActivityTask decision failed. This could happen if there is no open // activity task with the specified activityId. // -// * StartTimerFailed – A -// StartTimer decision failed. This could happen if there is another open timer -// with the same timerId. +// * StartTimerFailed – A StartTimer +// decision failed. This could happen if there is another open timer with the same +// timerId. // -// * CancelTimerFailed – A CancelTimer decision failed. -// This could happen if there is no open timer with the specified timerId. 
+// * CancelTimerFailed – A CancelTimer decision failed. This could happen +// if there is no open timer with the specified timerId. // -// * +// * // StartChildWorkflowExecutionFailed – A StartChildWorkflowExecution decision // failed. This could happen if the workflow type specified isn't registered, is // deprecated, or the decision isn't properly configured. // -// * +// * // SignalExternalWorkflowExecutionFailed – A SignalExternalWorkflowExecution // decision failed. This could happen if the workflowID specified in the decision // was incorrect. // -// * RequestCancelExternalWorkflowExecutionFailed – A +// * RequestCancelExternalWorkflowExecutionFailed – A // RequestCancelExternalWorkflowExecution decision failed. This could happen if the // workflowID specified in the decision was incorrect. // -// * +// * // CancelWorkflowExecutionFailed – A CancelWorkflowExecution decision failed. This // could happen if there is an unhandled decision task pending in the workflow // execution. // -// * CompleteWorkflowExecutionFailed – A CompleteWorkflowExecution +// * CompleteWorkflowExecutionFailed – A CompleteWorkflowExecution // decision failed. This could happen if there is an unhandled decision task // pending in the workflow execution. // -// * ContinueAsNewWorkflowExecutionFailed – -// A ContinueAsNewWorkflowExecution decision failed. This could happen if there is -// an unhandled decision task pending in the workflow execution or the +// * ContinueAsNewWorkflowExecutionFailed – A +// ContinueAsNewWorkflowExecution decision failed. This could happen if there is an +// unhandled decision task pending in the workflow execution or the // ContinueAsNewWorkflowExecution decision was not configured correctly. // -// * +// * // FailWorkflowExecutionFailed – A FailWorkflowExecution decision failed. This // could happen if there is an unhandled decision task pending in the workflow // execution. @@ -914,39 +914,39 @@ type ContinueAsNewWorkflowExecutionFailedEventAttributes struct { // decision type field to one of the above decision values, and then set the // corresponding attributes field shown below: // -// * +// * // ScheduleActivityTaskDecisionAttributes // -// * +// * // RequestCancelActivityTaskDecisionAttributes // -// * +// * // CompleteWorkflowExecutionDecisionAttributes // -// * +// * // FailWorkflowExecutionDecisionAttributes // -// * +// * // CancelWorkflowExecutionDecisionAttributes // -// * +// * // ContinueAsNewWorkflowExecutionDecisionAttributes // -// * +// * // RecordMarkerDecisionAttributes // -// * StartTimerDecisionAttributes +// * StartTimerDecisionAttributes // -// * +// * // CancelTimerDecisionAttributes // -// * +// * // SignalExternalWorkflowExecutionDecisionAttributes // -// * +// * // RequestCancelExternalWorkflowExecutionDecisionAttributes // -// * +// * // StartChildWorkflowExecutionDecisionAttributes type Decision struct { @@ -1109,11 +1109,11 @@ type DomainInfo struct { // The status of the domain: // - // * REGISTERED – The domain is properly registered - // and available. You can use this domain for registering types and creating new + // * REGISTERED – The domain is properly registered and + // available. You can use this domain for registering types and creating new // workflow executions. // - // * DEPRECATED – The domain was deprecated using + // * DEPRECATED – The domain was deprecated using // DeprecateDomain, but is still in use. You should not create new workflow // executions in this domain. 
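The Decision documentation above lists the decision types and their attribute structs. A minimal sketch of a decider returning a single CompleteWorkflowExecution decision follows; the DecisionTypeCompleteWorkflowExecution constant and the []types.Decision field shape follow this SDK's generated naming conventions but are not shown in this patch, so treat them as assumptions.

// completeWorkflow is a minimal sketch of a decider returning one
// CompleteWorkflowExecution decision, one of the decision types listed above.
// The DecisionType constant and Decisions field shape are assumed from the
// SDK's generated naming; they do not appear in this patch.
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/swf"
	"github.com/aws/aws-sdk-go-v2/service/swf/types"
)

func completeWorkflow(ctx context.Context, client *swf.Client, taskToken string) error {
	decision := types.Decision{
		DecisionType: types.DecisionTypeCompleteWorkflowExecution,
		CompleteWorkflowExecutionDecisionAttributes: &types.CompleteWorkflowExecutionDecisionAttributes{
			Result: aws.String(`{"status":"done"}`),
		},
	}
	_, err := client.RespondDecisionTaskCompleted(ctx, &swf.RespondDecisionTaskCompletedInput{
		TaskToken: aws.String(taskToken),
		Decisions: []types.Decision{decision},
	})
	return err
}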
// @@ -1181,20 +1181,20 @@ type ExternalWorkflowExecutionSignaledEventAttributes struct { // can use IAM policies to control this decision's access to Amazon SWF resources // as follows: // -// * Use a Resource element with the domain name to limit the -// action to only specified domains. +// * Use a Resource element with the domain name to limit the action +// to only specified domains. // -// * Use an Action element to allow or deny -// permission to call this action. +// * Use an Action element to allow or deny permission +// to call this action. // -// * You cannot use an IAM policy to constrain -// this action's parameters. +// * You cannot use an IAM policy to constrain this action's +// parameters. // -// If the caller doesn't have sufficient permissions to -// invoke the action, or the parameter values fall outside the specified -// constraints, the action fails. The associated event attribute's cause parameter -// is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see -// Using IAM to Manage Access to Amazon SWF Workflows +// If the caller doesn't have sufficient permissions to invoke the +// action, or the parameter values fall outside the specified constraints, the +// action fails. The associated event attribute's cause parameter is set to +// OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to +// Manage Access to Amazon SWF Workflows // (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. type FailWorkflowExecutionDecisionAttributes struct { @@ -1230,175 +1230,171 @@ type FailWorkflowExecutionFailedEventAttributes struct { // Event within a workflow execution. A history event can be one of these types: // +// * +// ActivityTaskCancelRequested – A RequestCancelActivityTask decision was received +// by the system. // -// * ActivityTaskCancelRequested – A RequestCancelActivityTask decision was -// received by the system. -// -// * ActivityTaskCanceled – The activity task was -// successfully canceled. -// -// * ActivityTaskCompleted – An activity worker -// successfully completed an activity task by calling -// RespondActivityTaskCompleted. -// -// * ActivityTaskFailed – An activity worker -// failed an activity task by calling RespondActivityTaskFailed. +// * ActivityTaskCanceled – The activity task was successfully +// canceled. // -// * -// ActivityTaskScheduled – An activity task was scheduled for execution. +// * ActivityTaskCompleted – An activity worker successfully completed +// an activity task by calling RespondActivityTaskCompleted. // -// * -// ActivityTaskStarted – The scheduled activity task was dispatched to a worker. +// * ActivityTaskFailed +// – An activity worker failed an activity task by calling +// RespondActivityTaskFailed. // +// * ActivityTaskScheduled – An activity task was +// scheduled for execution. // -// * ActivityTaskTimedOut – The activity task timed out. +// * ActivityTaskStarted – The scheduled activity task +// was dispatched to a worker. // -// * CancelTimerFailed – -// Failed to process CancelTimer decision. This happens when the decision isn't -// configured properly, for example no timer exists with the specified timer Id. +// * ActivityTaskTimedOut – The activity task timed +// out. // +// * CancelTimerFailed – Failed to process CancelTimer decision. This happens +// when the decision isn't configured properly, for example no timer exists with +// the specified timer Id. 
// -// * CancelWorkflowExecutionFailed – A request to cancel a workflow execution -// failed. +// * CancelWorkflowExecutionFailed – A request to cancel a +// workflow execution failed. // -// * ChildWorkflowExecutionCanceled – A child workflow execution, -// started by this workflow execution, was canceled and closed. +// * ChildWorkflowExecutionCanceled – A child workflow +// execution, started by this workflow execution, was canceled and closed. // -// * +// * // ChildWorkflowExecutionCompleted – A child workflow execution, started by this // workflow execution, completed successfully and was closed. // -// * +// * // ChildWorkflowExecutionFailed – A child workflow execution, started by this // workflow execution, failed to complete successfully and was closed. // -// * +// * // ChildWorkflowExecutionStarted – A child workflow execution was successfully // started. // -// * ChildWorkflowExecutionTerminated – A child workflow execution, +// * ChildWorkflowExecutionTerminated – A child workflow execution, // started by this workflow execution, was terminated. // -// * +// * // ChildWorkflowExecutionTimedOut – A child workflow execution, started by this // workflow execution, timed out and was closed. // -// * -// CompleteWorkflowExecutionFailed – The workflow execution failed to complete. +// * CompleteWorkflowExecutionFailed +// – The workflow execution failed to complete. // +// * +// ContinueAsNewWorkflowExecutionFailed – The workflow execution failed to complete +// after being continued as a new workflow execution. // -// * ContinueAsNewWorkflowExecutionFailed – The workflow execution failed to -// complete after being continued as a new workflow execution. +// * DecisionTaskCompleted – +// The decider successfully completed a decision task by calling +// RespondDecisionTaskCompleted. // -// * -// DecisionTaskCompleted – The decider successfully completed a decision task by -// calling RespondDecisionTaskCompleted. +// * DecisionTaskScheduled – A decision task was +// scheduled for the workflow execution. // -// * DecisionTaskScheduled – A decision -// task was scheduled for the workflow execution. +// * DecisionTaskStarted – The decision task +// was dispatched to a decider. // -// * DecisionTaskStarted – The -// decision task was dispatched to a decider. +// * DecisionTaskTimedOut – The decision task timed +// out. // -// * DecisionTaskTimedOut – The -// decision task timed out. +// * ExternalWorkflowExecutionCancelRequested – Request to cancel an external +// workflow execution was successfully delivered to the target execution. // -// * ExternalWorkflowExecutionCancelRequested – -// Request to cancel an external workflow execution was successfully delivered to -// the target execution. -// -// * ExternalWorkflowExecutionSignaled – A signal, -// requested by this workflow execution, was successfully delivered to the target -// external workflow execution. +// * +// ExternalWorkflowExecutionSignaled – A signal, requested by this workflow +// execution, was successfully delivered to the target external workflow +// execution. // -// * FailWorkflowExecutionFailed – A request to -// mark a workflow execution as failed, itself failed. +// * FailWorkflowExecutionFailed – A request to mark a workflow +// execution as failed, itself failed. // -// * MarkerRecorded – A -// marker was recorded in the workflow history as the result of a RecordMarker -// decision. +// * MarkerRecorded – A marker was recorded in +// the workflow history as the result of a RecordMarker decision. 
// -// * RecordMarkerFailed – A RecordMarker decision was returned as -// failed. +// * +// RecordMarkerFailed – A RecordMarker decision was returned as failed. // -// * RequestCancelActivityTaskFailed – Failed to process -// RequestCancelActivityTask decision. This happens when the decision isn't -// configured properly. +// * +// RequestCancelActivityTaskFailed – Failed to process RequestCancelActivityTask +// decision. This happens when the decision isn't configured properly. // -// * RequestCancelExternalWorkflowExecutionFailed – -// Request to cancel an external workflow execution failed. +// * +// RequestCancelExternalWorkflowExecutionFailed – Request to cancel an external +// workflow execution failed. // -// * -// RequestCancelExternalWorkflowExecutionInitiated – A request was made to request -// the cancellation of an external workflow execution. +// * RequestCancelExternalWorkflowExecutionInitiated – +// A request was made to request the cancellation of an external workflow +// execution. // -// * -// ScheduleActivityTaskFailed – Failed to process ScheduleActivityTask decision. -// This happens when the decision isn't configured properly, for example the -// activity type specified isn't registered. +// * ScheduleActivityTaskFailed – Failed to process +// ScheduleActivityTask decision. This happens when the decision isn't configured +// properly, for example the activity type specified isn't registered. // -// * +// * // SignalExternalWorkflowExecutionFailed – The request to signal an external // workflow execution failed. // -// * SignalExternalWorkflowExecutionInitiated – A +// * SignalExternalWorkflowExecutionInitiated – A // request to signal an external workflow was made. // -// * StartActivityTaskFailed -// – A scheduled activity task failed to start. +// * StartActivityTaskFailed – A +// scheduled activity task failed to start. // -// * -// StartChildWorkflowExecutionFailed – Failed to process -// StartChildWorkflowExecution decision. This happens when the decision isn't -// configured properly, for example the workflow type specified isn't registered. +// * StartChildWorkflowExecutionFailed – +// Failed to process StartChildWorkflowExecution decision. This happens when the +// decision isn't configured properly, for example the workflow type specified +// isn't registered. // +// * StartChildWorkflowExecutionInitiated – A request was made +// to start a child workflow execution. // -// * StartChildWorkflowExecutionInitiated – A request was made to start a child -// workflow execution. -// -// * StartTimerFailed – Failed to process StartTimer -// decision. This happens when the decision isn't configured properly, for example -// a timer already exists with the specified timer Id. +// * StartTimerFailed – Failed to process +// StartTimer decision. This happens when the decision isn't configured properly, +// for example a timer already exists with the specified timer Id. // -// * TimerCanceled – A -// timer, previously started for this workflow execution, was successfully +// * TimerCanceled +// – A timer, previously started for this workflow execution, was successfully // canceled. // -// * TimerFired – A timer, previously started for this workflow +// * TimerFired – A timer, previously started for this workflow // execution, fired. // -// * TimerStarted – A timer was started for the workflow +// * TimerStarted – A timer was started for the workflow // execution due to a StartTimer decision. 
// -// * WorkflowExecutionCancelRequested -// – A request to cancel this workflow execution was made. +// * WorkflowExecutionCancelRequested – A +// request to cancel this workflow execution was made. // -// * -// WorkflowExecutionCanceled – The workflow execution was successfully canceled and -// closed. +// * WorkflowExecutionCanceled +// – The workflow execution was successfully canceled and closed. // -// * WorkflowExecutionCompleted – The workflow execution was closed -// due to successful completion. +// * +// WorkflowExecutionCompleted – The workflow execution was closed due to successful +// completion. // -// * WorkflowExecutionContinuedAsNew – The -// workflow execution was closed and a new execution of the same type was created -// with the same workflowId. +// * WorkflowExecutionContinuedAsNew – The workflow execution was +// closed and a new execution of the same type was created with the same +// workflowId. // -// * WorkflowExecutionFailed – The workflow -// execution closed due to a failure. +// * WorkflowExecutionFailed – The workflow execution closed due to a +// failure. // -// * WorkflowExecutionSignaled – An -// external signal was received for the workflow execution. +// * WorkflowExecutionSignaled – An external signal was received for the +// workflow execution. // -// * -// WorkflowExecutionStarted – The workflow execution was started. +// * WorkflowExecutionStarted – The workflow execution was +// started. // -// * -// WorkflowExecutionTerminated – The workflow execution was terminated. +// * WorkflowExecutionTerminated – The workflow execution was +// terminated. // -// * -// WorkflowExecutionTimedOut – The workflow execution was closed because a time out -// was exceeded. +// * WorkflowExecutionTimedOut – The workflow execution was closed +// because a time out was exceeded. type HistoryEvent struct { // The system generated ID of the event. This ID uniquely identifies the event with @@ -1807,20 +1803,20 @@ type MarkerRecordedEventAttributes struct { // IAM policies to control this decision's access to Amazon SWF resources as // follows: // -// * Use a Resource element with the domain name to limit the action -// to only specified domains. +// * Use a Resource element with the domain name to limit the action to +// only specified domains. // -// * Use an Action element to allow or deny -// permission to call this action. +// * Use an Action element to allow or deny permission to +// call this action. // -// * You cannot use an IAM policy to constrain -// this action's parameters. +// * You cannot use an IAM policy to constrain this action's +// parameters. // -// If the caller doesn't have sufficient permissions to -// invoke the action, or the parameter values fall outside the specified -// constraints, the action fails. The associated event attribute's cause parameter -// is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see -// Using IAM to Manage Access to Amazon SWF Workflows +// If the caller doesn't have sufficient permissions to invoke the +// action, or the parameter values fall outside the specified constraints, the +// action fails. The associated event attribute's cause parameter is set to +// OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to +// Manage Access to Amazon SWF Workflows // (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. 
type RecordMarkerDecisionAttributes struct { @@ -1865,20 +1861,20 @@ type RecordMarkerFailedEventAttributes struct { // You can use IAM policies to control this decision's access to Amazon SWF // resources as follows: // -// * Use a Resource element with the domain name to -// limit the action to only specified domains. +// * Use a Resource element with the domain name to limit +// the action to only specified domains. // -// * Use an Action element to -// allow or deny permission to call this action. +// * Use an Action element to allow or deny +// permission to call this action. // -// * You cannot use an IAM -// policy to constrain this action's parameters. +// * You cannot use an IAM policy to constrain +// this action's parameters. // -// If the caller doesn't have -// sufficient permissions to invoke the action, or the parameter values fall -// outside the specified constraints, the action fails. The associated event -// attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and -// example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows +// If the caller doesn't have sufficient permissions to +// invoke the action, or the parameter values fall outside the specified +// constraints, the action fails. The associated event attribute's cause parameter +// is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see +// Using IAM to Manage Access to Amazon SWF Workflows // (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. type RequestCancelActivityTaskDecisionAttributes struct { @@ -1920,20 +1916,20 @@ type RequestCancelActivityTaskFailedEventAttributes struct { // Access Control You can use IAM policies to control this decision's access to // Amazon SWF resources as follows: // -// * Use a Resource element with the domain -// name to limit the action to only specified domains. +// * Use a Resource element with the domain name +// to limit the action to only specified domains. // -// * Use an Action element -// to allow or deny permission to call this action. +// * Use an Action element to allow +// or deny permission to call this action. // -// * You cannot use an IAM -// policy to constrain this action's parameters. +// * You cannot use an IAM policy to +// constrain this action's parameters. // -// If the caller doesn't have -// sufficient permissions to invoke the action, or the parameter values fall -// outside the specified constraints, the action fails. The associated event -// attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and -// example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows +// If the caller doesn't have sufficient +// permissions to invoke the action, or the parameter values fall outside the +// specified constraints, the action fails. The associated event attribute's cause +// parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM +// policies, see Using IAM to Manage Access to Amazon SWF Workflows // (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. type RequestCancelExternalWorkflowExecutionDecisionAttributes struct { @@ -2037,23 +2033,22 @@ type ResourceTag struct { // can use IAM policies to control this decision's access to Amazon SWF resources // as follows: // -// * Use a Resource element with the domain name to limit the -// action to only specified domains. 
-// -// * Use an Action element to allow or deny -// permission to call this action. +// * Use a Resource element with the domain name to limit the action +// to only specified domains. // -// * Constrain the following parameters by -// using a Condition element with the appropriate keys. +// * Use an Action element to allow or deny permission +// to call this action. // -// * -// activityType.name – String constraint. The key is swf:activityType.name. +// * Constrain the following parameters by using a Condition +// element with the appropriate keys. // +// * activityType.name – String constraint. The +// key is swf:activityType.name. // -// * activityType.version – String constraint. The key is -// swf:activityType.version. +// * activityType.version – String constraint. The +// key is swf:activityType.version. // -// * taskList – String constraint. The key is +// * taskList – String constraint. The key is // swf:taskList.name. // // If the caller doesn't have sufficient permissions to invoke @@ -2241,20 +2236,20 @@ type ScheduleLambdaFunctionFailedEventAttributes struct { // Control You can use IAM policies to control this decision's access to Amazon SWF // resources as follows: // -// * Use a Resource element with the domain name to -// limit the action to only specified domains. +// * Use a Resource element with the domain name to limit +// the action to only specified domains. // -// * Use an Action element to -// allow or deny permission to call this action. +// * Use an Action element to allow or deny +// permission to call this action. // -// * You cannot use an IAM -// policy to constrain this action's parameters. +// * You cannot use an IAM policy to constrain +// this action's parameters. // -// If the caller doesn't have -// sufficient permissions to invoke the action, or the parameter values fall -// outside the specified constraints, the action fails. The associated event -// attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and -// example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows +// If the caller doesn't have sufficient permissions to +// invoke the action, or the parameter values fall outside the specified +// constraints, the action fails. The associated event attribute's cause parameter +// is set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see +// Using IAM to Manage Access to Amazon SWF Workflows // (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. type SignalExternalWorkflowExecutionDecisionAttributes struct { @@ -2362,33 +2357,32 @@ type SignalExternalWorkflowExecutionInitiatedEventAttributes struct { // You can use IAM policies to control this decision's access to Amazon SWF // resources as follows: // -// * Use a Resource element with the domain name to -// limit the action to only specified domains. +// * Use a Resource element with the domain name to limit +// the action to only specified domains. // -// * Use an Action element to -// allow or deny permission to call this action. +// * Use an Action element to allow or deny +// permission to call this action. // -// * Constrain the following -// parameters by using a Condition element with the appropriate keys. +// * Constrain the following parameters by using a +// Condition element with the appropriate keys. // -// * -// tagList.member.N – The key is "swf:tagList.N" where N is the tag number from 0 -// to 4, inclusive. 
+// * tagList.member.N – The key is +// "swf:tagList.N" where N is the tag number from 0 to 4, inclusive. // -// * taskList – String constraint. The key is -// swf:taskList.name. +// * taskList – +// String constraint. The key is swf:taskList.name. // -// * workflowType.name – String constraint. The key is -// swf:workflowType.name. +// * workflowType.name – String +// constraint. The key is swf:workflowType.name. // -// * workflowType.version – String constraint. The -// key is swf:workflowType.version. +// * workflowType.version – String +// constraint. The key is swf:workflowType.version. // -// If the caller doesn't have sufficient -// permissions to invoke the action, or the parameter values fall outside the -// specified constraints, the action fails. The associated event attribute's cause -// parameter is set to OPERATION_NOT_PERMITTED. For details and example IAM -// policies, see Using IAM to Manage Access to Amazon SWF Workflows +// If the caller doesn't have +// sufficient permissions to invoke the action, or the parameter values fall +// outside the specified constraints, the action fails. The associated event +// attribute's cause parameter is set to OPERATION_NOT_PERMITTED. For details and +// example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows // (https://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html) // in the Amazon SWF Developer Guide. type StartChildWorkflowExecutionDecisionAttributes struct { @@ -2412,22 +2406,22 @@ type StartChildWorkflowExecutionDecisionAttributes struct { // policy overrides the default child policy specified when registering the // workflow type using RegisterWorkflowType. The supported child policies are: // + // * + // TERMINATE – The child executions are terminated. // - // * TERMINATE – The child executions are terminated. - // - // * REQUEST_CANCEL – A - // request to cancel is attempted for each child execution by recording a + // * REQUEST_CANCEL – A request + // to cancel is attempted for each child execution by recording a // WorkflowExecutionCancelRequested event in its history. It is up to the decider // to take appropriate actions when it receives an execution history with this // event. // - // * ABANDON – No action is taken. The child executions continue to - // run. + // * ABANDON – No action is taken. The child executions continue to run. // - // A child policy for this workflow execution must be specified either as a - // default for the workflow type or through this parameter. If neither this - // parameter is set nor a default child policy was specified at registration time - // then a fault is returned. + // A + // child policy for this workflow execution must be specified either as a default + // for the workflow type or through this parameter. If neither this parameter is + // set nor a default child policy was specified at registration time then a fault + // is returned. ChildPolicy ChildPolicy // The data attached to the event that can be used by the decider in subsequent @@ -2543,17 +2537,16 @@ type StartChildWorkflowExecutionInitiatedEventAttributes struct { // terminated by explicitly calling the TerminateWorkflowExecution action or due to // an expired timeout. The supported child policies are: // - // * TERMINATE – The - // child executions are terminated. + // * TERMINATE – The child + // executions are terminated. 
// - // * REQUEST_CANCEL – A request to cancel is - // attempted for each child execution by recording a - // WorkflowExecutionCancelRequested event in its history. It is up to the decider - // to take appropriate actions when it receives an execution history with this - // event. + // * REQUEST_CANCEL – A request to cancel is attempted + // for each child execution by recording a WorkflowExecutionCancelRequested event + // in its history. It is up to the decider to take appropriate actions when it + // receives an execution history with this event. // - // * ABANDON – No action is taken. The child executions continue to - // run. + // * ABANDON – No action is taken. + // The child executions continue to run. // // This member is required. ChildPolicy ChildPolicy @@ -2638,16 +2631,16 @@ type StartLambdaFunctionFailedEventAttributes struct { } // Provides the details of the StartTimer decision. Access Control You can use IAM -// policies to control this decision's access to Amazon SWF resources as follows: -// +// policies to control this decision's access to Amazon SWF resources as +// follows: // -// * Use a Resource element with the domain name to limit the action to only -// specified domains. +// * Use a Resource element with the domain name to limit the action to +// only specified domains. // -// * Use an Action element to allow or deny permission to +// * Use an Action element to allow or deny permission to // call this action. // -// * You cannot use an IAM policy to constrain this action's +// * You cannot use an IAM policy to constrain this action's // parameters. // // If the caller doesn't have sufficient permissions to invoke the @@ -2867,17 +2860,16 @@ type WorkflowExecutionConfiguration struct { // is terminated, by calling the TerminateWorkflowExecution action explicitly or // due to an expired timeout. The supported child policies are: // - // * TERMINATE – - // The child executions are terminated. + // * TERMINATE – The + // child executions are terminated. // - // * REQUEST_CANCEL – A request to cancel - // is attempted for each child execution by recording a + // * REQUEST_CANCEL – A request to cancel is + // attempted for each child execution by recording a // WorkflowExecutionCancelRequested event in its history. It is up to the decider // to take appropriate actions when it receives an execution history with this // event. // - // * ABANDON – No action is taken. The child executions continue to - // run. + // * ABANDON – No action is taken. The child executions continue to run. // // This member is required. ChildPolicy ChildPolicy @@ -2920,17 +2912,16 @@ type WorkflowExecutionContinuedAsNewEventAttributes struct { // is terminated by calling the TerminateWorkflowExecution action explicitly or due // to an expired timeout. The supported child policies are: // - // * TERMINATE – The + // * TERMINATE – The // child executions are terminated. // - // * REQUEST_CANCEL – A request to cancel is + // * REQUEST_CANCEL – A request to cancel is // attempted for each child execution by recording a // WorkflowExecutionCancelRequested event in its history. It is up to the decider // to take appropriate actions when it receives an execution history with this // event. // - // * ABANDON – No action is taken. The child executions continue to - // run. + // * ABANDON – No action is taken. The child executions continue to run. // // This member is required. 
ChildPolicy ChildPolicy @@ -3039,24 +3030,24 @@ type WorkflowExecutionInfo struct { // If the execution status is closed then this specifies how the execution was // closed: // - // * COMPLETED – the execution was successfully completed. + // * COMPLETED – the execution was successfully completed. // - // * - // CANCELED – the execution was canceled.Cancellation allows the implementation to - // gracefully clean up before the execution is closed. + // * CANCELED – + // the execution was canceled.Cancellation allows the implementation to gracefully + // clean up before the execution is closed. // - // * TERMINATED – the - // execution was force terminated. + // * TERMINATED – the execution was force + // terminated. // - // * FAILED – the execution failed to - // complete. + // * FAILED – the execution failed to complete. // - // * TIMED_OUT – the execution did not complete in the alloted time - // and was automatically timed out. + // * TIMED_OUT – the + // execution did not complete in the alloted time and was automatically timed + // out. // - // * CONTINUED_AS_NEW – the execution is - // logically continued. This means the current execution was completed and a new - // execution was started to carry on the workflow. + // * CONTINUED_AS_NEW – the execution is logically continued. This means the + // current execution was completed and a new execution was started to carry on the + // workflow. CloseStatus CloseStatus // The time when the workflow execution was closed. Set only if the execution @@ -3135,17 +3126,16 @@ type WorkflowExecutionStartedEventAttributes struct { // is terminated, by calling the TerminateWorkflowExecution action explicitly or // due to an expired timeout. The supported child policies are: // - // * TERMINATE – - // The child executions are terminated. + // * TERMINATE – The + // child executions are terminated. // - // * REQUEST_CANCEL – A request to cancel - // is attempted for each child execution by recording a + // * REQUEST_CANCEL – A request to cancel is + // attempted for each child execution by recording a // WorkflowExecutionCancelRequested event in its history. It is up to the decider // to take appropriate actions when it receives an execution history with this // event. // - // * ABANDON – No action is taken. The child executions continue to - // run. + // * ABANDON – No action is taken. The child executions continue to run. // // This member is required. ChildPolicy ChildPolicy @@ -3207,16 +3197,16 @@ type WorkflowExecutionTerminatedEventAttributes struct { // The policy used for the child workflow executions of this workflow execution. // The supported child policies are: // - // * TERMINATE – The child executions are + // * TERMINATE – The child executions are // terminated. // - // * REQUEST_CANCEL – A request to cancel is attempted for each - // child execution by recording a WorkflowExecutionCancelRequested event in its - // history. It is up to the decider to take appropriate actions when it receives an + // * REQUEST_CANCEL – A request to cancel is attempted for each child + // execution by recording a WorkflowExecutionCancelRequested event in its history. + // It is up to the decider to take appropriate actions when it receives an // execution history with this event. // - // * ABANDON – No action is taken. The - // child executions continue to run. + // * ABANDON – No action is taken. The child + // executions continue to run. // // This member is required. 
ChildPolicy ChildPolicy @@ -3239,16 +3229,16 @@ type WorkflowExecutionTimedOutEventAttributes struct { // The policy used for the child workflow executions of this workflow execution. // The supported child policies are: // - // * TERMINATE – The child executions are + // * TERMINATE – The child executions are // terminated. // - // * REQUEST_CANCEL – A request to cancel is attempted for each - // child execution by recording a WorkflowExecutionCancelRequested event in its - // history. It is up to the decider to take appropriate actions when it receives an + // * REQUEST_CANCEL – A request to cancel is attempted for each child + // execution by recording a WorkflowExecutionCancelRequested event in its history. + // It is up to the decider to take appropriate actions when it receives an // execution history with this event. // - // * ABANDON – No action is taken. The - // child executions continue to run. + // * ABANDON – No action is taken. The child + // executions continue to run. // // This member is required. ChildPolicy ChildPolicy @@ -3284,17 +3274,16 @@ type WorkflowTypeConfiguration struct { // when starting a workflow execution using the StartWorkflowExecution action or // the StartChildWorkflowExecutionDecision. The supported child policies are: // + // * + // TERMINATE – The child executions are terminated. // - // * TERMINATE – The child executions are terminated. - // - // * REQUEST_CANCEL – A - // request to cancel is attempted for each child execution by recording a + // * REQUEST_CANCEL – A request + // to cancel is attempted for each child execution by recording a // WorkflowExecutionCancelRequested event in its history. It is up to the decider // to take appropriate actions when it receives an execution history with this // event. // - // * ABANDON – No action is taken. The child executions continue to - // run. + // * ABANDON – No action is taken. The child executions continue to run. DefaultChildPolicy ChildPolicy // The default maximum duration, specified when registering the workflow type, for diff --git a/service/synthetics/api_op_CreateCanary.go b/service/synthetics/api_op_CreateCanary.go index 2da153f9758..2ca561dbfb9 100644 --- a/service/synthetics/api_op_CreateCanary.go +++ b/service/synthetics/api_op_CreateCanary.go @@ -62,22 +62,21 @@ type CreateCanaryInput struct { // exist, and must include lambda.amazonaws.com as a principal in the trust policy. // The role must also have the following permissions: // - // * s3:PutObject + // * s3:PutObject // - // * + // * // s3:GetBucketLocation // - // * s3:ListAllMyBuckets + // * s3:ListAllMyBuckets // - // * - // cloudwatch:PutMetricData + // * cloudwatch:PutMetricData // - // * logs:CreateLogGroup + // * + // logs:CreateLogGroup // - // * - // logs:CreateLogStream + // * logs:CreateLogStream // - // * logs:PutLogEvents + // * logs:PutLogEvents // // This member is required. ExecutionRoleArn *string diff --git a/service/synthetics/api_op_DeleteCanary.go b/service/synthetics/api_op_DeleteCanary.go index d476a9fb6fa..b49f46485a7 100644 --- a/service/synthetics/api_op_DeleteCanary.go +++ b/service/synthetics/api_op_DeleteCanary.go @@ -15,28 +15,27 @@ import ( // canary that you do not intend to use again, you should also delete the // following: // -// * The Lambda functions and layers used by this canary. These -// have the prefix cwsyn-MyCanaryName . +// * The Lambda functions and layers used by this canary. These have +// the prefix cwsyn-MyCanaryName . // -// * The CloudWatch alarms created for -// this canary. 
These alarms have a name of Synthetics-SharpDrop-Alarm-MyCanaryName -// . +// * The CloudWatch alarms created for this +// canary. These alarms have a name of Synthetics-SharpDrop-Alarm-MyCanaryName . // -// * Amazon S3 objects and buckets, such as the canary's artifact -// location. +// * +// Amazon S3 objects and buckets, such as the canary's artifact location. // -// * IAM roles created for the canary. If they were created in the -// console, these roles have the name -// role/service-role/CloudWatchSyntheticsRole-MyCanaryName . +// * IAM +// roles created for the canary. If they were created in the console, these roles +// have the name role/service-role/CloudWatchSyntheticsRole-MyCanaryName . // -// * CloudWatch Logs -// log groups created for the canary. These logs groups have the name -// /aws/lambda/cwsyn-MyCanaryName . +// * +// CloudWatch Logs log groups created for the canary. These logs groups have the +// name /aws/lambda/cwsyn-MyCanaryName . // -// Before you delete a canary, you might want to -// use GetCanary to display the information about this canary. Make note of the -// information returned by this operation so that you can delete these resources -// after you delete the canary. +// Before you delete a canary, you might +// want to use GetCanary to display the information about this canary. Make note of +// the information returned by this operation so that you can delete these +// resources after you delete the canary. func (c *Client) DeleteCanary(ctx context.Context, params *DeleteCanaryInput, optFns ...func(*Options)) (*DeleteCanaryOutput, error) { if params == nil { params = &DeleteCanaryInput{} diff --git a/service/synthetics/api_op_UpdateCanary.go b/service/synthetics/api_op_UpdateCanary.go index 473332dc8e7..4fd99262e1c 100644 --- a/service/synthetics/api_op_UpdateCanary.go +++ b/service/synthetics/api_op_UpdateCanary.go @@ -49,22 +49,21 @@ type UpdateCanaryInput struct { // exist, and must include lambda.amazonaws.com as a principal in the trust policy. // The role must also have the following permissions: // - // * s3:PutObject + // * s3:PutObject // - // * + // * // s3:GetBucketLocation // - // * s3:ListAllMyBuckets + // * s3:ListAllMyBuckets // - // * - // cloudwatch:PutMetricData + // * cloudwatch:PutMetricData // - // * logs:CreateLogGroup + // * + // logs:CreateLogGroup // - // * - // logs:CreateLogStream + // * logs:CreateLogStream // - // * logs:CreateLogStream + // * logs:CreateLogStream ExecutionRoleArn *string // The number of days to retain data about failed runs of this canary. diff --git a/service/synthetics/types/enums.go b/service/synthetics/types/enums.go index dfe615c5f2d..41cc4d17509 100644 --- a/service/synthetics/types/enums.go +++ b/service/synthetics/types/enums.go @@ -26,8 +26,8 @@ type CanaryRunStateReasonCode string // Enum values for CanaryRunStateReasonCode const ( - CanaryRunStateReasonCodeCanary_failure CanaryRunStateReasonCode = "CANARY_FAILURE" - CanaryRunStateReasonCodeExecution_failure CanaryRunStateReasonCode = "EXECUTION_FAILURE" + CanaryRunStateReasonCodeCanaryFailure CanaryRunStateReasonCode = "CANARY_FAILURE" + CanaryRunStateReasonCodeExecutionFailure CanaryRunStateReasonCode = "EXECUTION_FAILURE" ) // Values returns all known values for CanaryRunStateReasonCode. 
Note that this can @@ -76,7 +76,7 @@ type CanaryStateReasonCode string // Enum values for CanaryStateReasonCode const ( - CanaryStateReasonCodeInvalid_permissions CanaryStateReasonCode = "INVALID_PERMISSIONS" + CanaryStateReasonCodeInvalidPermissions CanaryStateReasonCode = "INVALID_PERMISSIONS" ) // Values returns all known values for CanaryStateReasonCode. Note that this can be diff --git a/service/textract/api_op_AnalyzeDocument.go b/service/textract/api_op_AnalyzeDocument.go index 77fd50647bb..2ffa45169e7 100644 --- a/service/textract/api_op_AnalyzeDocument.go +++ b/service/textract/api_op_AnalyzeDocument.go @@ -14,23 +14,23 @@ import ( // Analyzes an input document for relationships between detected items. The types // of information returned are as follows: // -// * Form data (key-value pairs). The +// * Form data (key-value pairs). The // related information is returned in two Block objects, each of type // KEY_VALUE_SET: a KEY Block object and a VALUE Block object. For example, Name: // Ana Silva Carolina contains a key and value. Name: is the key. Ana Silva // Carolina is the value. // -// * Table and table cell data. A TABLE Block object +// * Table and table cell data. A TABLE Block object // contains information about a detected table. A CELL Block object is returned for // each cell in a table. // -// * Lines and words of text. A LINE Block object -// contains one or more WORD Block objects. All lines and words that are detected -// in the document are returned (including text that doesn't have a relationship -// with the value of FeatureTypes). +// * Lines and words of text. A LINE Block object contains +// one or more WORD Block objects. All lines and words that are detected in the +// document are returned (including text that doesn't have a relationship with the +// value of FeatureTypes). // -// Selection elements such as check boxes and -// option buttons (radio buttons) can be detected in form data and in tables. A +// Selection elements such as check boxes and option +// buttons (radio buttons) can be detected in form data and in tables. A // SELECTION_ELEMENT Block object contains information about a selection element, // including the selection status. You can choose which type of analysis to perform // by specifying the FeatureTypes list. The output is returned in a list of Block diff --git a/service/textract/api_op_GetDocumentAnalysis.go b/service/textract/api_op_GetDocumentAnalysis.go index 0280c34cbd3..80a948ebb31 100644 --- a/service/textract/api_op_GetDocumentAnalysis.go +++ b/service/textract/api_op_GetDocumentAnalysis.go @@ -23,16 +23,16 @@ import ( // GetDocumentAnalysis returns an array of Block objects. The following types of // information are returned: // -// * Form data (key-value pairs). The related +// * Form data (key-value pairs). The related // information is returned in two Block objects, each of type KEY_VALUE_SET: a KEY // Block object and a VALUE Block object. For example, Name: Ana Silva Carolina // contains a key and value. Name: is the key. Ana Silva Carolina is the value. // -// -// * Table and table cell data. A TABLE Block object contains information about a +// * +// Table and table cell data. A TABLE Block object contains information about a // detected table. A CELL Block object is returned for each cell in a table. // -// * +// * // Lines and words of text. A LINE Block object contains one or more WORD Block // objects. 
All lines and words that are detected in the document are returned // (including text that doesn't have a relationship with the value of the diff --git a/service/textract/types/enums.go b/service/textract/types/enums.go index 01b60d4004b..783587b5f45 100644 --- a/service/textract/types/enums.go +++ b/service/textract/types/enums.go @@ -6,13 +6,13 @@ type BlockType string // Enum values for BlockType const ( - BlockTypeKey_value_set BlockType = "KEY_VALUE_SET" - BlockTypePage BlockType = "PAGE" - BlockTypeLine BlockType = "LINE" - BlockTypeWord BlockType = "WORD" - BlockTypeTable BlockType = "TABLE" - BlockTypeCell BlockType = "CELL" - BlockTypeSelection_element BlockType = "SELECTION_ELEMENT" + BlockTypeKeyValueSet BlockType = "KEY_VALUE_SET" + BlockTypePage BlockType = "PAGE" + BlockTypeLine BlockType = "LINE" + BlockTypeWord BlockType = "WORD" + BlockTypeTable BlockType = "TABLE" + BlockTypeCell BlockType = "CELL" + BlockTypeSelectionElement BlockType = "SELECTION_ELEMENT" ) // Values returns all known values for BlockType. Note that this can be expanded in @@ -34,8 +34,8 @@ type ContentClassifier string // Enum values for ContentClassifier const ( - ContentClassifierFree_of_personally_identifiable_information ContentClassifier = "FreeOfPersonallyIdentifiableInformation" - ContentClassifierFree_of_adult_content ContentClassifier = "FreeOfAdultContent" + ContentClassifierFreeOfPersonallyIdentifiableInformation ContentClassifier = "FreeOfPersonallyIdentifiableInformation" + ContentClassifierFreeOfAdultContent ContentClassifier = "FreeOfAdultContent" ) // Values returns all known values for ContentClassifier. Note that this can be @@ -88,10 +88,10 @@ type JobStatus string // Enum values for JobStatus const ( - JobStatusIn_progress JobStatus = "IN_PROGRESS" - JobStatusSucceeded JobStatus = "SUCCEEDED" - JobStatusFailed JobStatus = "FAILED" - JobStatusPartial_success JobStatus = "PARTIAL_SUCCESS" + JobStatusInProgress JobStatus = "IN_PROGRESS" + JobStatusSucceeded JobStatus = "SUCCEEDED" + JobStatusFailed JobStatus = "FAILED" + JobStatusPartialSuccess JobStatus = "PARTIAL_SUCCESS" ) // Values returns all known values for JobStatus. Note that this can be expanded in @@ -110,9 +110,9 @@ type RelationshipType string // Enum values for RelationshipType const ( - RelationshipTypeValue RelationshipType = "VALUE" - RelationshipTypeChild RelationshipType = "CHILD" - RelationshipTypeComplex_features RelationshipType = "COMPLEX_FEATURES" + RelationshipTypeValue RelationshipType = "VALUE" + RelationshipTypeChild RelationshipType = "CHILD" + RelationshipTypeComplexFeatures RelationshipType = "COMPLEX_FEATURES" ) // Values returns all known values for RelationshipType. Note that this can be @@ -130,8 +130,8 @@ type SelectionStatus string // Enum values for SelectionStatus const ( - SelectionStatusSelected SelectionStatus = "SELECTED" - SelectionStatusNot_selected SelectionStatus = "NOT_SELECTED" + SelectionStatusSelected SelectionStatus = "SELECTED" + SelectionStatusNotSelected SelectionStatus = "NOT_SELECTED" ) // Values returns all known values for SelectionStatus. Note that this can be diff --git a/service/textract/types/types.go b/service/textract/types/types.go index 059bc01440f..6cbdef90ff8 100644 --- a/service/textract/types/types.go +++ b/service/textract/types/types.go @@ -19,46 +19,45 @@ type Block struct { // The type of text item that's recognized. 
In operations for text detection, the // following types are returned: // - // * PAGE - Contains a list of the LINE Block + // * PAGE - Contains a list of the LINE Block // objects that are detected on a document page. // - // * WORD - A word detected on a + // * WORD - A word detected on a // document page. A word is one or more ISO basic Latin script characters that // aren't separated by spaces. // - // * LINE - A string of tab-delimited, contiguous + // * LINE - A string of tab-delimited, contiguous // words that are detected on a document page. // // In text analysis operations, the // following types are returned: // - // * PAGE - Contains a list of child Block - // objects that are detected on a document page. + // * PAGE - Contains a list of child Block objects + // that are detected on a document page. // - // * KEY_VALUE_SET - Stores the - // KEY and VALUE Block objects for linked text that's detected on a document page. - // Use the EntityType field to determine if a KEY_VALUE_SET object is a KEY Block - // object or a VALUE Block object. + // * KEY_VALUE_SET - Stores the KEY and + // VALUE Block objects for linked text that's detected on a document page. Use the + // EntityType field to determine if a KEY_VALUE_SET object is a KEY Block object or + // a VALUE Block object. // - // * WORD - A word that's detected on a - // document page. A word is one or more ISO basic Latin script characters that - // aren't separated by spaces. + // * WORD - A word that's detected on a document page. A + // word is one or more ISO basic Latin script characters that aren't separated by + // spaces. // - // * LINE - A string of tab-delimited, contiguous - // words that are detected on a document page. + // * LINE - A string of tab-delimited, contiguous words that are detected + // on a document page. // - // * TABLE - A table that's - // detected on a document page. A table is grid-based information with two or more - // rows or columns, with a cell span of one row and one column each. + // * TABLE - A table that's detected on a document page. A + // table is grid-based information with two or more rows or columns, with a cell + // span of one row and one column each. // - // * CELL - - // A cell within a detected table. The cell is the parent of the block that - // contains the text in the cell. + // * CELL - A cell within a detected table. + // The cell is the parent of the block that contains the text in the cell. // - // * SELECTION_ELEMENT - A selection element - // such as an option button (radio button) or a check box that's detected on a - // document page. Use the value of SelectionStatus to determine the status of the - // selection element. + // * + // SELECTION_ELEMENT - A selection element such as an option button (radio button) + // or a check box that's detected on a document page. Use the value of + // SelectionStatus to determine the status of the selection element. BlockType BlockType // The column in which a table cell appears. The first column position is 1. @@ -76,13 +75,13 @@ type Block struct { // The type of entity. The following can be returned: // - // * KEY - An identifier - // for a field on the document. + // * KEY - An identifier for a + // field on the document. // - // * VALUE - The field text. + // * VALUE - The field text. // - // EntityTypes isn't - // returned by DetectDocumentText and GetDocumentTextDetection. + // EntityTypes isn't returned by + // DetectDocumentText and GetDocumentTextDetection. 
EntityTypes []EntityType // The location of the recognized text on the image. It includes an axis-aligned, @@ -108,10 +107,10 @@ type Block struct { // when the current block has no child blocks. The list size can be the // following: // - // * 0 - The block has no child blocks. + // * 0 - The block has no child blocks. // - // * 1 - The block has - // child blocks. + // * 1 - The block has child + // blocks. Relationships []*Relationship // The row in which a table cell is located. The first row position is 1. RowIndex diff --git a/service/timestreamquery/api_op_DescribeEndpoints.go b/service/timestreamquery/api_op_DescribeEndpoints.go index 4337deedf4c..742cd408332 100644 --- a/service/timestreamquery/api_op_DescribeEndpoints.go +++ b/service/timestreamquery/api_op_DescribeEndpoints.go @@ -17,14 +17,14 @@ import ( // architecture, including the management and mapping of the service endpoints, it // is not recommended that you use this API unless: // -// * Your application uses a +// * Your application uses a // programming language that does not yet have SDK support // -// * You require -// better control over the client-side implementation +// * You require better +// control over the client-side implementation // -// For detailed information on -// how to use DescribeEndpoints, see The Endpoint Discovery Pattern and REST APIs +// For detailed information on how to +// use DescribeEndpoints, see The Endpoint Discovery Pattern and REST APIs // (https://docs.aws.amazon.com/timestream/latest/developerguide/Using-API.endpoint-discovery.html). func (c *Client) DescribeEndpoints(ctx context.Context, params *DescribeEndpointsInput, optFns ...func(*Options)) (*DescribeEndpointsOutput, error) { if params == nil { diff --git a/service/timestreamquery/api_op_Query.go b/service/timestreamquery/api_op_Query.go index 9f28f3f0733..f68c455afca 100644 --- a/service/timestreamquery/api_op_Query.go +++ b/service/timestreamquery/api_op_Query.go @@ -44,12 +44,12 @@ type QueryInput struct { // idempotent, meaning that multiple identical calls have the same effect as one // single call. Your query request will fail in the following cases: // - // * If you + // * If you // submit a request with the same client token outside the 5-minute idepotency // window. // - // * If you submit a request with the same client token but a change - // in other parameters within the 5-minute idempotency window. + // * If you submit a request with the same client token but a change in + // other parameters within the 5-minute idempotency window. // // After 4 hours, any // request with the same client token is treated as a new request. 
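As an aside to the Query hunk above: the comment it reflows documents the client-token idempotency window for Timestream queries. A minimal sketch of a caller is shown below; it is not part of the patch, and it assumes the aws-sdk-go-v2 config loader, that the reflowed comment belongs to QueryInput's ClientToken field, a QueryString field on the same input, and a made-up token value. It also relies on the SDK's transparent endpoint management described in the DescribeEndpoints note earlier in this file's diff.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/timestreamquery"
)

func main() {
	ctx := context.Background()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := timestreamquery.NewFromConfig(cfg)

	// Reusing the same client token lets an identical request be retried safely
	// within the 5-minute idempotency window; reusing it with different
	// parameters inside that window fails, as the comment above describes.
	out, err := client.Query(ctx, &timestreamquery.QueryInput{
		QueryString: aws.String("SELECT 1"),               // hypothetical query
		ClientToken: aws.String("example-client-token-1"), // hypothetical token
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("query returned %d row(s)\n", len(out.Rows))
}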
diff --git a/service/timestreamquery/types/enums.go b/service/timestreamquery/types/enums.go index 7cc0d2c2600..763041e72b4 100644 --- a/service/timestreamquery/types/enums.go +++ b/service/timestreamquery/types/enums.go @@ -6,17 +6,17 @@ type ScalarType string // Enum values for ScalarType const ( - ScalarTypeVarchar ScalarType = "VARCHAR" - ScalarTypeBoolean ScalarType = "BOOLEAN" - ScalarTypeBigint ScalarType = "BIGINT" - ScalarTypeDouble ScalarType = "DOUBLE" - ScalarTypeTimestamp ScalarType = "TIMESTAMP" - ScalarTypeDate ScalarType = "DATE" - ScalarTypeTime ScalarType = "TIME" - ScalarTypeInterval_day_to_second ScalarType = "INTERVAL_DAY_TO_SECOND" - ScalarTypeInterval_year_to_month ScalarType = "INTERVAL_YEAR_TO_MONTH" - ScalarTypeUnknown ScalarType = "UNKNOWN" - ScalarTypeInteger ScalarType = "INTEGER" + ScalarTypeVarchar ScalarType = "VARCHAR" + ScalarTypeBoolean ScalarType = "BOOLEAN" + ScalarTypeBigint ScalarType = "BIGINT" + ScalarTypeDouble ScalarType = "DOUBLE" + ScalarTypeTimestamp ScalarType = "TIMESTAMP" + ScalarTypeDate ScalarType = "DATE" + ScalarTypeTime ScalarType = "TIME" + ScalarTypeIntervalDayToSecond ScalarType = "INTERVAL_DAY_TO_SECOND" + ScalarTypeIntervalYearToMonth ScalarType = "INTERVAL_YEAR_TO_MONTH" + ScalarTypeUnknown ScalarType = "UNKNOWN" + ScalarTypeInteger ScalarType = "INTEGER" ) // Values returns all known values for ScalarType. Note that this can be expanded diff --git a/service/timestreamwrite/api_op_DescribeEndpoints.go b/service/timestreamwrite/api_op_DescribeEndpoints.go index 214bc0f80c7..f21dfc70b0a 100644 --- a/service/timestreamwrite/api_op_DescribeEndpoints.go +++ b/service/timestreamwrite/api_op_DescribeEndpoints.go @@ -17,14 +17,14 @@ import ( // architecture, including the management and mapping of the service endpoints, it // is not recommended that you use this API unless: // -// * Your application uses a +// * Your application uses a // programming language that does not yet have SDK support // -// * You require -// better control over the client-side implementation +// * You require better +// control over the client-side implementation // -// For detailed information on -// how to use DescribeEndpoints, see The Endpoint Discovery Pattern and REST APIs +// For detailed information on how to +// use DescribeEndpoints, see The Endpoint Discovery Pattern and REST APIs // (https://docs.aws.amazon.com/timestream/latest/developerguide/Using-API.endpoint-discovery.html). func (c *Client) DescribeEndpoints(ctx context.Context, params *DescribeEndpointsInput, optFns ...func(*Options)) (*DescribeEndpointsOutput, error) { if params == nil { diff --git a/service/timestreamwrite/api_op_UpdateDatabase.go b/service/timestreamwrite/api_op_UpdateDatabase.go index b1e08c41fde..69fe1e6e745 100644 --- a/service/timestreamwrite/api_op_UpdateDatabase.go +++ b/service/timestreamwrite/api_op_UpdateDatabase.go @@ -42,16 +42,16 @@ type UpdateDatabaseInput struct { // is the same as the KmsKeyId in the request, there will not be any update. You // can specify the KmsKeyId using any of the following: // - // * Key ID: + // * Key ID: // 1234abcd-12ab-34cd-56ef-1234567890ab // - // * Key ARN: + // * Key ARN: // arn:aws:kms:us-east-1:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab // + // * + // Alias name: alias/ExampleAlias // - // * Alias name: alias/ExampleAlias - // - // * Alias ARN: + // * Alias ARN: // arn:aws:kms:us-east-1:111122223333:alias/ExampleAlias // // This member is required. 
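Before the patch moves on to the timestreamwrite error types, a short illustrative sketch of the renamed ScalarType constants from the timestreamquery enums.go hunk above. The switch below is not part of the generated code; it only shows how calling code would reference the new CamelCase names (for example ScalarTypeIntervalDayToSecond) after this change, and the helper name is made up.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/timestreamquery/types"
)

// describeScalar is a hypothetical helper that groups the renamed ScalarType
// constants into coarse categories.
func describeScalar(t types.ScalarType) string {
	switch t {
	case types.ScalarTypeIntervalDayToSecond, types.ScalarTypeIntervalYearToMonth:
		return "interval"
	case types.ScalarTypeTimestamp, types.ScalarTypeDate, types.ScalarTypeTime:
		return "temporal"
	case types.ScalarTypeBigint, types.ScalarTypeInteger, types.ScalarTypeDouble:
		return "numeric"
	case types.ScalarTypeVarchar, types.ScalarTypeBoolean:
		return "scalar"
	default:
		return string(t) // e.g. "UNKNOWN"
	}
}

func main() {
	fmt.Println(describeScalar(types.ScalarTypeIntervalDayToSecond)) // prints "interval"
}

Code still referencing the old underscore-style names (ScalarTypeInterval_day_to_second and the like) will no longer compile against the updated module, which is the practical effect of the renames in this commit.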
diff --git a/service/timestreamwrite/types/errors.go b/service/timestreamwrite/types/errors.go index b3624ab170a..95cd07524d6 100644 --- a/service/timestreamwrite/types/errors.go +++ b/service/timestreamwrite/types/errors.go @@ -79,14 +79,14 @@ func (e *InvalidEndpointException) ErrorFault() smithy.ErrorFault { return smith // WriteRecords would throw this exception in the following cases: // -// * Records -// with duplicate data where there are multiple records with the same dimensions, +// * Records with +// duplicate data where there are multiple records with the same dimensions, // timestamps, and measure names but different measure values. // -// * Records with +// * Records with // timestamps that lie outside the retention duration of the memory store // -// * +// * // Records with dimensions or measures that exceed the Timestream defined // limits. // diff --git a/service/timestreamwrite/types/types.go b/service/timestreamwrite/types/types.go index cabc2b71bdc..a65fb3003b4 100644 --- a/service/timestreamwrite/types/types.go +++ b/service/timestreamwrite/types/types.go @@ -108,18 +108,18 @@ type RejectedRecord struct { // The reason why a record was not successfully inserted into Timestream. Possible // causes of failure include: // - // * Records with duplicate data where there are + // * Records with duplicate data where there are // multiple records with the same dimensions, timestamps, and measure names but // different measure values. // - // * Records with timestamps that lie outside the + // * Records with timestamps that lie outside the // retention duration of the memory store // - // * Records with dimensions or - // measures that exceed the Timestream defined limits. + // * Records with dimensions or measures + // that exceed the Timestream defined limits. // - // For more information, see - // Access Management + // For more information, see Access + // Management // (https://docs.aws.amazon.com/timestream/latest/developerguide/ts-limits.html) in // the Timestream Developer Guide. Reason *string @@ -169,10 +169,10 @@ type Table struct { // The current state of the table: // - // * DELETING - The table is being deleted. + // * DELETING - The table is being deleted. // - // - // * ACTIVE - The table is ready for use. + // * + // ACTIVE - The table is ready for use. TableStatus TableStatus } diff --git a/service/transcribe/api_op_StartMedicalTranscriptionJob.go b/service/transcribe/api_op_StartMedicalTranscriptionJob.go index 43e95bbbf1a..9bd2573cb54 100644 --- a/service/transcribe/api_op_StartMedicalTranscriptionJob.go +++ b/service/transcribe/api_op_StartMedicalTranscriptionJob.go @@ -95,26 +95,26 @@ type StartMedicalTranscriptionJobInput struct { // KMS key. 
You use either of the following to identify a KMS key in the current // account: // - // * KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" + // * KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" // - // * KMS - // Key Alias: "alias/ExampleAlias" + // * KMS Key Alias: + // "alias/ExampleAlias" // - // You can use either of the following to identify - // a KMS key in the current account or another account: + // You can use either of the following to identify a KMS key + // in the current account or another account: // - // * Amazon Resource Name - // (ARN) of a KMS key in the current account or another account: - // "arn:aws:kms:region:account ID:key/1234abcd-12ab-34cd-56ef-1234567890ab" + // * Amazon Resource Name (ARN) of a + // KMS key in the current account or another account: "arn:aws:kms:region:account + // ID:key/1234abcd-12ab-34cd-56ef-1234567890ab" // - // * - // ARN of a KMS Key Alias: "arn:aws:kms:region:account ID:alias/ExampleAlias" + // * ARN of a KMS Key Alias: + // "arn:aws:kms:region:account ID:alias/ExampleAlias" // - // If - // you don't specify an encryption key, the output of the medical transcription job - // is encrypted with the default Amazon S3 key (SSE-S3). If you specify a KMS key - // to encrypt your output, you must also specify an output location in the - // OutputBucketName parameter. + // If you don't specify an + // encryption key, the output of the medical transcription job is encrypted with + // the default Amazon S3 key (SSE-S3). If you specify a KMS key to encrypt your + // output, you must also specify an output location in the OutputBucketName + // parameter. OutputEncryptionKMSKeyId *string // You can specify a location in an Amazon S3 bucket to store the output of your diff --git a/service/transcribe/api_op_StartTranscriptionJob.go b/service/transcribe/api_op_StartTranscriptionJob.go index 7dbe511ec54..56a299ba501 100644 --- a/service/transcribe/api_op_StartTranscriptionJob.go +++ b/service/transcribe/api_op_StartTranscriptionJob.go @@ -102,19 +102,19 @@ type StartTranscriptionJobInput struct { // key. You can use either of the following to identify a KMS key in the current // account: // - // * KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" + // * KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab" // - // * KMS - // Key Alias: "alias/ExampleAlias" + // * KMS Key Alias: + // "alias/ExampleAlias" // - // You can use either of the following to identify - // a KMS key in the current account or another account: + // You can use either of the following to identify a KMS key + // in the current account or another account: // - // * Amazon Resource Name - // (ARN) of a KMS Key: "arn:aws:kms:region:account + // * Amazon Resource Name (ARN) of a + // KMS Key: "arn:aws:kms:region:account // ID:key/1234abcd-12ab-34cd-56ef-1234567890ab" // - // * ARN of a KMS Key Alias: + // * ARN of a KMS Key Alias: // "arn:aws:kms:region:account ID:alias/ExampleAlias" // // If you don't specify an diff --git a/service/transcribe/types/enums.go b/service/transcribe/types/enums.go index c9c5cb6424e..7f67733e72f 100644 --- a/service/transcribe/types/enums.go +++ b/service/transcribe/types/enums.go @@ -6,8 +6,8 @@ type BaseModelName string // Enum values for BaseModelName const ( - BaseModelNameNarrow_band BaseModelName = "NarrowBand" - BaseModelNameWide_band BaseModelName = "WideBand" + BaseModelNameNarrowBand BaseModelName = "NarrowBand" + BaseModelNameWideBand BaseModelName = "WideBand" ) // Values returns all known values for BaseModelName. 
Note that this can be @@ -24,7 +24,7 @@ type CLMLanguageCode string // Enum values for CLMLanguageCode const ( - CLMLanguageCodeEn_us CLMLanguageCode = "en-US" + CLMLanguageCodeEnUs CLMLanguageCode = "en-US" ) // Values returns all known values for CLMLanguageCode. Note that this can be @@ -40,42 +40,42 @@ type LanguageCode string // Enum values for LanguageCode const ( - LanguageCodeAf_za LanguageCode = "af-ZA" - LanguageCodeAr_ae LanguageCode = "ar-AE" - LanguageCodeAr_sa LanguageCode = "ar-SA" - LanguageCodeCy_gb LanguageCode = "cy-GB" - LanguageCodeDa_dk LanguageCode = "da-DK" - LanguageCodeDe_ch LanguageCode = "de-CH" - LanguageCodeDe_de LanguageCode = "de-DE" - LanguageCodeEn_ab LanguageCode = "en-AB" - LanguageCodeEn_au LanguageCode = "en-AU" - LanguageCodeEn_gb LanguageCode = "en-GB" - LanguageCodeEn_ie LanguageCode = "en-IE" - LanguageCodeEn_in LanguageCode = "en-IN" - LanguageCodeEn_us LanguageCode = "en-US" - LanguageCodeEn_wl LanguageCode = "en-WL" - LanguageCodeEs_es LanguageCode = "es-ES" - LanguageCodeEs_us LanguageCode = "es-US" - LanguageCodeFa_ir LanguageCode = "fa-IR" - LanguageCodeFr_ca LanguageCode = "fr-CA" - LanguageCodeFr_fr LanguageCode = "fr-FR" - LanguageCodeGa_ie LanguageCode = "ga-IE" - LanguageCodeGd_gb LanguageCode = "gd-GB" - LanguageCodeHe_il LanguageCode = "he-IL" - LanguageCodeHi_in LanguageCode = "hi-IN" - LanguageCodeId_id LanguageCode = "id-ID" - LanguageCodeIt_it LanguageCode = "it-IT" - LanguageCodeJa_jp LanguageCode = "ja-JP" - LanguageCodeKo_kr LanguageCode = "ko-KR" - LanguageCodeMs_my LanguageCode = "ms-MY" - LanguageCodeNl_nl LanguageCode = "nl-NL" - LanguageCodePt_br LanguageCode = "pt-BR" - LanguageCodePt_pt LanguageCode = "pt-PT" - LanguageCodeRu_ru LanguageCode = "ru-RU" - LanguageCodeTa_in LanguageCode = "ta-IN" - LanguageCodeTe_in LanguageCode = "te-IN" - LanguageCodeTr_tr LanguageCode = "tr-TR" - LanguageCodeZh_cn LanguageCode = "zh-CN" + LanguageCodeAfZa LanguageCode = "af-ZA" + LanguageCodeArAe LanguageCode = "ar-AE" + LanguageCodeArSa LanguageCode = "ar-SA" + LanguageCodeCyGb LanguageCode = "cy-GB" + LanguageCodeDaDk LanguageCode = "da-DK" + LanguageCodeDeCh LanguageCode = "de-CH" + LanguageCodeDeDe LanguageCode = "de-DE" + LanguageCodeEnAb LanguageCode = "en-AB" + LanguageCodeEnAu LanguageCode = "en-AU" + LanguageCodeEnGb LanguageCode = "en-GB" + LanguageCodeEnIe LanguageCode = "en-IE" + LanguageCodeEnIn LanguageCode = "en-IN" + LanguageCodeEnUs LanguageCode = "en-US" + LanguageCodeEnWl LanguageCode = "en-WL" + LanguageCodeEsEs LanguageCode = "es-ES" + LanguageCodeEsUs LanguageCode = "es-US" + LanguageCodeFaIr LanguageCode = "fa-IR" + LanguageCodeFrCa LanguageCode = "fr-CA" + LanguageCodeFrFr LanguageCode = "fr-FR" + LanguageCodeGaIe LanguageCode = "ga-IE" + LanguageCodeGdGb LanguageCode = "gd-GB" + LanguageCodeHeIl LanguageCode = "he-IL" + LanguageCodeHiIn LanguageCode = "hi-IN" + LanguageCodeIdId LanguageCode = "id-ID" + LanguageCodeItIt LanguageCode = "it-IT" + LanguageCodeJaJp LanguageCode = "ja-JP" + LanguageCodeKoKr LanguageCode = "ko-KR" + LanguageCodeMsMy LanguageCode = "ms-MY" + LanguageCodeNlNl LanguageCode = "nl-NL" + LanguageCodePtBr LanguageCode = "pt-BR" + LanguageCodePtPt LanguageCode = "pt-PT" + LanguageCodeRuRu LanguageCode = "ru-RU" + LanguageCodeTaIn LanguageCode = "ta-IN" + LanguageCodeTeIn LanguageCode = "te-IN" + LanguageCodeTrTr LanguageCode = "tr-TR" + LanguageCodeZhCn LanguageCode = "zh-CN" ) // Values returns all known values for LanguageCode. 
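The renames above (for example LanguageCodeEn_us becoming LanguageCodeEnUs) change only the Go identifiers; the serialized string values are untouched, so requests on the wire are unaffected. A quick sanity check:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/transcribe/types"
)

func main() {
	// New CamelCase identifiers, same underlying strings as before.
	fmt.Println(types.LanguageCodeEnUs == types.LanguageCode("en-US"))          // true
	fmt.Println(types.BaseModelNameWideBand == types.BaseModelName("WideBand")) // true
}
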
Note that this can be expanded @@ -154,9 +154,9 @@ type ModelStatus string // Enum values for ModelStatus const ( - ModelStatusIn_progress ModelStatus = "IN_PROGRESS" - ModelStatusFailed ModelStatus = "FAILED" - ModelStatusCompleted ModelStatus = "COMPLETED" + ModelStatusInProgress ModelStatus = "IN_PROGRESS" + ModelStatusFailed ModelStatus = "FAILED" + ModelStatusCompleted ModelStatus = "COMPLETED" ) // Values returns all known values for ModelStatus. Note that this can be expanded @@ -174,8 +174,8 @@ type OutputLocationType string // Enum values for OutputLocationType const ( - OutputLocationTypeCustomer_bucket OutputLocationType = "CUSTOMER_BUCKET" - OutputLocationTypeService_bucket OutputLocationType = "SERVICE_BUCKET" + OutputLocationTypeCustomerBucket OutputLocationType = "CUSTOMER_BUCKET" + OutputLocationTypeServiceBucket OutputLocationType = "SERVICE_BUCKET" ) // Values returns all known values for OutputLocationType. Note that this can be @@ -192,8 +192,8 @@ type RedactionOutput string // Enum values for RedactionOutput const ( - RedactionOutputRedacted RedactionOutput = "redacted" - RedactionOutputRedacted_and_unredacted RedactionOutput = "redacted_and_unredacted" + RedactionOutputRedacted RedactionOutput = "redacted" + RedactionOutputRedactedAndUnredacted RedactionOutput = "redacted_and_unredacted" ) // Values returns all known values for RedactionOutput. Note that this can be @@ -242,10 +242,10 @@ type TranscriptionJobStatus string // Enum values for TranscriptionJobStatus const ( - TranscriptionJobStatusQueued TranscriptionJobStatus = "QUEUED" - TranscriptionJobStatusIn_progress TranscriptionJobStatus = "IN_PROGRESS" - TranscriptionJobStatusFailed TranscriptionJobStatus = "FAILED" - TranscriptionJobStatusCompleted TranscriptionJobStatus = "COMPLETED" + TranscriptionJobStatusQueued TranscriptionJobStatus = "QUEUED" + TranscriptionJobStatusInProgress TranscriptionJobStatus = "IN_PROGRESS" + TranscriptionJobStatusFailed TranscriptionJobStatus = "FAILED" + TranscriptionJobStatusCompleted TranscriptionJobStatus = "COMPLETED" ) // Values returns all known values for TranscriptionJobStatus. Note that this can diff --git a/service/transcribe/types/types.go b/service/transcribe/types/types.go index b104b0bde0f..63b0d9b2edb 100644 --- a/service/transcribe/types/types.go +++ b/service/transcribe/types/types.go @@ -136,34 +136,34 @@ type MedicalTranscriptionJob struct { // about why the job failed. The FailureReason field contains one of the following // values: // - // * Unsupported media format- The media format specified in the + // * Unsupported media format- The media format specified in the // MediaFormat field of the request isn't valid. See the description of the // MediaFormat field for a list of valid values. // - // * The media format provided - // does not match the detected media format- The media format of the audio file - // doesn't match the format specified in the MediaFormat field in the request. - // Check the media format of your media file and make sure the two values match. + // * The media format provided does + // not match the detected media format- The media format of the audio file doesn't + // match the format specified in the MediaFormat field in the request. Check the + // media format of your media file and make sure the two values match. // - // - // * Invalid sample rate for audio file- The sample rate specified in the + // * Invalid + // sample rate for audio file- The sample rate specified in the // MediaSampleRateHertz of the request isn't valid. 
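Using the renamed transcription status constants above, a small illustrative helper (not part of the SDK) that a polling caller might use to decide when a job has settled; only constants introduced in this diff are referenced.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/transcribe/types"
)

// jobFinished reports whether a transcription job has reached a terminal state.
func jobFinished(s types.TranscriptionJobStatus) bool {
	switch s {
	case types.TranscriptionJobStatusCompleted, types.TranscriptionJobStatusFailed:
		return true
	case types.TranscriptionJobStatusQueued, types.TranscriptionJobStatusInProgress:
		return false
	default:
		// The enum can gain values, as the generated comment notes; treat
		// anything unrecognized as still running.
		return false
	}
}

func main() { fmt.Println(jobFinished(types.TranscriptionJobStatusInProgress)) } // false
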
The sample rate must be between // 8000 and 48000 Hertz. // - // * The sample rate provided does not match the - // detected sample rate- The sample rate in the audio file doesn't match the sample - // rate specified in the MediaSampleRateHertz field in the request. Check the - // sample rate of your media file and make sure that the two values match. + // * The sample rate provided does not match the detected + // sample rate- The sample rate in the audio file doesn't match the sample rate + // specified in the MediaSampleRateHertz field in the request. Check the sample + // rate of your media file and make sure that the two values match. // - // * - // Invalid file size: file size too large- The size of your audio file is larger - // than what Amazon Transcribe Medical can process. For more information, see - // Guidelines and Quotas + // * Invalid file + // size: file size too large- The size of your audio file is larger than what + // Amazon Transcribe Medical can process. For more information, see Guidelines and + // Quotas // (https://docs.aws.amazon.com/transcribe/latest/dg/limits-guidelines.html#limits) // in the Amazon Transcribe Medical Guide // - // * Invalid number of channels: number - // of channels too large- Your audio contains more channels than Amazon Transcribe + // * Invalid number of channels: number of + // channels too large- Your audio contains more channels than Amazon Transcribe // Medical is configured to process. To request additional channels, see Amazon // Transcribe Medical Endpoints and Quotas // (https://docs.aws.amazon.com/general/latest/gr/transcribe-medical.html) in the @@ -200,7 +200,7 @@ type MedicalTranscriptionJob struct { // specialty enables you to generate transcriptions for the following medical // fields: // - // * Family Medicine + // * Family Medicine Specialty Specialty // A timestamp that shows when the job started processing. @@ -389,35 +389,35 @@ type TranscriptionJob struct { // about why the job failed. The FailureReason field can contain one of the // following values: // - // * Unsupported media format - The media format specified - // in the MediaFormat field of the request isn't valid. See the description of the + // * Unsupported media format - The media format specified in + // the MediaFormat field of the request isn't valid. See the description of the // MediaFormat field for a list of valid values. // - // * The media format provided - // does not match the detected media format - The media format of the audio file - // doesn't match the format specified in the MediaFormat field in the request. - // Check the media format of your media file and make sure that the two values - // match. - // - // * Invalid sample rate for audio file - The sample rate specified in - // the MediaSampleRateHertz of the request isn't valid. The sample rate must be - // between 8000 and 48000 Hertz. + // * The media format provided does + // not match the detected media format - The media format of the audio file doesn't + // match the format specified in the MediaFormat field in the request. Check the + // media format of your media file and make sure that the two values match. // - // * The sample rate provided does not match the - // detected sample rate - The sample rate in the audio file doesn't match the - // sample rate specified in the MediaSampleRateHertz field in the request. Check - // the sample rate of your media file and make sure that the two values match. 
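The 8000 to 48000 Hertz rule quoted above is easy to check before submitting a job and avoids the "Invalid sample rate for audio file" failure; a trivial guard, with the bounds taken directly from the comment:

package main

import "fmt"

// validSampleRateHz mirrors the documented bound for MediaSampleRateHertz.
func validSampleRateHz(hz int32) bool {
	return hz >= 8000 && hz <= 48000
}

func main() {
	fmt.Println(validSampleRateHz(44100)) // true
	fmt.Println(validSampleRateHz(96000)) // false
}
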
+ // * + // Invalid sample rate for audio file - The sample rate specified in the + // MediaSampleRateHertz of the request isn't valid. The sample rate must be between + // 8000 and 48000 Hertz. // + // * The sample rate provided does not match the detected + // sample rate - The sample rate in the audio file doesn't match the sample rate + // specified in the MediaSampleRateHertz field in the request. Check the sample + // rate of your media file and make sure that the two values match. // - // * Invalid file size: file size too large - The size of your audio file is larger - // than Amazon Transcribe can process. For more information, see Limits + // * Invalid file + // size: file size too large - The size of your audio file is larger than Amazon + // Transcribe can process. For more information, see Limits // (https://docs.aws.amazon.com/transcribe/latest/dg/limits-guidelines.html#limits) // in the Amazon Transcribe Developer Guide. // - // * Invalid number of channels: - // number of channels too large - Your audio contains more channels than Amazon - // Transcribe is configured to process. To request additional channels, see Amazon - // Transcribe Limits + // * Invalid number of channels: number + // of channels too large - Your audio contains more channels than Amazon Transcribe + // is configured to process. To request additional channels, see Amazon Transcribe + // Limits // (https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits-amazon-transcribe) // in the Amazon Web Services General Reference. FailureReason *string diff --git a/service/transfer/api_op_CreateServer.go b/service/transfer/api_op_CreateServer.go index 47e0eb905bf..5547aeb506c 100644 --- a/service/transfer/api_op_CreateServer.go +++ b/service/transfer/api_op_CreateServer.go @@ -45,22 +45,22 @@ type CreateServerInput struct { // in the AWS Certificate Manager User Guide. Certificates with the following // cryptographic algorithms and key sizes are supported: // - // * 2048-bit RSA + // * 2048-bit RSA // (RSA_2048) // - // * 4096-bit RSA (RSA_4096) + // * 4096-bit RSA (RSA_4096) // - // * Elliptic Prime Curve 256 bit + // * Elliptic Prime Curve 256 bit // (EC_prime256v1) // - // * Elliptic Prime Curve 384 bit (EC_secp384r1) + // * Elliptic Prime Curve 384 bit (EC_secp384r1) // - // * - // Elliptic Prime Curve 521 bit (EC_secp521r1) + // * Elliptic Prime + // Curve 521 bit (EC_secp521r1) // - // The certificate must be a valid - // SSL/TLS X.509 version 3 certificate with FQDN or IP address specified and - // information about the issuer. + // The certificate must be a valid SSL/TLS X.509 + // version 3 certificate with FQDN or IP address specified and information about + // the issuer. Certificate *string // The virtual private cloud (VPC) endpoint settings that are configured for your @@ -111,16 +111,16 @@ type CreateServerInput struct { // protocol client can connect to your server's endpoint. 
The available protocols // are: // - // * SFTP (Secure Shell (SSH) File Transfer Protocol): File transfer over + // * SFTP (Secure Shell (SSH) File Transfer Protocol): File transfer over // SSH // - // * FTPS (File Transfer Protocol Secure): File transfer with TLS + // * FTPS (File Transfer Protocol Secure): File transfer with TLS // encryption // - // * FTP (File Transfer Protocol): Unencrypted file transfer + // * FTP (File Transfer Protocol): Unencrypted file transfer // - // If - // you select FTPS, you must choose a certificate stored in AWS Certificate Manager + // If you + // select FTPS, you must choose a certificate stored in AWS Certificate Manager // (ACM) which will be used to identify your server when clients connect to it over // FTPS. If Protocol includes either FTP or FTPS, then the EndpointType must be VPC // and the IdentityProviderType must be API_GATEWAY. If Protocol includes FTP, then diff --git a/service/transfer/api_op_TestIdentityProvider.go b/service/transfer/api_op_TestIdentityProvider.go index 3b03dd39db5..0e19bb422e2 100644 --- a/service/transfer/api_op_TestIdentityProvider.go +++ b/service/transfer/api_op_TestIdentityProvider.go @@ -47,13 +47,13 @@ type TestIdentityProviderInput struct { // The type of file transfer protocol to be tested. The available protocols are: // + // * + // Secure Shell (SSH) File Transfer Protocol (SFTP) // - // * Secure Shell (SSH) File Transfer Protocol (SFTP) - // - // * File Transfer Protocol + // * File Transfer Protocol // Secure (FTPS) // - // * File Transfer Protocol (FTP) + // * File Transfer Protocol (FTP) ServerProtocol types.Protocol // The source IP address of the user account to be tested. diff --git a/service/transfer/api_op_UpdateServer.go b/service/transfer/api_op_UpdateServer.go index ec09353a79d..0b6fd3cf470 100644 --- a/service/transfer/api_op_UpdateServer.go +++ b/service/transfer/api_op_UpdateServer.go @@ -50,22 +50,22 @@ type UpdateServerInput struct { // in the AWS Certificate Manager User Guide. Certificates with the following // cryptographic algorithms and key sizes are supported: // - // * 2048-bit RSA + // * 2048-bit RSA // (RSA_2048) // - // * 4096-bit RSA (RSA_4096) + // * 4096-bit RSA (RSA_4096) // - // * Elliptic Prime Curve 256 bit + // * Elliptic Prime Curve 256 bit // (EC_prime256v1) // - // * Elliptic Prime Curve 384 bit (EC_secp384r1) + // * Elliptic Prime Curve 384 bit (EC_secp384r1) // - // * - // Elliptic Prime Curve 521 bit (EC_secp521r1) + // * Elliptic Prime + // Curve 521 bit (EC_secp521r1) // - // The certificate must be a valid - // SSL/TLS X.509 version 3 certificate with FQDN or IP address specified and - // information about the issuer. + // The certificate must be a valid SSL/TLS X.509 + // version 3 certificate with FQDN or IP address specified and information about + // the issuer. Certificate *string // The virtual private cloud (VPC) endpoint settings that are configured for your @@ -105,16 +105,16 @@ type UpdateServerInput struct { // protocol client can connect to your server's endpoint. 
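The ServerProtocol field shown above accepts one of the three listed protocols. A hedged sketch of a TestIdentityProvider call follows; the Protocol constants are not part of this diff, so the documented wire string is used, and the ServerId and UserName values are placeholders.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/transfer"
	"github.com/aws/aws-sdk-go-v2/service/transfer/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := transfer.NewFromConfig(cfg)

	// Placeholder server ID and user name; ServerProtocol is built from the
	// documented "SFTP" string since the Protocol constants are not shown here.
	out, err := client.TestIdentityProvider(context.TODO(), &transfer.TestIdentityProviderInput{
		ServerId:       aws.String("s-1234567890abcdef0"),
		UserName:       aws.String("example-user"),
		ServerProtocol: types.Protocol("SFTP"),
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = out // the response reports whether the identity provider accepted the test
}
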
The available protocols // are: // - // * Secure Shell (SSH) File Transfer Protocol (SFTP): File transfer over + // * Secure Shell (SSH) File Transfer Protocol (SFTP): File transfer over // SSH // - // * File Transfer Protocol Secure (FTPS): File transfer with TLS + // * File Transfer Protocol Secure (FTPS): File transfer with TLS // encryption // - // * File Transfer Protocol (FTP): Unencrypted file transfer + // * File Transfer Protocol (FTP): Unencrypted file transfer // - // If - // you select FTPS, you must choose a certificate stored in AWS Certificate Manager + // If you + // select FTPS, you must choose a certificate stored in AWS Certificate Manager // (ACM) which will be used to identify your server when clients connect to it over // FTPS. If Protocol includes either FTP or FTPS, then the EndpointType must be VPC // and the IdentityProviderType must be API_GATEWAY. If Protocol includes FTP, then diff --git a/service/transfer/types/enums.go b/service/transfer/types/enums.go index cb1164cfb4a..fcfbe991b77 100644 --- a/service/transfer/types/enums.go +++ b/service/transfer/types/enums.go @@ -6,9 +6,9 @@ type EndpointType string // Enum values for EndpointType const ( - EndpointTypePublic EndpointType = "PUBLIC" - EndpointTypeVpc EndpointType = "VPC" - EndpointTypeVpc_endpoint EndpointType = "VPC_ENDPOINT" + EndpointTypePublic EndpointType = "PUBLIC" + EndpointTypeVpc EndpointType = "VPC" + EndpointTypeVpcEndpoint EndpointType = "VPC_ENDPOINT" ) // Values returns all known values for EndpointType. Note that this can be expanded @@ -44,8 +44,8 @@ type IdentityProviderType string // Enum values for IdentityProviderType const ( - IdentityProviderTypeService_managed IdentityProviderType = "SERVICE_MANAGED" - IdentityProviderTypeApi_gateway IdentityProviderType = "API_GATEWAY" + IdentityProviderTypeServiceManaged IdentityProviderType = "SERVICE_MANAGED" + IdentityProviderTypeApiGateway IdentityProviderType = "API_GATEWAY" ) // Values returns all known values for IdentityProviderType. Note that this can be @@ -82,12 +82,12 @@ type State string // Enum values for State const ( - StateOffline State = "OFFLINE" - StateOnline State = "ONLINE" - StateStarting State = "STARTING" - StateStopping State = "STOPPING" - StateStart_failed State = "START_FAILED" - StateStop_failed State = "STOP_FAILED" + StateOffline State = "OFFLINE" + StateOnline State = "ONLINE" + StateStarting State = "STARTING" + StateStopping State = "STOPPING" + StateStartFailed State = "START_FAILED" + StateStopFailed State = "STOP_FAILED" ) // Values returns all known values for State. Note that this can be expanded in the diff --git a/service/transfer/types/types.go b/service/transfer/types/types.go index bcbddb6da87..f9084b292a9 100644 --- a/service/transfer/types/types.go +++ b/service/transfer/types/types.go @@ -84,13 +84,13 @@ type DescribedServer struct { // protocol client can connect to your server's endpoint. The available protocols // are: // - // * SFTP (Secure Shell (SSH) File Transfer Protocol): File transfer over + // * SFTP (Secure Shell (SSH) File Transfer Protocol): File transfer over // SSH // - // * FTPS (File Transfer Protocol Secure): File transfer with TLS + // * FTPS (File Transfer Protocol Secure): File transfer with TLS // encryption // - // * FTP (File Transfer Protocol): Unencrypted file transfer + // * FTP (File Transfer Protocol): Unencrypted file transfer Protocols []Protocol // Specifies the name of the security policy that is attached to the server. 
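The FTP/FTPS requirement spelled out above (the EndpointType must be VPC and the IdentityProviderType must be API_GATEWAY) lines up with the renamed constants in this diff. A sketch of that check; the Protocol constants are not shown here, so protocol values are built from their documented strings, and the endpoint and identity-provider settings are passed in rather than read from DescribedServer fields that this diff does not show.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/transfer/types"
)

// ftpRequirementsMet applies the documented rule: if FTP or FTPS is enabled,
// the endpoint type must be VPC and the identity provider must be API_GATEWAY.
func ftpRequirementsMet(protocols []types.Protocol, endpoint types.EndpointType, idp types.IdentityProviderType) bool {
	for _, p := range protocols {
		if p == types.Protocol("FTP") || p == types.Protocol("FTPS") {
			return endpoint == types.EndpointTypeVpc && idp == types.IdentityProviderTypeApiGateway
		}
	}
	return true
}

func main() {
	fmt.Println(ftpRequirementsMet(
		[]types.Protocol{types.Protocol("FTPS")},
		types.EndpointTypeVpc,
		types.IdentityProviderTypeApiGateway,
	)) // true
}
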
diff --git a/service/translate/api_op_StartTextTranslationJob.go b/service/translate/api_op_StartTextTranslationJob.go index 5d08b54d6ed..b5e20734587 100644 --- a/service/translate/api_op_StartTextTranslationJob.go +++ b/service/translate/api_op_StartTextTranslationJob.go @@ -88,27 +88,26 @@ type StartTextTranslationJobOutput struct { // The status of the job. Possible values include: // - // * SUBMITTED - The job has - // been received and is queued for processing. + // * SUBMITTED - The job has been + // received and is queued for processing. // - // * IN_PROGRESS - Amazon - // Translate is processing the job. + // * IN_PROGRESS - Amazon Translate is + // processing the job. // - // * COMPLETED - The job was successfully - // completed and the output is available. + // * COMPLETED - The job was successfully completed and the + // output is available. // - // * COMPLETED_WITH_ERROR - The job was - // completed with errors. The errors can be analyzed in the job's output. + // * COMPLETED_WITH_ERROR - The job was completed with + // errors. The errors can be analyzed in the job's output. // - // * - // FAILED - The job did not complete. To get details, use the - // DescribeTextTranslationJob operation. + // * FAILED - The job did + // not complete. To get details, use the DescribeTextTranslationJob operation. // - // * STOP_REQUESTED - The user who - // started the job has requested that it be stopped. + // * + // STOP_REQUESTED - The user who started the job has requested that it be + // stopped. // - // * STOPPED - The job has - // been stopped. + // * STOPPED - The job has been stopped. JobStatus types.JobStatus // Metadata pertaining to the operation's result. diff --git a/service/translate/types/enums.go b/service/translate/types/enums.go index 296db9961d6..b1e4c628602 100644 --- a/service/translate/types/enums.go +++ b/service/translate/types/enums.go @@ -22,13 +22,13 @@ type JobStatus string // Enum values for JobStatus const ( - JobStatusSubmitted JobStatus = "SUBMITTED" - JobStatusIn_progress JobStatus = "IN_PROGRESS" - JobStatusCompleted JobStatus = "COMPLETED" - JobStatusCompleted_with_error JobStatus = "COMPLETED_WITH_ERROR" - JobStatusFailed JobStatus = "FAILED" - JobStatusStop_requested JobStatus = "STOP_REQUESTED" - JobStatusStopped JobStatus = "STOPPED" + JobStatusSubmitted JobStatus = "SUBMITTED" + JobStatusInProgress JobStatus = "IN_PROGRESS" + JobStatusCompleted JobStatus = "COMPLETED" + JobStatusCompletedWithError JobStatus = "COMPLETED_WITH_ERROR" + JobStatusFailed JobStatus = "FAILED" + JobStatusStopRequested JobStatus = "STOP_REQUESTED" + JobStatusStopped JobStatus = "STOPPED" ) // Values returns all known values for JobStatus. Note that this can be expanded in diff --git a/service/translate/types/types.go b/service/translate/types/types.go index c8af1bb28aa..6c21fa169d3 100644 --- a/service/translate/types/types.go +++ b/service/translate/types/types.go @@ -47,22 +47,22 @@ type InputDataConfig struct { // You can specify one of the following multipurpose internet mail extension (MIME) // types: // - // * text/html: The input data consists of one or more HTML files. - // Amazon Translate translates only the text that resides in the html element in - // each file. + // * text/html: The input data consists of one or more HTML files. Amazon + // Translate translates only the text that resides in the html element in each + // file. // - // * text/plain: The input data consists of one or more unformatted - // text files. 
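The seven job states listed above correspond to the renamed JobStatus constants in this diff. A small illustrative helper (not part of the SDK) that separates terminal states from in-flight ones:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/translate/types"
)

// isTerminal reports whether a text translation job has stopped making progress.
func isTerminal(s types.JobStatus) bool {
	switch s {
	case types.JobStatusCompleted,
		types.JobStatusCompletedWithError,
		types.JobStatusFailed,
		types.JobStatusStopped:
		return true
	default:
		// SUBMITTED, IN_PROGRESS, and STOP_REQUESTED are still in flight.
		return false
	}
}

func main() { fmt.Println(isTerminal(types.JobStatusStopRequested)) } // false
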
Amazon Translate translates every character in this type of input. + // * text/plain: The input data consists of one or more unformatted text + // files. Amazon Translate translates every character in this type of input. // - // - // * application/vnd.openxmlformats-officedocument.wordprocessingml.document: The + // * + // application/vnd.openxmlformats-officedocument.wordprocessingml.document: The // input data consists of one or more Word documents (.docx). // - // * + // * // application/vnd.openxmlformats-officedocument.presentationml.presentation: The // input data consists of one or more PowerPoint Presentation files (.pptx). // - // * + // * // application/vnd.openxmlformats-officedocument.spreadsheetml.sheet: The input // data consists of one or more Excel Workbook files (.xlsx). // diff --git a/service/waf/api_op_CreateByteMatchSet.go b/service/waf/api_op_CreateByteMatchSet.go index 67298bf4f4b..bfd8b75f520 100644 --- a/service/waf/api_op_CreateByteMatchSet.go +++ b/service/waf/api_op_CreateByteMatchSet.go @@ -24,17 +24,17 @@ import ( // string BadBot. You can then configure AWS WAF to reject those requests. To // create and configure a ByteMatchSet, perform the following steps: // -// * Use +// * Use // GetChangeToken to get the change token that you provide in the ChangeToken // parameter of a CreateByteMatchSet request. // -// * Submit a CreateByteMatchSet +// * Submit a CreateByteMatchSet // request. // -// * Use GetChangeToken to get the change token that you provide in -// the ChangeToken parameter of an UpdateByteMatchSet request. +// * Use GetChangeToken to get the change token that you provide in the +// ChangeToken parameter of an UpdateByteMatchSet request. // -// * Submit an +// * Submit an // UpdateByteMatchSet request to specify the part of the request that you want AWS // WAF to inspect (for example, the header or the URI) and the value that you want // AWS WAF to watch for. diff --git a/service/waf/api_op_CreateGeoMatchSet.go b/service/waf/api_op_CreateGeoMatchSet.go index b03fd84794d..3c544159266 100644 --- a/service/waf/api_op_CreateGeoMatchSet.go +++ b/service/waf/api_op_CreateGeoMatchSet.go @@ -24,17 +24,17 @@ import ( // contains those countries and then configure AWS WAF to block the requests. To // create and configure a GeoMatchSet, perform the following steps: // -// * Use +// * Use // GetChangeToken to get the change token that you provide in the ChangeToken // parameter of a CreateGeoMatchSet request. // -// * Submit a CreateGeoMatchSet +// * Submit a CreateGeoMatchSet // request. // -// * Use GetChangeToken to get the change token that you provide in -// the ChangeToken parameter of an UpdateGeoMatchSet request. +// * Use GetChangeToken to get the change token that you provide in the +// ChangeToken parameter of an UpdateGeoMatchSet request. // -// * Submit an +// * Submit an // UpdateGeoMatchSetSet request to specify the countries that you want AWS WAF to // watch for. // diff --git a/service/waf/api_op_CreateIPSet.go b/service/waf/api_op_CreateIPSet.go index 9451760b588..1e1a4b0c25c 100644 --- a/service/waf/api_op_CreateIPSet.go +++ b/service/waf/api_op_CreateIPSet.go @@ -25,21 +25,21 @@ import ( // then configure AWS WAF to block the requests. To create and configure an IPSet, // perform the following steps: // -// * Use GetChangeToken to get the change token -// that you provide in the ChangeToken parameter of a CreateIPSet request. 
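Each of the WAF Create* operations that follow uses the same change-token sequence described above. A hedged sketch of the first two steps for CreateByteMatchSet; the GetChangeToken method and the ChangeToken and Name input fields are assumed from the documented parameter names rather than shown in this diff.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/waf"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := waf.NewFromConfig(cfg)

	// Step 1: get a change token.
	tok, err := client.GetChangeToken(context.TODO(), &waf.GetChangeTokenInput{})
	if err != nil {
		log.Fatal(err)
	}

	// Step 2: submit CreateByteMatchSet under that token. Steps 3 and 4
	// (a fresh token plus UpdateByteMatchSet) repeat the same pattern and
	// are not shown.
	_, err = client.CreateByteMatchSet(context.TODO(), &waf.CreateByteMatchSetInput{
		ChangeToken: tok.ChangeToken,
		Name:        aws.String("example-byte-match-set"),
	})
	if err != nil {
		log.Fatal(err)
	}
}
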
+// * Use GetChangeToken to get the change token that +// you provide in the ChangeToken parameter of a CreateIPSet request. // -// * -// Submit a CreateIPSet request. +// * Submit a +// CreateIPSet request. // -// * Use GetChangeToken to get the change token -// that you provide in the ChangeToken parameter of an UpdateIPSet request. +// * Use GetChangeToken to get the change token that you +// provide in the ChangeToken parameter of an UpdateIPSet request. // -// * -// Submit an UpdateIPSet request to specify the IP addresses that you want AWS WAF -// to watch for. +// * Submit an +// UpdateIPSet request to specify the IP addresses that you want AWS WAF to watch +// for. // -// For more information about how to use the AWS WAF API to allow or -// block HTTP requests, see the AWS WAF Developer Guide +// For more information about how to use the AWS WAF API to allow or block +// HTTP requests, see the AWS WAF Developer Guide // (https://docs.aws.amazon.com/waf/latest/developerguide/). func (c *Client) CreateIPSet(ctx context.Context, params *CreateIPSetInput, optFns ...func(*Options)) (*CreateIPSetOutput, error) { if params == nil { diff --git a/service/waf/api_op_CreateRateBasedRule.go b/service/waf/api_op_CreateRateBasedRule.go index e1d517b7da4..ea6121fc819 100644 --- a/service/waf/api_op_CreateRateBasedRule.go +++ b/service/waf/api_op_CreateRateBasedRule.go @@ -26,60 +26,59 @@ import ( // exceed the RateLimit, but it also must match all the conditions to be counted or // blocked. For example, suppose you add the following to a RateBasedRule: // -// * -// An IPSet that matches the IP address 192.0.2.44/32 +// * An +// IPSet that matches the IP address 192.0.2.44/32 // -// * A ByteMatchSet that -// matches BadBot in the User-Agent header +// * A ByteMatchSet that matches +// BadBot in the User-Agent header // -// Further, you specify a RateLimit of -// 1,000. You then add the RateBasedRule to a WebACL and specify that you want to -// block requests that meet the conditions in the rule. For a request to be -// blocked, it must come from the IP address 192.0.2.44 and the User-Agent header -// in the request must contain the value BadBot. Further, requests that match these -// two conditions must be received at a rate of more than 1,000 requests every five +// Further, you specify a RateLimit of 1,000. You +// then add the RateBasedRule to a WebACL and specify that you want to block +// requests that meet the conditions in the rule. For a request to be blocked, it +// must come from the IP address 192.0.2.44 and the User-Agent header in the +// request must contain the value BadBot. Further, requests that match these two +// conditions must be received at a rate of more than 1,000 requests every five // minutes. If both conditions are met and the rate is exceeded, AWS WAF blocks the // requests. If the rate drops below 1,000 for a five-minute period, AWS WAF no // longer blocks the requests. As a second example, suppose you want to limit // requests to a particular page on your site. To do this, you could add the // following to a RateBasedRule: // -// * A ByteMatchSet with FieldToMatch of URI +// * A ByteMatchSet with FieldToMatch of URI // +// * A +// PositionalConstraint of STARTS_WITH // -// * A PositionalConstraint of STARTS_WITH +// * A TargetString of login // -// * A TargetString of login -// -// Further, -// you specify a RateLimit of 1,000. By adding this RateBasedRule to a WebACL, you +// Further, you +// specify a RateLimit of 1,000. 
By adding this RateBasedRule to a WebACL, you // could limit requests to your login page without affecting the rest of your site. // To create and configure a RateBasedRule, perform the following steps: // -// * -// Create and update the predicates that you want to include in the rule. For more +// * Create +// and update the predicates that you want to include in the rule. For more // information, see CreateByteMatchSet, CreateIPSet, and // CreateSqlInjectionMatchSet. // -// * Use GetChangeToken to get the change token -// that you provide in the ChangeToken parameter of a CreateRule request. +// * Use GetChangeToken to get the change token that +// you provide in the ChangeToken parameter of a CreateRule request. // -// * -// Submit a CreateRateBasedRule request. +// * Submit a +// CreateRateBasedRule request. // -// * Use GetChangeToken to get the -// change token that you provide in the ChangeToken parameter of an UpdateRule -// request. +// * Use GetChangeToken to get the change token that +// you provide in the ChangeToken parameter of an UpdateRule request. // -// * Submit an UpdateRateBasedRule request to specify the predicates -// that you want to include in the rule. +// * Submit an +// UpdateRateBasedRule request to specify the predicates that you want to include +// in the rule. // -// * Create and update a WebACL that -// contains the RateBasedRule. For more information, see CreateWebACL. +// * Create and update a WebACL that contains the RateBasedRule. For +// more information, see CreateWebACL. // -// For more -// information about how to use the AWS WAF API to allow or block HTTP requests, -// see the AWS WAF Developer Guide +// For more information about how to use the +// AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide // (https://docs.aws.amazon.com/waf/latest/developerguide/). func (c *Client) CreateRateBasedRule(ctx context.Context, params *CreateRateBasedRuleInput, optFns ...func(*Options)) (*CreateRateBasedRuleOutput, error) { if params == nil { diff --git a/service/waf/api_op_CreateRegexMatchSet.go b/service/waf/api_op_CreateRegexMatchSet.go index 62f00714383..29279aeb807 100644 --- a/service/waf/api_op_CreateRegexMatchSet.go +++ b/service/waf/api_op_CreateRegexMatchSet.go @@ -25,24 +25,24 @@ import ( // can then configure AWS WAF to reject those requests. To create and configure a // RegexMatchSet, perform the following steps: // -// * Use GetChangeToken to get the +// * Use GetChangeToken to get the // change token that you provide in the ChangeToken parameter of a // CreateRegexMatchSet request. // -// * Submit a CreateRegexMatchSet request. +// * Submit a CreateRegexMatchSet request. // -// * -// Use GetChangeToken to get the change token that you provide in the ChangeToken +// * Use +// GetChangeToken to get the change token that you provide in the ChangeToken // parameter of an UpdateRegexMatchSet request. // -// * Submit an -// UpdateRegexMatchSet request to specify the part of the request that you want AWS -// WAF to inspect (for example, the header or the URI) and the value, using a -// RegexPatternSet, that you want AWS WAF to watch for. +// * Submit an UpdateRegexMatchSet +// request to specify the part of the request that you want AWS WAF to inspect (for +// example, the header or the URI) and the value, using a RegexPatternSet, that you +// want AWS WAF to watch for. 
// -// For more information about -// how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF -// Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/). +// For more information about how to use the AWS WAF +// API to allow or block HTTP requests, see the AWS WAF Developer Guide +// (https://docs.aws.amazon.com/waf/latest/developerguide/). func (c *Client) CreateRegexMatchSet(ctx context.Context, params *CreateRegexMatchSetInput, optFns ...func(*Options)) (*CreateRegexMatchSetOutput, error) { if params == nil { params = &CreateRegexMatchSetInput{} diff --git a/service/waf/api_op_CreateRegexPatternSet.go b/service/waf/api_op_CreateRegexPatternSet.go index c3480c9982f..4d226f32adf 100644 --- a/service/waf/api_op_CreateRegexPatternSet.go +++ b/service/waf/api_op_CreateRegexPatternSet.go @@ -23,21 +23,21 @@ import ( // requests. To create and configure a RegexPatternSet, perform the following // steps: // -// * Use GetChangeToken to get the change token that you provide in the +// * Use GetChangeToken to get the change token that you provide in the // ChangeToken parameter of a CreateRegexPatternSet request. // -// * Submit a +// * Submit a // CreateRegexPatternSet request. // -// * Use GetChangeToken to get the change token +// * Use GetChangeToken to get the change token // that you provide in the ChangeToken parameter of an UpdateRegexPatternSet // request. // -// * Submit an UpdateRegexPatternSet request to specify the string -// that you want AWS WAF to watch for. +// * Submit an UpdateRegexPatternSet request to specify the string that +// you want AWS WAF to watch for. // -// For more information about how to use the -// AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide +// For more information about how to use the AWS +// WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide // (https://docs.aws.amazon.com/waf/latest/developerguide/). func (c *Client) CreateRegexPatternSet(ctx context.Context, params *CreateRegexPatternSetInput, optFns ...func(*Options)) (*CreateRegexPatternSetOutput, error) { if params == nil { diff --git a/service/waf/api_op_CreateRule.go b/service/waf/api_op_CreateRule.go index fa33cfee06b..d516180c8d0 100644 --- a/service/waf/api_op_CreateRule.go +++ b/service/waf/api_op_CreateRule.go @@ -23,40 +23,40 @@ import ( // specifications to be allowed or blocked. For example, suppose that you add the // following to a Rule: // -// * An IPSet that matches the IP address 192.0.2.44/32 +// * An IPSet that matches the IP address 192.0.2.44/32 // +// * A +// ByteMatchSet that matches BadBot in the User-Agent header // -// * A ByteMatchSet that matches BadBot in the User-Agent header +// You then add the Rule +// to a WebACL and specify that you want to blocks requests that satisfy the Rule. +// For a request to be blocked, it must come from the IP address 192.0.2.44 and the +// User-Agent header in the request must contain the value BadBot. To create and +// configure a Rule, perform the following steps: // -// You then add the -// Rule to a WebACL and specify that you want to blocks requests that satisfy the -// Rule. For a request to be blocked, it must come from the IP address 192.0.2.44 -// and the User-Agent header in the request must contain the value BadBot. To -// create and configure a Rule, perform the following steps: +// * Create and update the +// predicates that you want to include in the Rule. 
For more information, see +// CreateByteMatchSet, CreateIPSet, and CreateSqlInjectionMatchSet. // -// * Create and -// update the predicates that you want to include in the Rule. For more -// information, see CreateByteMatchSet, CreateIPSet, and -// CreateSqlInjectionMatchSet. +// * Use +// GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of a CreateRule request. // -// * Use GetChangeToken to get the change token -// that you provide in the ChangeToken parameter of a CreateRule request. +// * Submit a CreateRule request. // -// * -// Submit a CreateRule request. +// * Use +// GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of an UpdateRule request. // -// * Use GetChangeToken to get the change token -// that you provide in the ChangeToken parameter of an UpdateRule request. +// * Submit an UpdateRule request to specify +// the predicates that you want to include in the Rule. // -// * -// Submit an UpdateRule request to specify the predicates that you want to include -// in the Rule. +// * Create and update a +// WebACL that contains the Rule. For more information, see CreateWebACL. // -// * Create and update a WebACL that contains the Rule. For more -// information, see CreateWebACL. -// -// For more information about how to use the AWS -// WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide +// For more +// information about how to use the AWS WAF API to allow or block HTTP requests, +// see the AWS WAF Developer Guide // (https://docs.aws.amazon.com/waf/latest/developerguide/). func (c *Client) CreateRule(ctx context.Context, params *CreateRuleInput, optFns ...func(*Options)) (*CreateRuleOutput, error) { if params == nil { diff --git a/service/waf/api_op_CreateRuleGroup.go b/service/waf/api_op_CreateRuleGroup.go index 18f13ee965b..c5638841492 100644 --- a/service/waf/api_op_CreateRuleGroup.go +++ b/service/waf/api_op_CreateRuleGroup.go @@ -21,13 +21,13 @@ import ( // rules that you add to a web ACL. You use UpdateRuleGroup to add rules to the // rule group. Rule groups are subject to the following limits: // -// * Three rule +// * Three rule // groups per account. You can request an increase to this limit by contacting // customer support. // -// * One rule group per web ACL. +// * One rule group per web ACL. // -// * Ten rules per rule +// * Ten rules per rule // group. // // For more information about how to use the AWS WAF API to allow or block diff --git a/service/waf/api_op_CreateSizeConstraintSet.go b/service/waf/api_op_CreateSizeConstraintSet.go index 93ca20143f1..607cc19b0fc 100644 --- a/service/waf/api_op_CreateSizeConstraintSet.go +++ b/service/waf/api_op_CreateSizeConstraintSet.go @@ -25,23 +25,23 @@ import ( // WAF to reject those requests. To create and configure a SizeConstraintSet, // perform the following steps: // -// * Use GetChangeToken to get the change token -// that you provide in the ChangeToken parameter of a CreateSizeConstraintSet +// * Use GetChangeToken to get the change token that +// you provide in the ChangeToken parameter of a CreateSizeConstraintSet // request. // -// * Submit a CreateSizeConstraintSet request. +// * Submit a CreateSizeConstraintSet request. // -// * Use -// GetChangeToken to get the change token that you provide in the ChangeToken -// parameter of an UpdateSizeConstraintSet request. +// * Use GetChangeToken to +// get the change token that you provide in the ChangeToken parameter of an +// UpdateSizeConstraintSet request. 
// -// * Submit an -// UpdateSizeConstraintSet request to specify the part of the request that you want -// AWS WAF to inspect (for example, the header or the URI) and the value that you -// want AWS WAF to watch for. +// * Submit an UpdateSizeConstraintSet request to +// specify the part of the request that you want AWS WAF to inspect (for example, +// the header or the URI) and the value that you want AWS WAF to watch for. // -// For more information about how to use the AWS WAF -// API to allow or block HTTP requests, see the AWS WAF Developer Guide +// For +// more information about how to use the AWS WAF API to allow or block HTTP +// requests, see the AWS WAF Developer Guide // (https://docs.aws.amazon.com/waf/latest/developerguide/). func (c *Client) CreateSizeConstraintSet(ctx context.Context, params *CreateSizeConstraintSetInput, optFns ...func(*Options)) (*CreateSizeConstraintSetOutput, error) { if params == nil { diff --git a/service/waf/api_op_CreateSqlInjectionMatchSet.go b/service/waf/api_op_CreateSqlInjectionMatchSet.go index e5fc5067e8b..c7c20b3a988 100644 --- a/service/waf/api_op_CreateSqlInjectionMatchSet.go +++ b/service/waf/api_op_CreateSqlInjectionMatchSet.go @@ -23,17 +23,17 @@ import ( // malicious strings. To create and configure a SqlInjectionMatchSet, perform the // following steps: // -// * Use GetChangeToken to get the change token that you -// provide in the ChangeToken parameter of a CreateSqlInjectionMatchSet request. +// * Use GetChangeToken to get the change token that you provide +// in the ChangeToken parameter of a CreateSqlInjectionMatchSet request. // +// * Submit +// a CreateSqlInjectionMatchSet request. // -// * Submit a CreateSqlInjectionMatchSet request. -// -// * Use GetChangeToken to get -// the change token that you provide in the ChangeToken parameter of an +// * Use GetChangeToken to get the change +// token that you provide in the ChangeToken parameter of an // UpdateSqlInjectionMatchSet request. // -// * Submit an UpdateSqlInjectionMatchSet +// * Submit an UpdateSqlInjectionMatchSet // request to specify the parts of web requests in which you want to allow, block, // or count malicious SQL code. // diff --git a/service/waf/api_op_CreateWebACL.go b/service/waf/api_op_CreateWebACL.go index e07fbe8a8d1..c51e9c28044 100644 --- a/service/waf/api_op_CreateWebACL.go +++ b/service/waf/api_op_CreateWebACL.go @@ -24,26 +24,26 @@ import ( // any of the Rules in a WebACL, AWS WAF responds to the request with the default // action. To create and configure a WebACL, perform the following steps: // -// * -// Create and update the ByteMatchSet objects and other predicates that you want to +// * Create +// and update the ByteMatchSet objects and other predicates that you want to // include in Rules. For more information, see CreateByteMatchSet, // UpdateByteMatchSet, CreateIPSet, UpdateIPSet, CreateSqlInjectionMatchSet, and // UpdateSqlInjectionMatchSet. // -// * Create and update the Rules that you want to +// * Create and update the Rules that you want to // include in the WebACL. For more information, see CreateRule and UpdateRule. // -// -// * Use GetChangeToken to get the change token that you provide in the ChangeToken +// * +// Use GetChangeToken to get the change token that you provide in the ChangeToken // parameter of a CreateWebACL request. // -// * Submit a CreateWebACL request. -// +// * Submit a CreateWebACL request. 
// -// * Use GetChangeToken to get the change token that you provide in the ChangeToken +// * Use +// GetChangeToken to get the change token that you provide in the ChangeToken // parameter of an UpdateWebACL request. // -// * Submit an UpdateWebACL request to +// * Submit an UpdateWebACL request to // specify the Rules that you want to include in the WebACL, to specify the default // action, and to associate the WebACL with a CloudFront distribution. // diff --git a/service/waf/api_op_CreateWebACLMigrationStack.go b/service/waf/api_op_CreateWebACLMigrationStack.go index 609d1bc7f97..87d08148483 100644 --- a/service/waf/api_op_CreateWebACLMigrationStack.go +++ b/service/waf/api_op_CreateWebACLMigrationStack.go @@ -49,17 +49,17 @@ type CreateWebACLMigrationStackInput struct { // The name of the Amazon S3 bucket to store the CloudFormation template in. The S3 // bucket must be configured as follows for the migration: // - // * The bucket name - // must start with aws-waf-migration-. For example, aws-waf-migration-my-web-acl. + // * The bucket name must + // start with aws-waf-migration-. For example, aws-waf-migration-my-web-acl. // + // * The + // bucket must be in the Region where you are deploying the template. For example, + // for a web ACL in us-west-2, you must use an Amazon S3 bucket in us-west-2 and + // you must deploy the template stack to us-west-2. // - // * The bucket must be in the Region where you are deploying the template. For - // example, for a web ACL in us-west-2, you must use an Amazon S3 bucket in - // us-west-2 and you must deploy the template stack to us-west-2. - // - // * The bucket - // policies must permit the migration process to write data. For listings of the - // bucket policies, see the Examples section. + // * The bucket policies must + // permit the migration process to write data. For listings of the bucket policies, + // see the Examples section. // // This member is required. S3BucketName *string diff --git a/service/waf/api_op_CreateXssMatchSet.go b/service/waf/api_op_CreateXssMatchSet.go index 7bc5e96b226..11bc041cda4 100644 --- a/service/waf/api_op_CreateXssMatchSet.go +++ b/service/waf/api_op_CreateXssMatchSet.go @@ -23,17 +23,17 @@ import ( // malicious strings. To create and configure an XssMatchSet, perform the following // steps: // -// * Use GetChangeToken to get the change token that you provide in the +// * Use GetChangeToken to get the change token that you provide in the // ChangeToken parameter of a CreateXssMatchSet request. // -// * Submit a +// * Submit a // CreateXssMatchSet request. // -// * Use GetChangeToken to get the change token -// that you provide in the ChangeToken parameter of an UpdateXssMatchSet request. +// * Use GetChangeToken to get the change token that +// you provide in the ChangeToken parameter of an UpdateXssMatchSet request. // -// -// * Submit an UpdateXssMatchSet request to specify the parts of web requests in +// * +// Submit an UpdateXssMatchSet request to specify the parts of web requests in // which you want to allow, block, or count cross-site scripting attacks. // // For more diff --git a/service/waf/api_op_DeleteByteMatchSet.go b/service/waf/api_op_DeleteByteMatchSet.go index 3ab0e854e45..53edabf4a09 100644 --- a/service/waf/api_op_DeleteByteMatchSet.go +++ b/service/waf/api_op_DeleteByteMatchSet.go @@ -22,14 +22,14 @@ import ( // use UpdateRule. To permanently delete a ByteMatchSet, perform the following // steps: // -// * Update the ByteMatchSet to remove filters, if any. 
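Of the three S3 bucket requirements listed for CreateWebACLMigrationStack above, only the naming rule can be checked purely client-side; a trivial guard for that prefix (Region placement and bucket policy have to be verified against the bucket itself):

package main

import (
	"fmt"
	"strings"
)

// validMigrationBucketName checks the documented aws-waf-migration- prefix for
// the S3BucketName parameter.
func validMigrationBucketName(name string) bool {
	return strings.HasPrefix(name, "aws-waf-migration-")
}

func main() {
	fmt.Println(validMigrationBucketName("aws-waf-migration-my-web-acl")) // true
	fmt.Println(validMigrationBucketName("my-web-acl-bucket"))            // false
}
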
For more +// * Update the ByteMatchSet to remove filters, if any. For more // information, see UpdateByteMatchSet. // -// * Use GetChangeToken to get the change +// * Use GetChangeToken to get the change // token that you provide in the ChangeToken parameter of a DeleteByteMatchSet // request. // -// * Submit a DeleteByteMatchSet request. +// * Submit a DeleteByteMatchSet request. func (c *Client) DeleteByteMatchSet(ctx context.Context, params *DeleteByteMatchSetInput, optFns ...func(*Options)) (*DeleteByteMatchSetOutput, error) { if params == nil { params = &DeleteByteMatchSetInput{} diff --git a/service/waf/api_op_DeleteGeoMatchSet.go b/service/waf/api_op_DeleteGeoMatchSet.go index dc55a2adf83..ceb29845e7b 100644 --- a/service/waf/api_op_DeleteGeoMatchSet.go +++ b/service/waf/api_op_DeleteGeoMatchSet.go @@ -21,15 +21,15 @@ import ( // want to remove a GeoMatchSet from a Rule, use UpdateRule. To permanently delete // a GeoMatchSet from AWS WAF, perform the following steps: // -// * Update the +// * Update the // GeoMatchSet to remove any countries. For more information, see // UpdateGeoMatchSet. // -// * Use GetChangeToken to get the change token that you +// * Use GetChangeToken to get the change token that you // provide in the ChangeToken parameter of a DeleteGeoMatchSet request. // -// * -// Submit a DeleteGeoMatchSet request. +// * Submit a +// DeleteGeoMatchSet request. func (c *Client) DeleteGeoMatchSet(ctx context.Context, params *DeleteGeoMatchSetInput, optFns ...func(*Options)) (*DeleteGeoMatchSetOutput, error) { if params == nil { params = &DeleteGeoMatchSetInput{} diff --git a/service/waf/api_op_DeleteIPSet.go b/service/waf/api_op_DeleteIPSet.go index dfdf774a31f..fc941c20936 100644 --- a/service/waf/api_op_DeleteIPSet.go +++ b/service/waf/api_op_DeleteIPSet.go @@ -21,14 +21,14 @@ import ( // want to remove an IPSet from a Rule, use UpdateRule. To permanently delete an // IPSet from AWS WAF, perform the following steps: // -// * Update the IPSet to -// remove IP address ranges, if any. For more information, see UpdateIPSet. +// * Update the IPSet to remove +// IP address ranges, if any. For more information, see UpdateIPSet. // -// * -// Use GetChangeToken to get the change token that you provide in the ChangeToken +// * Use +// GetChangeToken to get the change token that you provide in the ChangeToken // parameter of a DeleteIPSet request. // -// * Submit a DeleteIPSet request. +// * Submit a DeleteIPSet request. func (c *Client) DeleteIPSet(ctx context.Context, params *DeleteIPSetInput, optFns ...func(*Options)) (*DeleteIPSetOutput, error) { if params == nil { params = &DeleteIPSetInput{} diff --git a/service/waf/api_op_DeleteRateBasedRule.go b/service/waf/api_op_DeleteRateBasedRule.go index 0e049922fa6..61058a203ad 100644 --- a/service/waf/api_op_DeleteRateBasedRule.go +++ b/service/waf/api_op_DeleteRateBasedRule.go @@ -22,14 +22,14 @@ import ( // UpdateWebACL. To permanently delete a RateBasedRule from AWS WAF, perform the // following steps: // -// * Update the RateBasedRule to remove predicates, if any. -// For more information, see UpdateRateBasedRule. +// * Update the RateBasedRule to remove predicates, if any. For +// more information, see UpdateRateBasedRule. // -// * Use GetChangeToken to get -// the change token that you provide in the ChangeToken parameter of a +// * Use GetChangeToken to get the +// change token that you provide in the ChangeToken parameter of a // DeleteRateBasedRule request. // -// * Submit a DeleteRateBasedRule request. 
+// * Submit a DeleteRateBasedRule request. func (c *Client) DeleteRateBasedRule(ctx context.Context, params *DeleteRateBasedRuleInput, optFns ...func(*Options)) (*DeleteRateBasedRuleOutput, error) { if params == nil { params = &DeleteRateBasedRuleInput{} diff --git a/service/waf/api_op_DeleteRegexMatchSet.go b/service/waf/api_op_DeleteRegexMatchSet.go index 5a2a1f79d1b..9bcbb6c5c26 100644 --- a/service/waf/api_op_DeleteRegexMatchSet.go +++ b/service/waf/api_op_DeleteRegexMatchSet.go @@ -22,14 +22,14 @@ import ( // RegexMatchSet from a Rule, use UpdateRule. To permanently delete a // RegexMatchSet, perform the following steps: // -// * Update the RegexMatchSet to +// * Update the RegexMatchSet to // remove filters, if any. For more information, see UpdateRegexMatchSet. // -// * -// Use GetChangeToken to get the change token that you provide in the ChangeToken +// * Use +// GetChangeToken to get the change token that you provide in the ChangeToken // parameter of a DeleteRegexMatchSet request. // -// * Submit a DeleteRegexMatchSet +// * Submit a DeleteRegexMatchSet // request. func (c *Client) DeleteRegexMatchSet(ctx context.Context, params *DeleteRegexMatchSetInput, optFns ...func(*Options)) (*DeleteRegexMatchSetOutput, error) { if params == nil { diff --git a/service/waf/api_op_DeleteRule.go b/service/waf/api_op_DeleteRule.go index 9861a47e36a..f6139119198 100644 --- a/service/waf/api_op_DeleteRule.go +++ b/service/waf/api_op_DeleteRule.go @@ -22,14 +22,14 @@ import ( // UpdateWebACL. To permanently delete a Rule from AWS WAF, perform the following // steps: // -// * Update the Rule to remove predicates, if any. For more -// information, see UpdateRule. +// * Update the Rule to remove predicates, if any. For more information, +// see UpdateRule. // -// * Use GetChangeToken to get the change token -// that you provide in the ChangeToken parameter of a DeleteRule request. +// * Use GetChangeToken to get the change token that you provide +// in the ChangeToken parameter of a DeleteRule request. // -// * -// Submit a DeleteRule request. +// * Submit a DeleteRule +// request. func (c *Client) DeleteRule(ctx context.Context, params *DeleteRuleInput, optFns ...func(*Options)) (*DeleteRuleOutput, error) { if params == nil { params = &DeleteRuleInput{} diff --git a/service/waf/api_op_DeleteRuleGroup.go b/service/waf/api_op_DeleteRuleGroup.go index 2e402ac30bc..26f01033bd3 100644 --- a/service/waf/api_op_DeleteRuleGroup.go +++ b/service/waf/api_op_DeleteRuleGroup.go @@ -21,14 +21,14 @@ import ( // just want to remove a RuleGroup from a WebACL, use UpdateWebACL. To permanently // delete a RuleGroup from AWS WAF, perform the following steps: // -// * Update the +// * Update the // RuleGroup to remove rules, if any. For more information, see UpdateRuleGroup. // -// -// * Use GetChangeToken to get the change token that you provide in the ChangeToken +// * +// Use GetChangeToken to get the change token that you provide in the ChangeToken // parameter of a DeleteRuleGroup request. // -// * Submit a DeleteRuleGroup request. +// * Submit a DeleteRuleGroup request. 
func (c *Client) DeleteRuleGroup(ctx context.Context, params *DeleteRuleGroupInput, optFns ...func(*Options)) (*DeleteRuleGroupOutput, error) { if params == nil { params = &DeleteRuleGroupInput{} diff --git a/service/waf/api_op_DeleteSizeConstraintSet.go b/service/waf/api_op_DeleteSizeConstraintSet.go index 09b35d5d9e6..c0b002bef1a 100644 --- a/service/waf/api_op_DeleteSizeConstraintSet.go +++ b/service/waf/api_op_DeleteSizeConstraintSet.go @@ -22,15 +22,15 @@ import ( // SizeConstraintSet from a Rule, use UpdateRule. To permanently delete a // SizeConstraintSet, perform the following steps: // -// * Update the -// SizeConstraintSet to remove filters, if any. For more information, see -// UpdateSizeConstraintSet. +// * Update the SizeConstraintSet +// to remove filters, if any. For more information, see UpdateSizeConstraintSet. // -// * Use GetChangeToken to get the change token that -// you provide in the ChangeToken parameter of a DeleteSizeConstraintSet request. +// * +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of a DeleteSizeConstraintSet request. // -// -// * Submit a DeleteSizeConstraintSet request. +// * Submit a +// DeleteSizeConstraintSet request. func (c *Client) DeleteSizeConstraintSet(ctx context.Context, params *DeleteSizeConstraintSetInput, optFns ...func(*Options)) (*DeleteSizeConstraintSetOutput, error) { if params == nil { params = &DeleteSizeConstraintSetInput{} diff --git a/service/waf/api_op_DeleteSqlInjectionMatchSet.go b/service/waf/api_op_DeleteSqlInjectionMatchSet.go index 7993a50e7cb..10c04d0bc9c 100644 --- a/service/waf/api_op_DeleteSqlInjectionMatchSet.go +++ b/service/waf/api_op_DeleteSqlInjectionMatchSet.go @@ -22,15 +22,15 @@ import ( // SqlInjectionMatchSet from a Rule, use UpdateRule. To permanently delete a // SqlInjectionMatchSet from AWS WAF, perform the following steps: // -// * Update -// the SqlInjectionMatchSet to remove filters, if any. For more information, see +// * Update the +// SqlInjectionMatchSet to remove filters, if any. For more information, see // UpdateSqlInjectionMatchSet. // -// * Use GetChangeToken to get the change token -// that you provide in the ChangeToken parameter of a DeleteSqlInjectionMatchSet +// * Use GetChangeToken to get the change token that +// you provide in the ChangeToken parameter of a DeleteSqlInjectionMatchSet // request. // -// * Submit a DeleteSqlInjectionMatchSet request. +// * Submit a DeleteSqlInjectionMatchSet request. func (c *Client) DeleteSqlInjectionMatchSet(ctx context.Context, params *DeleteSqlInjectionMatchSetInput, optFns ...func(*Options)) (*DeleteSqlInjectionMatchSetOutput, error) { if params == nil { params = &DeleteSqlInjectionMatchSetInput{} diff --git a/service/waf/api_op_DeleteWebACL.go b/service/waf/api_op_DeleteWebACL.go index c9dad38f07a..a9ba09a18ae 100644 --- a/service/waf/api_op_DeleteWebACL.go +++ b/service/waf/api_op_DeleteWebACL.go @@ -19,15 +19,14 @@ import ( // global use. Permanently deletes a WebACL. You can't delete a WebACL if it still // contains any Rules. To delete a WebACL, perform the following steps: // -// * -// Update the WebACL to remove Rules, if any. For more information, see -// UpdateWebACL. +// * Update +// the WebACL to remove Rules, if any. For more information, see UpdateWebACL. // -// * Use GetChangeToken to get the change token that you provide -// in the ChangeToken parameter of a DeleteWebACL request. 
+// * +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of a DeleteWebACL request. // -// * Submit a -// DeleteWebACL request. +// * Submit a DeleteWebACL request. func (c *Client) DeleteWebACL(ctx context.Context, params *DeleteWebACLInput, optFns ...func(*Options)) (*DeleteWebACLOutput, error) { if params == nil { params = &DeleteWebACLInput{} diff --git a/service/waf/api_op_DeleteXssMatchSet.go b/service/waf/api_op_DeleteXssMatchSet.go index c8d742ac177..f4a609e6bbd 100644 --- a/service/waf/api_op_DeleteXssMatchSet.go +++ b/service/waf/api_op_DeleteXssMatchSet.go @@ -22,14 +22,14 @@ import ( // To permanently delete an XssMatchSet from AWS WAF, perform the following // steps: // -// * Update the XssMatchSet to remove filters, if any. For more +// * Update the XssMatchSet to remove filters, if any. For more // information, see UpdateXssMatchSet. // -// * Use GetChangeToken to get the change +// * Use GetChangeToken to get the change // token that you provide in the ChangeToken parameter of a DeleteXssMatchSet // request. // -// * Submit a DeleteXssMatchSet request. +// * Submit a DeleteXssMatchSet request. func (c *Client) DeleteXssMatchSet(ctx context.Context, params *DeleteXssMatchSetInput, optFns ...func(*Options)) (*DeleteXssMatchSetOutput, error) { if params == nil { params = &DeleteXssMatchSetInput{} diff --git a/service/waf/api_op_GetByteMatchSet.go b/service/waf/api_op_GetByteMatchSet.go index bbc0ddba238..95526a05e39 100644 --- a/service/waf/api_op_GetByteMatchSet.go +++ b/service/waf/api_op_GetByteMatchSet.go @@ -47,14 +47,14 @@ type GetByteMatchSetOutput struct { // Information about the ByteMatchSet that you specified in the GetByteMatchSet // request. For more information, see the following topics: // - // * ByteMatchSet: + // * ByteMatchSet: // Contains ByteMatchSetId, ByteMatchTuples, and Name // - // * ByteMatchTuples: - // Contains an array of ByteMatchTuple objects. Each ByteMatchTuple object contains + // * ByteMatchTuples: Contains + // an array of ByteMatchTuple objects. Each ByteMatchTuple object contains // FieldToMatch, PositionalConstraint, TargetString, and TextTransformation // - // * + // * // FieldToMatch: Contains Data and Type ByteMatchSet *types.ByteMatchSet diff --git a/service/waf/api_op_GetChangeTokenStatus.go b/service/waf/api_op_GetChangeTokenStatus.go index 3a3fad316db..76a141ef7a0 100644 --- a/service/waf/api_op_GetChangeTokenStatus.go +++ b/service/waf/api_op_GetChangeTokenStatus.go @@ -20,15 +20,15 @@ import ( // global use. Returns the status of a ChangeToken that you got by calling // GetChangeToken. ChangeTokenStatus is one of the following values: // -// * +// * // PROVISIONED: You requested the change token by calling GetChangeToken, but you // haven't used it yet in a call to create, update, or delete an AWS WAF object. // +// * +// PENDING: AWS WAF is propagating the create, update, or delete request to all AWS +// WAF servers. // -// * PENDING: AWS WAF is propagating the create, update, or delete request to all -// AWS WAF servers. -// -// * INSYNC: Propagation is complete. +// * INSYNC: Propagation is complete. 
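// A minimal polling sketch for the PROVISIONED / PENDING / INSYNC lifecycle
// listed above. It assumes the regenerated field names ChangeToken and
// ChangeTokenStatus and a fixed one-second retry, and compares the status by
// its wire value rather than a named constant; it is illustrative only and is
// not part of the generated code in this patch.
package wafexample

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go-v2/service/waf"
	"github.com/aws/aws-sdk-go-v2/service/waf/types"
)

// waitForInSync polls GetChangeTokenStatus until the change token reports
// INSYNC, i.e. the create, update, or delete has propagated to all AWS WAF
// servers.
func waitForInSync(ctx context.Context, client *waf.Client, token *string) error {
	for {
		out, err := client.GetChangeTokenStatus(ctx, &waf.GetChangeTokenStatusInput{
			ChangeToken: token,
		})
		if err != nil {
			return err
		}
		if out.ChangeTokenStatus == types.ChangeTokenStatus("INSYNC") {
			return nil
		}
		fmt.Println("change still propagating:", out.ChangeTokenStatus)
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(time.Second):
		}
	}
}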
func (c *Client) GetChangeTokenStatus(ctx context.Context, params *GetChangeTokenStatusInput, optFns ...func(*Options)) (*GetChangeTokenStatusOutput, error) { if params == nil { params = &GetChangeTokenStatusInput{} diff --git a/service/waf/api_op_GetIPSet.go b/service/waf/api_op_GetIPSet.go index ffbbf9089bf..b1cfabca0e2 100644 --- a/service/waf/api_op_GetIPSet.go +++ b/service/waf/api_op_GetIPSet.go @@ -47,10 +47,10 @@ type GetIPSetOutput struct { // Information about the IPSet that you specified in the GetIPSet request. For more // information, see the following topics: // - // * IPSet: Contains IPSetDescriptors, + // * IPSet: Contains IPSetDescriptors, // IPSetId, and Name // - // * IPSetDescriptors: Contains an array of IPSetDescriptor + // * IPSetDescriptors: Contains an array of IPSetDescriptor // objects. Each IPSetDescriptor object contains Type and Value IPSet *types.IPSet diff --git a/service/waf/api_op_GetRule.go b/service/waf/api_op_GetRule.go index d3d0e3f10cc..0794744f3c6 100644 --- a/service/waf/api_op_GetRule.go +++ b/service/waf/api_op_GetRule.go @@ -48,11 +48,11 @@ type GetRuleOutput struct { // Information about the Rule that you specified in the GetRule request. For more // information, see the following topics: // - // * Rule: Contains MetricName, Name, - // an array of Predicate objects, and RuleId + // * Rule: Contains MetricName, Name, an + // array of Predicate objects, and RuleId // - // * Predicate: Each Predicate - // object contains DataId, Negated, and Type + // * Predicate: Each Predicate object + // contains DataId, Negated, and Type Rule *types.Rule // Metadata pertaining to the operation's result. diff --git a/service/waf/api_op_GetSampledRequests.go b/service/waf/api_op_GetSampledRequests.go index c559f7306a6..12acbc8501e 100644 --- a/service/waf/api_op_GetSampledRequests.go +++ b/service/waf/api_op_GetSampledRequests.go @@ -54,13 +54,12 @@ type GetSampledRequestsInput struct { // RuleId is one of three values: // - // * The RuleId of the Rule or the RuleGroupId - // of the RuleGroup for which you want GetSampledRequests to return a sample of + // * The RuleId of the Rule or the RuleGroupId of + // the RuleGroup for which you want GetSampledRequests to return a sample of // requests. // - // * Default_Action, which causes GetSampledRequests to return a - // sample of the requests that didn't match any of the rules in the specified - // WebACL. + // * Default_Action, which causes GetSampledRequests to return a sample + // of the requests that didn't match any of the rules in the specified WebACL. // // This member is required. RuleId *string diff --git a/service/waf/api_op_GetSizeConstraintSet.go b/service/waf/api_op_GetSizeConstraintSet.go index f651a4c1113..a419c8b5891 100644 --- a/service/waf/api_op_GetSizeConstraintSet.go +++ b/service/waf/api_op_GetSizeConstraintSet.go @@ -48,15 +48,15 @@ type GetSizeConstraintSetOutput struct { // Information about the SizeConstraintSet that you specified in the // GetSizeConstraintSet request. For more information, see the following topics: // + // * + // SizeConstraintSet: Contains SizeConstraintSetId, SizeConstraints, and Name // - // * SizeConstraintSet: Contains SizeConstraintSetId, SizeConstraints, and Name - // - // - // * SizeConstraints: Contains an array of SizeConstraint objects. Each + // * + // SizeConstraints: Contains an array of SizeConstraint objects. 
Each // SizeConstraint object contains FieldToMatch, TextTransformation, // ComparisonOperator, and Size // - // * FieldToMatch: Contains Data and Type + // * FieldToMatch: Contains Data and Type SizeConstraintSet *types.SizeConstraintSet // Metadata pertaining to the operation's result. diff --git a/service/waf/api_op_GetSqlInjectionMatchSet.go b/service/waf/api_op_GetSqlInjectionMatchSet.go index e7f1b060ad4..df160affbd9 100644 --- a/service/waf/api_op_GetSqlInjectionMatchSet.go +++ b/service/waf/api_op_GetSqlInjectionMatchSet.go @@ -52,14 +52,14 @@ type GetSqlInjectionMatchSetOutput struct { // GetSqlInjectionMatchSet request. For more information, see the following // topics: // - // * SqlInjectionMatchSet: Contains Name, SqlInjectionMatchSetId, and - // an array of SqlInjectionMatchTuple objects + // * SqlInjectionMatchSet: Contains Name, SqlInjectionMatchSetId, and an + // array of SqlInjectionMatchTuple objects // - // * SqlInjectionMatchTuple: Each + // * SqlInjectionMatchTuple: Each // SqlInjectionMatchTuple object contains FieldToMatch and TextTransformation // - // - // * FieldToMatch: Contains Data and Type + // * + // FieldToMatch: Contains Data and Type SqlInjectionMatchSet *types.SqlInjectionMatchSet // Metadata pertaining to the operation's result. diff --git a/service/waf/api_op_GetWebACL.go b/service/waf/api_op_GetWebACL.go index 4da52f2199e..e7c83e33e11 100644 --- a/service/waf/api_op_GetWebACL.go +++ b/service/waf/api_op_GetWebACL.go @@ -47,15 +47,14 @@ type GetWebACLOutput struct { // Information about the WebACL that you specified in the GetWebACL request. For // more information, see the following topics: // - // * WebACL: Contains - // DefaultAction, MetricName, Name, an array of Rule objects, and WebACLId + // * WebACL: Contains DefaultAction, + // MetricName, Name, an array of Rule objects, and WebACLId // - // * - // DefaultAction (Data type is WafAction): Contains Type - // - // * Rules: Contains an - // array of ActivatedRule objects, which contain Action, Priority, and RuleId + // * DefaultAction (Data + // type is WafAction): Contains Type // + // * Rules: Contains an array of ActivatedRule + // objects, which contain Action, Priority, and RuleId // // * Action: Contains Type WebACL *types.WebACL diff --git a/service/waf/api_op_GetXssMatchSet.go b/service/waf/api_op_GetXssMatchSet.go index 23e55ba1b44..ca50f444461 100644 --- a/service/waf/api_op_GetXssMatchSet.go +++ b/service/waf/api_op_GetXssMatchSet.go @@ -49,14 +49,14 @@ type GetXssMatchSetOutput struct { // Information about the XssMatchSet that you specified in the GetXssMatchSet // request. For more information, see the following topics: // - // * XssMatchSet: + // * XssMatchSet: // Contains Name, XssMatchSetId, and an array of XssMatchTuple objects // - // * + // * // XssMatchTuple: Each XssMatchTuple object contains FieldToMatch and // TextTransformation // - // * FieldToMatch: Contains Data and Type + // * FieldToMatch: Contains Data and Type XssMatchSet *types.XssMatchSet // Metadata pertaining to the operation's result. diff --git a/service/waf/api_op_PutLoggingConfiguration.go b/service/waf/api_op_PutLoggingConfiguration.go index e41bf4c1fbf..b890217105f 100644 --- a/service/waf/api_op_PutLoggingConfiguration.go +++ b/service/waf/api_op_PutLoggingConfiguration.go @@ -21,13 +21,13 @@ import ( // access information about all traffic that AWS WAF inspects using the following // steps: // -// * Create an Amazon Kinesis Data Firehose. 
Create the data firehose -// with a PUT source and in the region that you are operating. However, if you are +// * Create an Amazon Kinesis Data Firehose. Create the data firehose with +// a PUT source and in the region that you are operating. However, if you are // capturing logs for Amazon CloudFront, always create the firehose in US East (N. // Virginia). Do not create the data firehose using a Kinesis stream as your // source. // -// * Associate that firehose to your web ACL using a +// * Associate that firehose to your web ACL using a // PutLoggingConfiguration request. // // When you successfully enable logging using a diff --git a/service/waf/api_op_PutPermissionPolicy.go b/service/waf/api_op_PutPermissionPolicy.go index 64605744c88..1a91369d2b2 100644 --- a/service/waf/api_op_PutPermissionPolicy.go +++ b/service/waf/api_op_PutPermissionPolicy.go @@ -20,33 +20,32 @@ import ( // use for this action is to share a RuleGroup across accounts. The // PutPermissionPolicy is subject to the following restrictions: // -// * You can -// attach only one policy with each PutPermissionPolicy request. +// * You can attach +// only one policy with each PutPermissionPolicy request. // -// * The policy -// must include an Effect, Action and Principal. +// * The policy must +// include an Effect, Action and Principal. // -// * Effect must specify -// Allow. +// * Effect must specify Allow. // -// * The Action in the policy must be waf:UpdateWebACL, -// waf-regional:UpdateWebACL, waf:GetRuleGroup and waf-regional:GetRuleGroup . Any -// extra or wildcard actions in the policy will be rejected. +// * The +// Action in the policy must be waf:UpdateWebACL, waf-regional:UpdateWebACL, +// waf:GetRuleGroup and waf-regional:GetRuleGroup . Any extra or wildcard actions +// in the policy will be rejected. // -// * The policy -// cannot include a Resource parameter. +// * The policy cannot include a Resource +// parameter. // -// * The ARN in the request must be a -// valid WAF RuleGroup ARN and the RuleGroup must exist in the same region. +// * The ARN in the request must be a valid WAF RuleGroup ARN and the +// RuleGroup must exist in the same region. // -// * -// The user making the request must be the owner of the RuleGroup. +// * The user making the request must be +// the owner of the RuleGroup. // -// * Your -// policy must be composed using IAM Policy version 2012-10-17. +// * Your policy must be composed using IAM Policy +// version 2012-10-17. // -// For more -// information, see IAM Policies +// For more information, see IAM Policies // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html). An // example of a valid policy parameter is shown in the Examples section below. func (c *Client) PutPermissionPolicy(ctx context.Context, params *PutPermissionPolicyInput, optFns ...func(*Options)) (*PutPermissionPolicyOutput, error) { diff --git a/service/waf/api_op_UpdateByteMatchSet.go b/service/waf/api_op_UpdateByteMatchSet.go index 55195c3f667..ad5e6bd26f4 100644 --- a/service/waf/api_op_UpdateByteMatchSet.go +++ b/service/waf/api_op_UpdateByteMatchSet.go @@ -21,45 +21,44 @@ import ( // ByteMatchSet. For each ByteMatchTuple object, you specify the following // values: // -// * Whether to insert or delete the object from the array. If you -// want to change a ByteMatchSetUpdate object, you delete the existing object and -// add a new one. +// * Whether to insert or delete the object from the array. 
If you want to +// change a ByteMatchSetUpdate object, you delete the existing object and add a new +// one. // -// * The part of a web request that you want AWS WAF to -// inspect, such as a query string or the value of the User-Agent header. +// * The part of a web request that you want AWS WAF to inspect, such as a +// query string or the value of the User-Agent header. // -// * -// The bytes (typically a string that corresponds with ASCII characters) that you -// want AWS WAF to look for. For more information, including how you specify the -// values for the AWS WAF API and the AWS CLI or SDKs, see TargetString in the -// ByteMatchTuple data type. +// * The bytes (typically a +// string that corresponds with ASCII characters) that you want AWS WAF to look +// for. For more information, including how you specify the values for the AWS WAF +// API and the AWS CLI or SDKs, see TargetString in the ByteMatchTuple data +// type. // -// * Where to look, such as at the beginning or the -// end of a query string. -// -// * Whether to perform any conversions on the request, -// such as converting it to lowercase, before inspecting it for the specified +// * Where to look, such as at the beginning or the end of a query // string. // -// For example, you can add a ByteMatchSetUpdate object that matches web -// requests in which User-Agent headers contain the string BadBot. You can then -// configure AWS WAF to block those requests. To create and configure a -// ByteMatchSet, perform the following steps: +// * Whether to perform any conversions on the request, such as converting +// it to lowercase, before inspecting it for the specified string. +// +// For example, +// you can add a ByteMatchSetUpdate object that matches web requests in which +// User-Agent headers contain the string BadBot. You can then configure AWS WAF to +// block those requests. To create and configure a ByteMatchSet, perform the +// following steps: // -// * Create a ByteMatchSet. For -// more information, see CreateByteMatchSet. +// * Create a ByteMatchSet. For more information, see +// CreateByteMatchSet. // -// * Use GetChangeToken to get the -// change token that you provide in the ChangeToken parameter of an -// UpdateByteMatchSet request. +// * Use GetChangeToken to get the change token that you +// provide in the ChangeToken parameter of an UpdateByteMatchSet request. // -// * Submit an UpdateByteMatchSet request to -// specify the part of the request that you want AWS WAF to inspect (for example, -// the header or the URI) and the value that you want AWS WAF to watch for. +// * Submit +// an UpdateByteMatchSet request to specify the part of the request that you want +// AWS WAF to inspect (for example, the header or the URI) and the value that you +// want AWS WAF to watch for. // -// For -// more information about how to use the AWS WAF API to allow or block HTTP -// requests, see the AWS WAF Developer Guide +// For more information about how to use the AWS WAF +// API to allow or block HTTP requests, see the AWS WAF Developer Guide // (https://docs.aws.amazon.com/waf/latest/developerguide/). func (c *Client) UpdateByteMatchSet(ctx context.Context, params *UpdateByteMatchSetInput, optFns ...func(*Options)) (*UpdateByteMatchSetOutput, error) { if params == nil { @@ -92,14 +91,14 @@ type UpdateByteMatchSetInput struct { // An array of ByteMatchSetUpdate objects that you want to insert into or delete // from a ByteMatchSet. 
For more information, see the applicable data types: // - // * + // * // ByteMatchSetUpdate: Contains Action and ByteMatchTuple // - // * ByteMatchTuple: + // * ByteMatchTuple: // Contains FieldToMatch, PositionalConstraint, TargetString, and // TextTransformation // - // * FieldToMatch: Contains Data and Type + // * FieldToMatch: Contains Data and Type // // This member is required. Updates []*types.ByteMatchSetUpdate diff --git a/service/waf/api_op_UpdateGeoMatchSet.go b/service/waf/api_op_UpdateGeoMatchSet.go index 6eb17147f82..f2022e53e22 100644 --- a/service/waf/api_op_UpdateGeoMatchSet.go +++ b/service/waf/api_op_UpdateGeoMatchSet.go @@ -20,27 +20,27 @@ import ( // global use. Inserts or deletes GeoMatchConstraint objects in an GeoMatchSet. For // each GeoMatchConstraint object, you specify the following values: // -// * Whether -// to insert or delete the object from the array. If you want to change an +// * Whether to +// insert or delete the object from the array. If you want to change an // GeoMatchConstraint object, you delete the existing object and add a new one. // +// * +// The Type. The only valid value for Type is Country. // -// * The Type. The only valid value for Type is Country. -// -// * The Value, which is -// a two character code for the country to add to the GeoMatchConstraint object. -// Valid codes are listed in GeoMatchConstraint$Value. +// * The Value, which is a two +// character code for the country to add to the GeoMatchConstraint object. Valid +// codes are listed in GeoMatchConstraint$Value. // // To create and configure an // GeoMatchSet, perform the following steps: // -// * Submit a CreateGeoMatchSet +// * Submit a CreateGeoMatchSet // request. // -// * Use GetChangeToken to get the change token that you provide in -// the ChangeToken parameter of an UpdateGeoMatchSet request. +// * Use GetChangeToken to get the change token that you provide in the +// ChangeToken parameter of an UpdateGeoMatchSet request. // -// * Submit an +// * Submit an // UpdateGeoMatchSet request to specify the country that you want AWS WAF to watch // for. // @@ -80,12 +80,12 @@ type UpdateGeoMatchSetInput struct { // An array of GeoMatchSetUpdate objects that you want to insert into or delete // from an GeoMatchSet. For more information, see the applicable data types: // - // * + // * // GeoMatchSetUpdate: Contains Action and GeoMatchConstraint // - // * - // GeoMatchConstraint: Contains Type and Value You can have only one Type and Value - // per GeoMatchConstraint. To add multiple countries, include multiple + // * GeoMatchConstraint: + // Contains Type and Value You can have only one Type and Value per + // GeoMatchConstraint. To add multiple countries, include multiple // GeoMatchSetUpdate objects in your request. // // This member is required. diff --git a/service/waf/api_op_UpdateIPSet.go b/service/waf/api_op_UpdateIPSet.go index fa5b788a5f6..fb6aace77b9 100644 --- a/service/waf/api_op_UpdateIPSet.go +++ b/service/waf/api_op_UpdateIPSet.go @@ -20,50 +20,50 @@ import ( // global use. Inserts or deletes IPSetDescriptor objects in an IPSet. For each // IPSetDescriptor object, you specify the following values: // -// * Whether to -// insert or delete the object from the array. If you want to change an -// IPSetDescriptor object, you delete the existing object and add a new one. +// * Whether to insert +// or delete the object from the array. If you want to change an IPSetDescriptor +// object, you delete the existing object and add a new one. 
// -// * -// The IP address version, IPv4 or IPv6. +// * The IP address +// version, IPv4 or IPv6. // -// * The IP address in CIDR notation, -// for example, 192.0.2.0/24 (for the range of IP addresses from 192.0.2.0 to -// 192.0.2.255) or 192.0.2.44/32 (for the individual IP address 192.0.2.44). +// * The IP address in CIDR notation, for example, +// 192.0.2.0/24 (for the range of IP addresses from 192.0.2.0 to 192.0.2.255) or +// 192.0.2.44/32 (for the individual IP address 192.0.2.44). // -// AWS -// WAF supports IPv4 address ranges: /8 and any range between /16 through /32. AWS -// WAF supports IPv6 address ranges: /24, /32, /48, /56, /64, and /128. For more -// information about CIDR notation, see the Wikipedia entry Classless Inter-Domain -// Routing (https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing). IPv6 -// addresses can be represented using any of the following formats: +// AWS WAF supports IPv4 +// address ranges: /8 and any range between /16 through /32. AWS WAF supports IPv6 +// address ranges: /24, /32, /48, /56, /64, and /128. For more information about +// CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing +// (https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing). IPv6 addresses +// can be represented using any of the following formats: // -// * +// * // 1111:0000:0000:0000:0000:0000:0000:0111/128 // -// * 1111:0:0:0:0:0:0:0111/128 +// * 1111:0:0:0:0:0:0:0111/128 // +// * +// 1111::0111/128 // -// * 1111::0111/128 +// * 1111::111/128 // -// * 1111::111/128 -// -// You use an IPSet to specify which web -// requests you want to allow or block based on the IP addresses that the requests +// You use an IPSet to specify which web requests +// you want to allow or block based on the IP addresses that the requests // originated from. For example, if you're receiving a lot of requests from one or // a small number of IP addresses and you want to block the requests, you can // create an IPSet that specifies those IP addresses, and then configure AWS WAF to // block the requests. To create and configure an IPSet, perform the following // steps: // -// * Submit a CreateIPSet request. +// * Submit a CreateIPSet request. // -// * Use GetChangeToken to get the -// change token that you provide in the ChangeToken parameter of an UpdateIPSet +// * Use GetChangeToken to get the change +// token that you provide in the ChangeToken parameter of an UpdateIPSet // request. // -// * Submit an UpdateIPSet request to specify the IP addresses that -// you want AWS WAF to watch for. +// * Submit an UpdateIPSet request to specify the IP addresses that you +// want AWS WAF to watch for. // // When you update an IPSet, you specify the IP // addresses that you want to add and/or the IP addresses that you want to delete. @@ -103,10 +103,10 @@ type UpdateIPSetInput struct { // An array of IPSetUpdate objects that you want to insert into or delete from an // IPSet. For more information, see the applicable data types: // - // * IPSetUpdate: + // * IPSetUpdate: // Contains Action and IPSetDescriptor // - // * IPSetDescriptor: Contains Type and + // * IPSetDescriptor: Contains Type and // Value // // You can insert a maximum of 1000 addresses in a single request. 
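The UpdateIPSet documentation reflowed above describes the same three-step workflow used throughout this service: create the set, call GetChangeToken, then submit the update with that token. A minimal sketch of that flow against the regenerated client follows; the helper name, the 192.0.2.44/32 example address taken from the comment, and the enum values written as string conversions (to avoid depending on particular constant spellings) are illustrative assumptions, not part of the generated code.

package wafexample

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/waf"
	"github.com/aws/aws-sdk-go-v2/service/waf/types"
)

// blockAddress inserts a single IPv4 /32 descriptor into an existing IPSet,
// following the GetChangeToken -> UpdateIPSet sequence described in the
// documentation above. Field shapes mirror the regenerated types
// (IPSetUpdate: Action and IPSetDescriptor; IPSetDescriptor: Type and Value).
func blockAddress(ctx context.Context, client *waf.Client, ipSetID string) error {
	token, err := client.GetChangeToken(ctx, &waf.GetChangeTokenInput{})
	if err != nil {
		return err
	}

	_, err = client.UpdateIPSet(ctx, &waf.UpdateIPSetInput{
		ChangeToken: token.ChangeToken,
		IPSetId:     aws.String(ipSetID),
		Updates: []*types.IPSetUpdate{
			{
				Action: types.ChangeAction("INSERT"),
				IPSetDescriptor: &types.IPSetDescriptor{
					Type:  types.IPSetDescriptorType("IPV4"),
					Value: aws.String("192.0.2.44/32"),
				},
			},
		},
	})
	return err
}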
diff --git a/service/waf/api_op_UpdateRateBasedRule.go b/service/waf/api_op_UpdateRateBasedRule.go index 97e39e27cf3..324cec2f326 100644 --- a/service/waf/api_op_UpdateRateBasedRule.go +++ b/service/waf/api_op_UpdateRateBasedRule.go @@ -25,29 +25,29 @@ import ( // request must match all the predicates and exceed the RateLimit to be counted or // blocked. For example, suppose you add the following to a RateBasedRule: // -// * -// An IPSet that matches the IP address 192.0.2.44/32 +// * An +// IPSet that matches the IP address 192.0.2.44/32 // -// * A ByteMatchSet that -// matches BadBot in the User-Agent header +// * A ByteMatchSet that matches +// BadBot in the User-Agent header // -// Further, you specify a RateLimit of -// 1,000. You then add the RateBasedRule to a WebACL and specify that you want to -// block requests that satisfy the rule. For a request to be blocked, it must come -// from the IP address 192.0.2.44 and the User-Agent header in the request must -// contain the value BadBot. Further, requests that match these two conditions much -// be received at a rate of more than 1,000 every five minutes. If the rate drops +// Further, you specify a RateLimit of 1,000. You +// then add the RateBasedRule to a WebACL and specify that you want to block +// requests that satisfy the rule. For a request to be blocked, it must come from +// the IP address 192.0.2.44 and the User-Agent header in the request must contain +// the value BadBot. Further, requests that match these two conditions much be +// received at a rate of more than 1,000 every five minutes. If the rate drops // below this limit, AWS WAF no longer blocks the requests. As a second example, // suppose you want to limit requests to a particular page on your site. To do // this, you could add the following to a RateBasedRule: // -// * A ByteMatchSet with +// * A ByteMatchSet with // FieldToMatch of URI // -// * A PositionalConstraint of STARTS_WITH +// * A PositionalConstraint of STARTS_WITH // -// * A -// TargetString of login +// * A TargetString +// of login // // Further, you specify a RateLimit of 1,000. By adding this // RateBasedRule to a WebACL, you could limit requests to your login page without diff --git a/service/waf/api_op_UpdateRegexMatchSet.go b/service/waf/api_op_UpdateRegexMatchSet.go index 88d39ca4c4a..480ca74aead 100644 --- a/service/waf/api_op_UpdateRegexMatchSet.go +++ b/service/waf/api_op_UpdateRegexMatchSet.go @@ -21,41 +21,40 @@ import ( // RegexMatchSet. For each RegexMatchSetUpdate object, you specify the following // values: // -// * Whether to insert or delete the object from the array. If you -// want to change a RegexMatchSetUpdate object, you delete the existing object and -// add a new one. +// * Whether to insert or delete the object from the array. If you want to +// change a RegexMatchSetUpdate object, you delete the existing object and add a +// new one. // -// * The part of a web request that you want AWS WAF to -// inspectupdate, such as a query string or the value of the User-Agent header. +// * The part of a web request that you want AWS WAF to inspectupdate, +// such as a query string or the value of the User-Agent header. // +// * The identifier +// of the pattern (a regular expression) that you want AWS WAF to look for. For +// more information, see RegexPatternSet. // -// * The identifier of the pattern (a regular expression) that you want AWS WAF to -// look for. For more information, see RegexPatternSet. 
+// * Whether to perform any conversions on +// the request, such as converting it to lowercase, before inspecting it for the +// specified string. // -// * Whether to perform -// any conversions on the request, such as converting it to lowercase, before -// inspecting it for the specified string. +// For example, you can create a RegexPatternSet that matches +// any requests with User-Agent headers that contain the string B[a@]dB[o0]t. You +// can then configure AWS WAF to reject those requests. To create and configure a +// RegexMatchSet, perform the following steps: // -// For example, you can create a -// RegexPatternSet that matches any requests with User-Agent headers that contain -// the string B[a@]dB[o0]t. You can then configure AWS WAF to reject those -// requests. To create and configure a RegexMatchSet, perform the following -// steps: +// * Create a RegexMatchSet. For more +// information, see CreateRegexMatchSet. // -// * Create a RegexMatchSet. For more information, see -// CreateRegexMatchSet. +// * Use GetChangeToken to get the change +// token that you provide in the ChangeToken parameter of an UpdateRegexMatchSet +// request. // -// * Use GetChangeToken to get the change token that you -// provide in the ChangeToken parameter of an UpdateRegexMatchSet request. +// * Submit an UpdateRegexMatchSet request to specify the part of the +// request that you want AWS WAF to inspect (for example, the header or the URI) +// and the identifier of the RegexPatternSet that contain the regular expression +// patters you want AWS WAF to watch for. // -// * -// Submit an UpdateRegexMatchSet request to specify the part of the request that -// you want AWS WAF to inspect (for example, the header or the URI) and the -// identifier of the RegexPatternSet that contain the regular expression patters -// you want AWS WAF to watch for. -// -// For more information about how to use the AWS -// WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide +// For more information about how to use +// the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide // (https://docs.aws.amazon.com/waf/latest/developerguide/). func (c *Client) UpdateRegexMatchSet(ctx context.Context, params *UpdateRegexMatchSetInput, optFns ...func(*Options)) (*UpdateRegexMatchSetOutput, error) { if params == nil { diff --git a/service/waf/api_op_UpdateRegexPatternSet.go b/service/waf/api_op_UpdateRegexPatternSet.go index 23d45fefdcc..ab13a4a3cf5 100644 --- a/service/waf/api_op_UpdateRegexPatternSet.go +++ b/service/waf/api_op_UpdateRegexPatternSet.go @@ -20,42 +20,41 @@ import ( // global use. Inserts or deletes RegexPatternString objects in a RegexPatternSet. // For each RegexPatternString object, you specify the following values: // -// * -// Whether to insert or delete the RegexPatternString. +// * Whether +// to insert or delete the RegexPatternString. // -// * The regular -// expression pattern that you want to insert or delete. For more information, see +// * The regular expression pattern +// that you want to insert or delete. For more information, see // RegexPatternSet. // // For example, you can create a RegexPatternString such as // B[a@]dB[o0]t. 
AWS WAF will match this RegexPatternString to: // -// * BadBot +// * BadBot // +// * +// BadB0t // -// * BadB0t +// * B@dBot // -// * B@dBot +// * B@dB0t // -// * B@dB0t +// To create and configure a RegexPatternSet, perform +// the following steps: // -// To create and configure a RegexPatternSet, -// perform the following steps: +// * Create a RegexPatternSet. For more information, see +// CreateRegexPatternSet. // -// * Create a RegexPatternSet. For more -// information, see CreateRegexPatternSet. +// * Use GetChangeToken to get the change token that you +// provide in the ChangeToken parameter of an UpdateRegexPatternSet request. // -// * Use GetChangeToken to get the -// change token that you provide in the ChangeToken parameter of an -// UpdateRegexPatternSet request. +// * +// Submit an UpdateRegexPatternSet request to specify the regular expression +// pattern that you want AWS WAF to watch for. // -// * Submit an UpdateRegexPatternSet request to -// specify the regular expression pattern that you want AWS WAF to watch for. -// -// For -// more information about how to use the AWS WAF API to allow or block HTTP -// requests, see the AWS WAF Developer Guide -// (https://docs.aws.amazon.com/waf/latest/developerguide/). +// For more information about how to +// use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer +// Guide (https://docs.aws.amazon.com/waf/latest/developerguide/). func (c *Client) UpdateRegexPatternSet(ctx context.Context, params *UpdateRegexPatternSetInput, optFns ...func(*Options)) (*UpdateRegexPatternSetOutput, error) { if params == nil { params = &UpdateRegexPatternSetInput{} diff --git a/service/waf/api_op_UpdateRule.go b/service/waf/api_op_UpdateRule.go index 25f2e80431a..85c3c8f3d87 100644 --- a/service/waf/api_op_UpdateRule.go +++ b/service/waf/api_op_UpdateRule.go @@ -24,37 +24,37 @@ import ( // specifications to be allowed, blocked, or counted. For example, suppose that you // add the following to a Rule: // -// * A ByteMatchSet that matches the value BadBot -// in the User-Agent header +// * A ByteMatchSet that matches the value BadBot in +// the User-Agent header // -// * An IPSet that matches the IP address -// 192.0.2.44 +// * An IPSet that matches the IP address 192.0.2.44 // -// You then add the Rule to a WebACL and specify that you want to block -// requests that satisfy the Rule. For a request to be blocked, the User-Agent -// header in the request must contain the value BadBot and the request must -// originate from the IP address 192.0.2.44. To create and configure a Rule, -// perform the following steps: +// You +// then add the Rule to a WebACL and specify that you want to block requests that +// satisfy the Rule. For a request to be blocked, the User-Agent header in the +// request must contain the value BadBot and the request must originate from the IP +// address 192.0.2.44. To create and configure a Rule, perform the following +// steps: // -// * Create and update the predicates that you -// want to include in the Rule. +// * Create and update the predicates that you want to include in the +// Rule. // -// * Create the Rule. See CreateRule. +// * Create the Rule. See CreateRule. // -// * Use -// GetChangeToken to get the change token that you provide in the ChangeToken -// parameter of an UpdateRule request. +// * Use GetChangeToken to get the +// change token that you provide in the ChangeToken parameter of an UpdateRule +// request. // -// * Submit an UpdateRule request to add -// predicates to the Rule. 
+// * Submit an UpdateRule request to add predicates to the Rule. // -// * Create and update a WebACL that contains the -// Rule. See CreateWebACL. +// * +// Create and update a WebACL that contains the Rule. See CreateWebACL. // -// If you want to replace one ByteMatchSet or IPSet with -// another, you delete the existing one and add the new one. For more information -// about how to use the AWS WAF API to allow or block HTTP requests, see the AWS -// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/). +// If you +// want to replace one ByteMatchSet or IPSet with another, you delete the existing +// one and add the new one. For more information about how to use the AWS WAF API +// to allow or block HTTP requests, see the AWS WAF Developer Guide +// (https://docs.aws.amazon.com/waf/latest/developerguide/). func (c *Client) UpdateRule(ctx context.Context, params *UpdateRuleInput, optFns ...func(*Options)) (*UpdateRuleOutput, error) { if params == nil { params = &UpdateRuleInput{} @@ -86,13 +86,13 @@ type UpdateRuleInput struct { // An array of RuleUpdate objects that you want to insert into or delete from a // Rule. For more information, see the applicable data types: // - // * RuleUpdate: + // * RuleUpdate: // Contains Action and Predicate // - // * Predicate: Contains DataId, Negated, and + // * Predicate: Contains DataId, Negated, and // Type // - // * FieldToMatch: Contains Data and Type + // * FieldToMatch: Contains Data and Type // // This member is required. Updates []*types.RuleUpdate diff --git a/service/waf/api_op_UpdateRuleGroup.go b/service/waf/api_op_UpdateRuleGroup.go index bdde43ca6aa..eac480b116f 100644 --- a/service/waf/api_op_UpdateRuleGroup.go +++ b/service/waf/api_op_UpdateRuleGroup.go @@ -22,22 +22,22 @@ import ( // per rule group. To create and configure a RuleGroup, perform the following // steps: // -// * Create and update the Rules that you want to include in the -// RuleGroup. See CreateRule. +// * Create and update the Rules that you want to include in the RuleGroup. +// See CreateRule. // -// * Use GetChangeToken to get the change token -// that you provide in the ChangeToken parameter of an UpdateRuleGroup request. +// * Use GetChangeToken to get the change token that you provide +// in the ChangeToken parameter of an UpdateRuleGroup request. // +// * Submit an +// UpdateRuleGroup request to add Rules to the RuleGroup. // -// * Submit an UpdateRuleGroup request to add Rules to the RuleGroup. +// * Create and update a +// WebACL that contains the RuleGroup. See CreateWebACL. // -// * Create -// and update a WebACL that contains the RuleGroup. See CreateWebACL. -// -// If you want -// to replace one Rule with another, you delete the existing one and add the new -// one. For more information about how to use the AWS WAF API to allow or block -// HTTP requests, see the AWS WAF Developer Guide +// If you want to replace +// one Rule with another, you delete the existing one and add the new one. For more +// information about how to use the AWS WAF API to allow or block HTTP requests, +// see the AWS WAF Developer Guide // (https://docs.aws.amazon.com/waf/latest/developerguide/). 
func (c *Client) UpdateRuleGroup(ctx context.Context, params *UpdateRuleGroupInput, optFns ...func(*Options)) (*UpdateRuleGroupOutput, error) { if params == nil { diff --git a/service/waf/api_op_UpdateSizeConstraintSet.go b/service/waf/api_op_UpdateSizeConstraintSet.go index 9cfea81605f..302fb01cb62 100644 --- a/service/waf/api_op_UpdateSizeConstraintSet.go +++ b/service/waf/api_op_UpdateSizeConstraintSet.go @@ -21,48 +21,48 @@ import ( // SizeConstraintSet. For each SizeConstraint object, you specify the following // values: // -// * Whether to insert or delete the object from the array. If you -// want to change a SizeConstraintSetUpdate object, you delete the existing object -// and add a new one. +// * Whether to insert or delete the object from the array. If you want to +// change a SizeConstraintSetUpdate object, you delete the existing object and add +// a new one. // -// * The part of a web request that you want AWS WAF to -// evaluate, such as the length of a query string or the length of the User-Agent -// header. +// * The part of a web request that you want AWS WAF to evaluate, such +// as the length of a query string or the length of the User-Agent header. // -// * Whether to perform any transformations on the request, such as -// converting it to lowercase, before checking its length. Note that -// transformations of the request body are not supported because the AWS resource -// forwards only the first 8192 bytes of your request to AWS WAF. You can only -// specify a single type of TextTransformation. +// * +// Whether to perform any transformations on the request, such as converting it to +// lowercase, before checking its length. Note that transformations of the request +// body are not supported because the AWS resource forwards only the first 8192 +// bytes of your request to AWS WAF. You can only specify a single type of +// TextTransformation. // -// * A ComparisonOperator used -// for evaluating the selected part of the request against the specified Size, such -// as equals, greater than, less than, and so on. +// * A ComparisonOperator used for evaluating the selected +// part of the request against the specified Size, such as equals, greater than, +// less than, and so on. // -// * The length, in bytes, that -// you want AWS WAF to watch for in selected part of the request. The length is -// computed after applying the transformation. +// * The length, in bytes, that you want AWS WAF to watch +// for in selected part of the request. The length is computed after applying the +// transformation. // -// For example, you can add a -// SizeConstraintSetUpdate object that matches web requests in which the length of -// the User-Agent header is greater than 100 bytes. You can then configure AWS WAF -// to block those requests. To create and configure a SizeConstraintSet, perform -// the following steps: +// For example, you can add a SizeConstraintSetUpdate object that +// matches web requests in which the length of the User-Agent header is greater +// than 100 bytes. You can then configure AWS WAF to block those requests. To +// create and configure a SizeConstraintSet, perform the following steps: // -// * Create a SizeConstraintSet. For more information, -// see CreateSizeConstraintSet. +// * Create +// a SizeConstraintSet. For more information, see CreateSizeConstraintSet. // -// * Use GetChangeToken to get the change token -// that you provide in the ChangeToken parameter of an UpdateSizeConstraintSet -// request. 
+// * Use +// GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of an UpdateSizeConstraintSet request. // -// * Submit an UpdateSizeConstraintSet request to specify the part of -// the request that you want AWS WAF to inspect (for example, the header or the -// URI) and the value that you want AWS WAF to watch for. +// * Submit an +// UpdateSizeConstraintSet request to specify the part of the request that you want +// AWS WAF to inspect (for example, the header or the URI) and the value that you +// want AWS WAF to watch for. // -// For more information -// about how to use the AWS WAF API to allow or block HTTP requests, see the AWS -// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/). +// For more information about how to use the AWS WAF +// API to allow or block HTTP requests, see the AWS WAF Developer Guide +// (https://docs.aws.amazon.com/waf/latest/developerguide/). func (c *Client) UpdateSizeConstraintSet(ctx context.Context, params *UpdateSizeConstraintSetInput, optFns ...func(*Options)) (*UpdateSizeConstraintSetOutput, error) { if params == nil { params = &UpdateSizeConstraintSetInput{} @@ -96,13 +96,13 @@ type UpdateSizeConstraintSetInput struct { // delete from a SizeConstraintSet. For more information, see the applicable data // types: // - // * SizeConstraintSetUpdate: Contains Action and SizeConstraint + // * SizeConstraintSetUpdate: Contains Action and SizeConstraint // - // * + // * // SizeConstraint: Contains FieldToMatch, TextTransformation, ComparisonOperator, // and Size // - // * FieldToMatch: Contains Data and Type + // * FieldToMatch: Contains Data and Type // // This member is required. Updates []*types.SizeConstraintSetUpdate diff --git a/service/waf/api_op_UpdateSqlInjectionMatchSet.go b/service/waf/api_op_UpdateSqlInjectionMatchSet.go index 40c4903afe2..1f6f4300c7d 100644 --- a/service/waf/api_op_UpdateSqlInjectionMatchSet.go +++ b/service/waf/api_op_UpdateSqlInjectionMatchSet.go @@ -21,15 +21,15 @@ import ( // SqlInjectionMatchSet. For each SqlInjectionMatchTuple object, you specify the // following values: // -// * Action: Whether to insert the object into or delete the +// * Action: Whether to insert the object into or delete the // object from the array. To change a SqlInjectionMatchTuple, you delete the // existing object and add a new one. // -// * FieldToMatch: The part of web requests +// * FieldToMatch: The part of web requests // that you want AWS WAF to inspect and, if you want AWS WAF to inspect a header or // custom query parameter, the name of the header or parameter. // -// * +// * // TextTransformation: Which text transformation, if any, to perform on the web // request before inspecting the request for snippets of malicious SQL code. You // can only specify a single type of TextTransformation. @@ -42,19 +42,19 @@ import ( // and then configure AWS WAF to block the requests. To create and configure a // SqlInjectionMatchSet, perform the following steps: // -// * Submit a +// * Submit a // CreateSqlInjectionMatchSet request. // -// * Use GetChangeToken to get the change -// token that you provide in the ChangeToken parameter of an UpdateIPSet request. +// * Use GetChangeToken to get the change +// token that you provide in the ChangeToken parameter of an UpdateIPSet +// request. // +// * Submit an UpdateSqlInjectionMatchSet request to specify the parts of +// web requests that you want AWS WAF to inspect for snippets of SQL code. 
// -// * Submit an UpdateSqlInjectionMatchSet request to specify the parts of web -// requests that you want AWS WAF to inspect for snippets of SQL code. -// -// For more -// information about how to use the AWS WAF API to allow or block HTTP requests, -// see the AWS WAF Developer Guide +// For +// more information about how to use the AWS WAF API to allow or block HTTP +// requests, see the AWS WAF Developer Guide // (https://docs.aws.amazon.com/waf/latest/developerguide/). func (c *Client) UpdateSqlInjectionMatchSet(ctx context.Context, params *UpdateSqlInjectionMatchSetInput, optFns ...func(*Options)) (*UpdateSqlInjectionMatchSetOutput, error) { if params == nil { @@ -90,13 +90,13 @@ type UpdateSqlInjectionMatchSetInput struct { // delete from a SqlInjectionMatchSet. For more information, see the applicable // data types: // - // * SqlInjectionMatchSetUpdate: Contains Action and + // * SqlInjectionMatchSetUpdate: Contains Action and // SqlInjectionMatchTuple // - // * SqlInjectionMatchTuple: Contains FieldToMatch and + // * SqlInjectionMatchTuple: Contains FieldToMatch and // TextTransformation // - // * FieldToMatch: Contains Data and Type + // * FieldToMatch: Contains Data and Type // // This member is required. Updates []*types.SqlInjectionMatchSetUpdate diff --git a/service/waf/api_op_UpdateWebACL.go b/service/waf/api_op_UpdateWebACL.go index 017ba483b6e..a0e8495e80c 100644 --- a/service/waf/api_op_UpdateWebACL.go +++ b/service/waf/api_op_UpdateWebACL.go @@ -21,61 +21,61 @@ import ( // identifies web requests that you want to allow, block, or count. When you update // a WebACL, you specify the following values: // -// * A default action for the -// WebACL, either ALLOW or BLOCK. AWS WAF performs the default action if a request -// doesn't match the criteria in any of the Rules in a WebACL. +// * A default action for the WebACL, +// either ALLOW or BLOCK. AWS WAF performs the default action if a request doesn't +// match the criteria in any of the Rules in a WebACL. // -// * The Rules -// that you want to add or delete. If you want to replace one Rule with another, -// you delete the existing Rule and add the new one. +// * The Rules that you want +// to add or delete. If you want to replace one Rule with another, you delete the +// existing Rule and add the new one. // -// * For each Rule, whether -// you want AWS WAF to allow requests, block requests, or count requests that match -// the conditions in the Rule. +// * For each Rule, whether you want AWS WAF to +// allow requests, block requests, or count requests that match the conditions in +// the Rule. // -// * The order in which you want AWS WAF to -// evaluate the Rules in a WebACL. If you add more than one Rule to a WebACL, AWS -// WAF evaluates each request against the Rules in order based on the value of -// Priority. (The Rule that has the lowest value for Priority is evaluated first.) -// When a web request matches all the predicates (such as ByteMatchSets and IPSets) -// in a Rule, AWS WAF immediately takes the corresponding action, allow or block, -// and doesn't evaluate the request against the remaining Rules in the WebACL, if -// any. +// * The order in which you want AWS WAF to evaluate the Rules in a +// WebACL. If you add more than one Rule to a WebACL, AWS WAF evaluates each +// request against the Rules in order based on the value of Priority. (The Rule +// that has the lowest value for Priority is evaluated first.) 
When a web request +// matches all the predicates (such as ByteMatchSets and IPSets) in a Rule, AWS WAF +// immediately takes the corresponding action, allow or block, and doesn't evaluate +// the request against the remaining Rules in the WebACL, if any. // -// To create and configure a WebACL, perform the following steps: +// To create and +// configure a WebACL, perform the following steps: // -// * -// Create and update the predicates that you want to include in Rules. For more -// information, see CreateByteMatchSet, UpdateByteMatchSet, CreateIPSet, -// UpdateIPSet, CreateSqlInjectionMatchSet, and UpdateSqlInjectionMatchSet. +// * Create and update the +// predicates that you want to include in Rules. For more information, see +// CreateByteMatchSet, UpdateByteMatchSet, CreateIPSet, UpdateIPSet, +// CreateSqlInjectionMatchSet, and UpdateSqlInjectionMatchSet. // -// * -// Create and update the Rules that you want to include in the WebACL. For more -// information, see CreateRule and UpdateRule. +// * Create and update +// the Rules that you want to include in the WebACL. For more information, see +// CreateRule and UpdateRule. // -// * Create a WebACL. See -// CreateWebACL. +// * Create a WebACL. See CreateWebACL. // -// * Use GetChangeToken to get the change token that you provide -// in the ChangeToken parameter of an UpdateWebACL request. +// * Use +// GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of an UpdateWebACL request. // -// * Submit an -// UpdateWebACL request to specify the Rules that you want to include in the -// WebACL, to specify the default action, and to associate the WebACL with a -// CloudFront distribution. The ActivatedRule can be a rule group. If you specify a -// rule group as your ActivatedRule , you can exclude specific rules from that rule -// group. If you already have a rule group associated with a web ACL and want to -// submit an UpdateWebACL request to exclude certain rules from that rule group, -// you must first remove the rule group from the web ACL, the re-insert it again, -// specifying the excluded rules. For details, see ActivatedRule$ExcludedRules -// . +// * Submit an UpdateWebACL request to +// specify the Rules that you want to include in the WebACL, to specify the default +// action, and to associate the WebACL with a CloudFront distribution. The +// ActivatedRule can be a rule group. If you specify a rule group as your +// ActivatedRule , you can exclude specific rules from that rule group. If you +// already have a rule group associated with a web ACL and want to submit an +// UpdateWebACL request to exclude certain rules from that rule group, you must +// first remove the rule group from the web ACL, the re-insert it again, specifying +// the excluded rules. For details, see ActivatedRule$ExcludedRules . // -// Be aware that if you try to add a RATE_BASED rule to a web ACL without -// setting the rule type when first creating the rule, the UpdateWebACL request -// will fail because the request tries to add a REGULAR rule (the default rule -// type) with the specified ID, which does not exist. For more information about -// how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF -// Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/). 
+// Be aware +// that if you try to add a RATE_BASED rule to a web ACL without setting the rule +// type when first creating the rule, the UpdateWebACL request will fail because +// the request tries to add a REGULAR rule (the default rule type) with the +// specified ID, which does not exist. For more information about how to use the +// AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide +// (https://docs.aws.amazon.com/waf/latest/developerguide/). func (c *Client) UpdateWebACL(ctx context.Context, params *UpdateWebACLInput, optFns ...func(*Options)) (*UpdateWebACLOutput, error) { if params == nil { params = &UpdateWebACLInput{} @@ -113,16 +113,16 @@ type UpdateWebACLInput struct { // you want to insert into or delete from a WebACL. For more information, see the // applicable data types: // - // * WebACLUpdate: Contains Action and ActivatedRule + // * WebACLUpdate: Contains Action and ActivatedRule // - // - // * ActivatedRule: Contains Action, OverrideAction, Priority, RuleId, and Type. + // * + // ActivatedRule: Contains Action, OverrideAction, Priority, RuleId, and Type. // ActivatedRule|OverrideAction applies only when updating or adding a RuleGroup to // a WebACL. In this case, you do not use ActivatedRule|Action. For all other // update requests, ActivatedRule|Action is used instead of // ActivatedRule|OverrideAction. // - // * WafAction: Contains Type + // * WafAction: Contains Type Updates []*types.WebACLUpdate } diff --git a/service/waf/api_op_UpdateXssMatchSet.go b/service/waf/api_op_UpdateXssMatchSet.go index 98a6ad5b3f8..c4df176f309 100644 --- a/service/waf/api_op_UpdateXssMatchSet.go +++ b/service/waf/api_op_UpdateXssMatchSet.go @@ -20,18 +20,18 @@ import ( // global use. Inserts or deletes XssMatchTuple objects (filters) in an // XssMatchSet. For each XssMatchTuple object, you specify the following values: // -// -// * Action: Whether to insert the object into or delete the object from the array. +// * +// Action: Whether to insert the object into or delete the object from the array. // To change an XssMatchTuple, you delete the existing object and add a new one. // +// * +// FieldToMatch: The part of web requests that you want AWS WAF to inspect and, if +// you want AWS WAF to inspect a header or custom query parameter, the name of the +// header or parameter. // -// * FieldToMatch: The part of web requests that you want AWS WAF to inspect and, -// if you want AWS WAF to inspect a header or custom query parameter, the name of -// the header or parameter. -// -// * TextTransformation: Which text transformation, -// if any, to perform on the web request before inspecting the request for -// cross-site scripting attacks. You can only specify a single type of +// * TextTransformation: Which text transformation, if any, +// to perform on the web request before inspecting the request for cross-site +// scripting attacks. You can only specify a single type of // TextTransformation. // // You use XssMatchSet objects to specify which CloudFront @@ -41,20 +41,19 @@ import ( // applicable settings, and then configure AWS WAF to block the requests. To create // and configure an XssMatchSet, perform the following steps: // -// * Submit a +// * Submit a // CreateXssMatchSet request. // -// * Use GetChangeToken to get the change token -// that you provide in the ChangeToken parameter of an UpdateIPSet request. +// * Use GetChangeToken to get the change token that +// you provide in the ChangeToken parameter of an UpdateIPSet request. 
// -// * -// Submit an UpdateXssMatchSet request to specify the parts of web requests that -// you want AWS WAF to inspect for cross-site scripting attacks. +// * Submit an +// UpdateXssMatchSet request to specify the parts of web requests that you want AWS +// WAF to inspect for cross-site scripting attacks. // -// For more -// information about how to use the AWS WAF API to allow or block HTTP requests, -// see the AWS WAF Developer Guide -// (https://docs.aws.amazon.com/waf/latest/developerguide/). +// For more information about how +// to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF +// Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/). func (c *Client) UpdateXssMatchSet(ctx context.Context, params *UpdateXssMatchSetInput, optFns ...func(*Options)) (*UpdateXssMatchSetOutput, error) { if params == nil { params = &UpdateXssMatchSetInput{} @@ -81,14 +80,13 @@ type UpdateXssMatchSetInput struct { // An array of XssMatchSetUpdate objects that you want to insert into or delete // from an XssMatchSet. For more information, see the applicable data types: // - // * + // * // XssMatchSetUpdate: Contains Action and XssMatchTuple // - // * XssMatchTuple: - // Contains FieldToMatch and TextTransformation + // * XssMatchTuple: Contains + // FieldToMatch and TextTransformation // - // * FieldToMatch: Contains Data - // and Type + // * FieldToMatch: Contains Data and Type // // This member is required. Updates []*types.XssMatchSetUpdate diff --git a/service/waf/types/enums.go b/service/waf/types/enums.go index 92d645ac002..58ae3339a83 100644 --- a/service/waf/types/enums.go +++ b/service/waf/types/enums.go @@ -616,13 +616,13 @@ type MatchFieldType string // Enum values for MatchFieldType const ( - MatchFieldTypeUri MatchFieldType = "URI" - MatchFieldTypeQuery_string MatchFieldType = "QUERY_STRING" - MatchFieldTypeHeader MatchFieldType = "HEADER" - MatchFieldTypeMethod MatchFieldType = "METHOD" - MatchFieldTypeBody MatchFieldType = "BODY" - MatchFieldTypeSingle_query_arg MatchFieldType = "SINGLE_QUERY_ARG" - MatchFieldTypeAll_query_args MatchFieldType = "ALL_QUERY_ARGS" + MatchFieldTypeUri MatchFieldType = "URI" + MatchFieldTypeQueryString MatchFieldType = "QUERY_STRING" + MatchFieldTypeHeader MatchFieldType = "HEADER" + MatchFieldTypeMethod MatchFieldType = "METHOD" + MatchFieldTypeBody MatchFieldType = "BODY" + MatchFieldTypeSingleQueryArg MatchFieldType = "SINGLE_QUERY_ARG" + MatchFieldTypeAllQueryArgs MatchFieldType = "ALL_QUERY_ARGS" ) // Values returns all known values for MatchFieldType. 
Note that this can be @@ -644,13 +644,13 @@ type MigrationErrorType string // Enum values for MigrationErrorType const ( - MigrationErrorTypeEntity_not_supported MigrationErrorType = "ENTITY_NOT_SUPPORTED" - MigrationErrorTypeEntity_not_found MigrationErrorType = "ENTITY_NOT_FOUND" - MigrationErrorTypeS3_bucket_no_permission MigrationErrorType = "S3_BUCKET_NO_PERMISSION" - MigrationErrorTypeS3_bucket_not_accessible MigrationErrorType = "S3_BUCKET_NOT_ACCESSIBLE" - MigrationErrorTypeS3_bucket_not_found MigrationErrorType = "S3_BUCKET_NOT_FOUND" - MigrationErrorTypeS3_bucket_invalid_region MigrationErrorType = "S3_BUCKET_INVALID_REGION" - MigrationErrorTypeS3_internal_error MigrationErrorType = "S3_INTERNAL_ERROR" + MigrationErrorTypeEntityNotSupported MigrationErrorType = "ENTITY_NOT_SUPPORTED" + MigrationErrorTypeEntityNotFound MigrationErrorType = "ENTITY_NOT_FOUND" + MigrationErrorTypeS3BucketNoPermission MigrationErrorType = "S3_BUCKET_NO_PERMISSION" + MigrationErrorTypeS3BucketNotAccessible MigrationErrorType = "S3_BUCKET_NOT_ACCESSIBLE" + MigrationErrorTypeS3BucketNotFound MigrationErrorType = "S3_BUCKET_NOT_FOUND" + MigrationErrorTypeS3BucketInvalidRegion MigrationErrorType = "S3_BUCKET_INVALID_REGION" + MigrationErrorTypeS3InternalError MigrationErrorType = "S3_INTERNAL_ERROR" ) // Values returns all known values for MigrationErrorType. Note that this can be @@ -672,24 +672,24 @@ type ParameterExceptionField string // Enum values for ParameterExceptionField const ( - ParameterExceptionFieldChange_action ParameterExceptionField = "CHANGE_ACTION" - ParameterExceptionFieldWaf_action ParameterExceptionField = "WAF_ACTION" - ParameterExceptionFieldWaf_override_action ParameterExceptionField = "WAF_OVERRIDE_ACTION" - ParameterExceptionFieldPredicate_type ParameterExceptionField = "PREDICATE_TYPE" - ParameterExceptionFieldIpset_type ParameterExceptionField = "IPSET_TYPE" - ParameterExceptionFieldByte_match_field_type ParameterExceptionField = "BYTE_MATCH_FIELD_TYPE" - ParameterExceptionFieldSql_injection_match_field_type ParameterExceptionField = "SQL_INJECTION_MATCH_FIELD_TYPE" - ParameterExceptionFieldByte_match_text_transformation ParameterExceptionField = "BYTE_MATCH_TEXT_TRANSFORMATION" - ParameterExceptionFieldByte_match_positional_constraint ParameterExceptionField = "BYTE_MATCH_POSITIONAL_CONSTRAINT" - ParameterExceptionFieldSize_constraint_comparison_operator ParameterExceptionField = "SIZE_CONSTRAINT_COMPARISON_OPERATOR" - ParameterExceptionFieldGeo_match_location_type ParameterExceptionField = "GEO_MATCH_LOCATION_TYPE" - ParameterExceptionFieldGeo_match_location_value ParameterExceptionField = "GEO_MATCH_LOCATION_VALUE" - ParameterExceptionFieldRate_key ParameterExceptionField = "RATE_KEY" - ParameterExceptionFieldRule_type ParameterExceptionField = "RULE_TYPE" - ParameterExceptionFieldNext_marker ParameterExceptionField = "NEXT_MARKER" - ParameterExceptionFieldResource_arn ParameterExceptionField = "RESOURCE_ARN" - ParameterExceptionFieldTags ParameterExceptionField = "TAGS" - ParameterExceptionFieldTag_keys ParameterExceptionField = "TAG_KEYS" + ParameterExceptionFieldChangeAction ParameterExceptionField = "CHANGE_ACTION" + ParameterExceptionFieldWafAction ParameterExceptionField = "WAF_ACTION" + ParameterExceptionFieldWafOverrideAction ParameterExceptionField = "WAF_OVERRIDE_ACTION" + ParameterExceptionFieldPredicateType ParameterExceptionField = "PREDICATE_TYPE" + ParameterExceptionFieldIpsetType ParameterExceptionField = "IPSET_TYPE" + 
ParameterExceptionFieldByteMatchFieldType ParameterExceptionField = "BYTE_MATCH_FIELD_TYPE" + ParameterExceptionFieldSqlInjectionMatchFieldType ParameterExceptionField = "SQL_INJECTION_MATCH_FIELD_TYPE" + ParameterExceptionFieldByteMatchTextTransformation ParameterExceptionField = "BYTE_MATCH_TEXT_TRANSFORMATION" + ParameterExceptionFieldByteMatchPositionalConstraint ParameterExceptionField = "BYTE_MATCH_POSITIONAL_CONSTRAINT" + ParameterExceptionFieldSizeConstraintComparisonOperator ParameterExceptionField = "SIZE_CONSTRAINT_COMPARISON_OPERATOR" + ParameterExceptionFieldGeoMatchLocationType ParameterExceptionField = "GEO_MATCH_LOCATION_TYPE" + ParameterExceptionFieldGeoMatchLocationValue ParameterExceptionField = "GEO_MATCH_LOCATION_VALUE" + ParameterExceptionFieldRateKey ParameterExceptionField = "RATE_KEY" + ParameterExceptionFieldRuleType ParameterExceptionField = "RULE_TYPE" + ParameterExceptionFieldNextMarker ParameterExceptionField = "NEXT_MARKER" + ParameterExceptionFieldResourceArn ParameterExceptionField = "RESOURCE_ARN" + ParameterExceptionFieldTags ParameterExceptionField = "TAGS" + ParameterExceptionFieldTagKeys ParameterExceptionField = "TAG_KEYS" ) // Values returns all known values for ParameterExceptionField. Note that this can @@ -722,10 +722,10 @@ type ParameterExceptionReason string // Enum values for ParameterExceptionReason const ( - ParameterExceptionReasonInvalid_option ParameterExceptionReason = "INVALID_OPTION" - ParameterExceptionReasonIllegal_combination ParameterExceptionReason = "ILLEGAL_COMBINATION" - ParameterExceptionReasonIllegal_argument ParameterExceptionReason = "ILLEGAL_ARGUMENT" - ParameterExceptionReasonInvalid_tag_key ParameterExceptionReason = "INVALID_TAG_KEY" + ParameterExceptionReasonInvalidOption ParameterExceptionReason = "INVALID_OPTION" + ParameterExceptionReasonIllegalCombination ParameterExceptionReason = "ILLEGAL_COMBINATION" + ParameterExceptionReasonIllegalArgument ParameterExceptionReason = "ILLEGAL_ARGUMENT" + ParameterExceptionReasonInvalidTagKey ParameterExceptionReason = "INVALID_TAG_KEY" ) // Values returns all known values for ParameterExceptionReason. Note that this can @@ -744,11 +744,11 @@ type PositionalConstraint string // Enum values for PositionalConstraint const ( - PositionalConstraintExactly PositionalConstraint = "EXACTLY" - PositionalConstraintStarts_with PositionalConstraint = "STARTS_WITH" - PositionalConstraintEnds_with PositionalConstraint = "ENDS_WITH" - PositionalConstraintContains PositionalConstraint = "CONTAINS" - PositionalConstraintContains_word PositionalConstraint = "CONTAINS_WORD" + PositionalConstraintExactly PositionalConstraint = "EXACTLY" + PositionalConstraintStartsWith PositionalConstraint = "STARTS_WITH" + PositionalConstraintEndsWith PositionalConstraint = "ENDS_WITH" + PositionalConstraintContains PositionalConstraint = "CONTAINS" + PositionalConstraintContainsWord PositionalConstraint = "CONTAINS_WORD" ) // Values returns all known values for PositionalConstraint. 
Note that this can be @@ -768,13 +768,13 @@ type PredicateType string // Enum values for PredicateType const ( - PredicateTypeIp_match PredicateType = "IPMatch" - PredicateTypeByte_match PredicateType = "ByteMatch" - PredicateTypeSql_injection_match PredicateType = "SqlInjectionMatch" - PredicateTypeGeo_match PredicateType = "GeoMatch" - PredicateTypeSize_constraint PredicateType = "SizeConstraint" - PredicateTypeXss_match PredicateType = "XssMatch" - PredicateTypeRegex_match PredicateType = "RegexMatch" + PredicateTypeIpMatch PredicateType = "IPMatch" + PredicateTypeByteMatch PredicateType = "ByteMatch" + PredicateTypeSqlInjectionMatch PredicateType = "SqlInjectionMatch" + PredicateTypeGeoMatch PredicateType = "GeoMatch" + PredicateTypeSizeConstraint PredicateType = "SizeConstraint" + PredicateTypeXssMatch PredicateType = "XssMatch" + PredicateTypeRegexMatch PredicateType = "RegexMatch" ) // Values returns all known values for PredicateType. Note that this can be @@ -812,12 +812,12 @@ type TextTransformation string // Enum values for TextTransformation const ( - TextTransformationNone TextTransformation = "NONE" - TextTransformationCompress_white_space TextTransformation = "COMPRESS_WHITE_SPACE" - TextTransformationHtml_entity_decode TextTransformation = "HTML_ENTITY_DECODE" - TextTransformationLowercase TextTransformation = "LOWERCASE" - TextTransformationCmd_line TextTransformation = "CMD_LINE" - TextTransformationUrl_decode TextTransformation = "URL_DECODE" + TextTransformationNone TextTransformation = "NONE" + TextTransformationCompressWhiteSpace TextTransformation = "COMPRESS_WHITE_SPACE" + TextTransformationHtmlEntityDecode TextTransformation = "HTML_ENTITY_DECODE" + TextTransformationLowercase TextTransformation = "LOWERCASE" + TextTransformationCmdLine TextTransformation = "CMD_LINE" + TextTransformationUrlDecode TextTransformation = "URL_DECODE" ) // Values returns all known values for TextTransformation. Note that this can be @@ -876,9 +876,9 @@ type WafRuleType string // Enum values for WafRuleType const ( - WafRuleTypeRegular WafRuleType = "REGULAR" - WafRuleTypeRate_based WafRuleType = "RATE_BASED" - WafRuleTypeGroup WafRuleType = "GROUP" + WafRuleTypeRegular WafRuleType = "REGULAR" + WafRuleTypeRateBased WafRuleType = "RATE_BASED" + WafRuleTypeGroup WafRuleType = "GROUP" ) // Values returns all known values for WafRuleType. Note that this can be expanded diff --git a/service/waf/types/errors.go b/service/waf/types/errors.go index f1d2af6cc5e..81c092a185a 100644 --- a/service/waf/types/errors.go +++ b/service/waf/types/errors.go @@ -44,28 +44,28 @@ func (e *WAFDisallowedNameException) ErrorFault() smithy.ErrorFault { return smi // The operation failed due to a problem with the migration. The failure cause is // provided in the exception, in the MigrationErrorType: // -// * -// ENTITY_NOT_SUPPORTED - The web ACL has an unsupported entity but the -// IgnoreUnsupportedType is not set to true. +// * ENTITY_NOT_SUPPORTED - +// The web ACL has an unsupported entity but the IgnoreUnsupportedType is not set +// to true. // -// * ENTITY_NOT_FOUND - The web ACL -// doesn't exist. +// * ENTITY_NOT_FOUND - The web ACL doesn't exist. // -// * S3_BUCKET_NO_PERMISSION - You don't have permission to -// perform the PutObject action to the specified Amazon S3 bucket. +// * +// S3_BUCKET_NO_PERMISSION - You don't have permission to perform the PutObject +// action to the specified Amazon S3 bucket. 
// -// * -// S3_BUCKET_NOT_ACCESSIBLE - The bucket policy doesn't allow AWS WAF to perform -// the PutObject action in the bucket. +// * S3_BUCKET_NOT_ACCESSIBLE - The +// bucket policy doesn't allow AWS WAF to perform the PutObject action in the +// bucket. // -// * S3_BUCKET_NOT_FOUND - The S3 bucket -// doesn't exist. +// * S3_BUCKET_NOT_FOUND - The S3 bucket doesn't exist. // -// * S3_BUCKET_INVALID_REGION - The S3 bucket is not in the -// same Region as the web ACL. +// * +// S3_BUCKET_INVALID_REGION - The S3 bucket is not in the same Region as the web +// ACL. // -// * S3_INTERNAL_ERROR - AWS WAF failed to create -// the template in the S3 bucket for another reason. +// * S3_INTERNAL_ERROR - AWS WAF failed to create the template in the S3 +// bucket for another reason. type WAFEntityMigrationException struct { Message *string @@ -123,23 +123,22 @@ func (e *WAFInvalidAccountException) ErrorFault() smithy.ErrorFault { return smi // The operation failed because there was nothing to do. For example: // -// * You -// tried to remove a Rule from a WebACL, but the Rule isn't in the specified -// WebACL. +// * You tried +// to remove a Rule from a WebACL, but the Rule isn't in the specified WebACL. // -// * You tried to remove an IP address from an IPSet, but the IP -// address isn't in the specified IPSet. +// * +// You tried to remove an IP address from an IPSet, but the IP address isn't in the +// specified IPSet. // -// * You tried to remove a -// ByteMatchTuple from a ByteMatchSet, but the ByteMatchTuple isn't in the -// specified WebACL. +// * You tried to remove a ByteMatchTuple from a ByteMatchSet, +// but the ByteMatchTuple isn't in the specified WebACL. // -// * You tried to add a Rule to a WebACL, but the Rule -// already exists in the specified WebACL. +// * You tried to add a Rule +// to a WebACL, but the Rule already exists in the specified WebACL. // -// * You tried to add a ByteMatchTuple -// to a ByteMatchSet, but the ByteMatchTuple already exists in the specified -// WebACL. +// * You tried +// to add a ByteMatchTuple to a ByteMatchSet, but the ByteMatchTuple already exists +// in the specified WebACL. type WAFInvalidOperationException struct { Message *string } @@ -159,33 +158,33 @@ func (e *WAFInvalidOperationException) ErrorFault() smithy.ErrorFault { return s // The operation failed because AWS WAF didn't recognize a parameter in the // request. For example: // -// * You specified an invalid parameter name. +// * You specified an invalid parameter name. // -// * You +// * You // specified an invalid value. // -// * You tried to update an object (ByteMatchSet, +// * You tried to update an object (ByteMatchSet, // IPSet, Rule, or WebACL) using an action other than INSERT or DELETE. // -// * You +// * You // tried to create a WebACL with a DefaultActionType other than ALLOW, BLOCK, or // COUNT. // -// * You tried to create a RateBasedRule with a RateKey value other -// than IP. +// * You tried to create a RateBasedRule with a RateKey value other than +// IP. // -// * You tried to update a WebACL with a WafActionType other than -// ALLOW, BLOCK, or COUNT. +// * You tried to update a WebACL with a WafActionType other than ALLOW, +// BLOCK, or COUNT. // -// * You tried to update a ByteMatchSet with a -// FieldToMatchType other than HEADER, METHOD, QUERY_STRING, URI, or BODY. +// * You tried to update a ByteMatchSet with a FieldToMatchType +// other than HEADER, METHOD, QUERY_STRING, URI, or BODY. 
// -// * -// You tried to update a ByteMatchSet with a Field of HEADER but no value for -// Data. +// * You tried to update a +// ByteMatchSet with a Field of HEADER but no value for Data. // -// * Your request references an ARN that is malformed, or corresponds to -// a resource with which a web ACL cannot be associated. +// * Your request +// references an ARN that is malformed, or corresponds to a resource with which a +// web ACL cannot be associated. type WAFInvalidParameterException struct { Message *string @@ -209,30 +208,29 @@ func (e *WAFInvalidParameterException) ErrorFault() smithy.ErrorFault { return s // The operation failed because the specified policy is not in the proper format. // The policy is subject to the following restrictions: // -// * You can attach only -// one policy with each PutPermissionPolicy request. +// * You can attach only one +// policy with each PutPermissionPolicy request. // -// * The policy must include -// an Effect, Action and Principal. +// * The policy must include an +// Effect, Action and Principal. // -// * Effect must specify Allow. +// * Effect must specify Allow. // -// * The -// Action in the policy must be waf:UpdateWebACL, waf-regional:UpdateWebACL, -// waf:GetRuleGroup and waf-regional:GetRuleGroup . Any extra or wildcard actions -// in the policy will be rejected. +// * The Action in the +// policy must be waf:UpdateWebACL, waf-regional:UpdateWebACL, waf:GetRuleGroup and +// waf-regional:GetRuleGroup . Any extra or wildcard actions in the policy will be +// rejected. // -// * The policy cannot include a Resource -// parameter. +// * The policy cannot include a Resource parameter. // -// * The ARN in the request must be a valid WAF RuleGroup ARN and -// the RuleGroup must exist in the same region. +// * The ARN in the +// request must be a valid WAF RuleGroup ARN and the RuleGroup must exist in the +// same region. // -// * The user making the request -// must be the owner of the RuleGroup. +// * The user making the request must be the owner of the +// RuleGroup. // -// * Your policy must be composed using -// IAM Policy version 2012-10-17. +// * Your policy must be composed using IAM Policy version 2012-10-17. type WAFInvalidPermissionPolicyException struct { Message *string } @@ -295,17 +293,17 @@ func (e *WAFLimitsExceededException) ErrorFault() smithy.ErrorFault { return smi // The operation failed because you tried to delete an object that isn't empty. For // example: // -// * You tried to delete a WebACL that still contains one or more -// Rule objects. +// * You tried to delete a WebACL that still contains one or more Rule +// objects. // -// * You tried to delete a Rule that still contains one or more +// * You tried to delete a Rule that still contains one or more // ByteMatchSet objects or other predicates. // -// * You tried to delete a -// ByteMatchSet that contains one or more ByteMatchTuple objects. +// * You tried to delete a ByteMatchSet +// that contains one or more ByteMatchTuple objects. // -// * You tried -// to delete an IPSet that references one or more IP addresses. +// * You tried to delete an +// IPSet that references one or more IP addresses. type WAFNonEmptyEntityException struct { Message *string } @@ -325,18 +323,18 @@ func (e *WAFNonEmptyEntityException) ErrorFault() smithy.ErrorFault { return smi // The operation failed because you tried to add an object to or delete an object // from another object that doesn't exist. 
For example: // -// * You tried to add a -// Rule to or delete a Rule from a WebACL that doesn't exist. +// * You tried to add a Rule +// to or delete a Rule from a WebACL that doesn't exist. // -// * You tried to -// add a ByteMatchSet to or delete a ByteMatchSet from a Rule that doesn't exist. +// * You tried to add a +// ByteMatchSet to or delete a ByteMatchSet from a Rule that doesn't exist. // +// * You +// tried to add an IP address to or delete an IP address from an IPSet that doesn't +// exist. // -// * You tried to add an IP address to or delete an IP address from an IPSet that -// doesn't exist. -// -// * You tried to add a ByteMatchTuple to or delete a -// ByteMatchTuple from a ByteMatchSet that doesn't exist. +// * You tried to add a ByteMatchTuple to or delete a ByteMatchTuple from a +// ByteMatchSet that doesn't exist. type WAFNonexistentContainerException struct { Message *string } @@ -375,11 +373,10 @@ func (e *WAFNonexistentItemException) ErrorFault() smithy.ErrorFault { return sm // The operation failed because you tried to delete an object that is still in use. // For example: // -// * You tried to delete a ByteMatchSet that is still referenced -// by a Rule. +// * You tried to delete a ByteMatchSet that is still referenced by a +// Rule. // -// * You tried to delete a Rule that is still referenced by a -// WebACL. +// * You tried to delete a Rule that is still referenced by a WebACL. type WAFReferencedItemException struct { Message *string } diff --git a/service/waf/types/types.go b/service/waf/types/types.go index 679c79b3922..6f810019e46 100644 --- a/service/waf/types/types.go +++ b/service/waf/types/types.go @@ -38,15 +38,15 @@ type ActivatedRule struct { // Specifies the action that CloudFront or AWS WAF takes when a web request matches // the conditions in the Rule. Valid values for Action include the following: // + // * + // ALLOW: CloudFront responds with the requested object. // - // * ALLOW: CloudFront responds with the requested object. - // - // * BLOCK: CloudFront + // * BLOCK: CloudFront // responds with an HTTP 403 (Forbidden) status code. // - // * COUNT: AWS WAF - // increments a counter of requests that match the conditions in the rule and then - // continues to inspect the web request based on the remaining rules in the web + // * COUNT: AWS WAF increments + // a counter of requests that match the conditions in the rule and then continues + // to inspect the web request based on the remaining rules in the web // ACL. // // ActivatedRule|OverrideAction applies only when updating or adding a @@ -68,25 +68,24 @@ type ActivatedRule struct { // metrics for each ExcludedRule. If you want to exclude rules from a rule group // that is already associated with a web ACL, perform the following steps: // - // * - // Use the AWS WAF logs to identify the IDs of the rules that you want to exclude. - // For more information about the logs, see Logging Web ACL Traffic Information + // * Use + // the AWS WAF logs to identify the IDs of the rules that you want to exclude. For + // more information about the logs, see Logging Web ACL Traffic Information // (https://docs.aws.amazon.com/waf/latest/developerguide/logging.html). // - // * - // Submit an UpdateWebACL request that has two actions: + // * Submit + // an UpdateWebACL request that has two actions: // - // * The first action - // deletes the existing rule group from the web ACL. 
That is, in the UpdateWebACL - // request, the first Updates:Action should be DELETE and - // Updates:ActivatedRule:RuleId should be the rule group that contains the rules - // that you want to exclude. + // * The first action deletes the + // existing rule group from the web ACL. That is, in the UpdateWebACL request, the + // first Updates:Action should be DELETE and Updates:ActivatedRule:RuleId should be + // the rule group that contains the rules that you want to exclude. // - // * The second action inserts the same rule - // group back in, but specifying the rules to exclude. That is, the second - // Updates:Action should be INSERT, Updates:ActivatedRule:RuleId should be the rule - // group that you just removed, and ExcludedRules should contain the rules that you - // want to exclude. + // * The second + // action inserts the same rule group back in, but specifying the rules to exclude. + // That is, the second Updates:Action should be INSERT, + // Updates:ActivatedRule:RuleId should be the rule group that you just removed, and + // ExcludedRules should contain the rules that you want to exclude. ExcludedRules []*ExcludedRule // Use the OverrideAction to test your RuleGroup. Any rule in a RuleGroup can @@ -224,21 +223,21 @@ type ByteMatchTuple struct { // underscore (A-Z, a-z, 0-9, or _). In addition, TargetString must be a word, // which means one of the following: // - // * TargetString exactly matches the value - // of the specified part of the web request, such as the value of a header. + // * TargetString exactly matches the value of + // the specified part of the web request, such as the value of a header. // - // * + // * // TargetString is at the beginning of the specified part of the web request and is // followed by a character other than an alphanumeric character or underscore (_), // for example, BadBot;. // - // * TargetString is at the end of the specified part of - // the web request and is preceded by a character other than an alphanumeric - // character or underscore (_), for example, ;BadBot. + // * TargetString is at the end of the specified part of the + // web request and is preceded by a character other than an alphanumeric character + // or underscore (_), for example, ;BadBot. // - // * TargetString is in the - // middle of the specified part of the web request and is preceded and followed by - // characters other than alphanumeric characters or underscore (_), for example, + // * TargetString is in the middle of the + // specified part of the web request and is preceded and followed by characters + // other than alphanumeric characters or underscore (_), for example, // -BadBot;. // // EXACTLY The value of the specified part of the web request must @@ -255,44 +254,44 @@ type ByteMatchTuple struct { // The maximum length of the value is 50 bytes. Valid values depend on the values // that you specified for FieldToMatch: // - // * HEADER: The value that you want AWS - // WAF to search for in the request header that you specified in FieldToMatch, for + // * HEADER: The value that you want AWS WAF + // to search for in the request header that you specified in FieldToMatch, for // example, the value of the User-Agent or Referer header. // - // * METHOD: The HTTP + // * METHOD: The HTTP // method, which indicates the type of operation specified in the request. // CloudFront supports the following methods: DELETE, GET, HEAD, OPTIONS, PATCH, // POST, and PUT. 
// - // * QUERY_STRING: The value that you want AWS WAF to search - // for in the query string, which is the part of a URL that appears after a ? + // * QUERY_STRING: The value that you want AWS WAF to search for in + // the query string, which is the part of a URL that appears after a ? // character. // - // * URI: The value that you want AWS WAF to search for in the part - // of a URL that identifies a resource, for example, /images/daily-ad.jpg. - // - // * - // BODY: The part of a request that contains any additional data that you want to - // send to your web server as the HTTP request body, such as data from a form. The - // request body immediately follows the request headers. Note that only the first - // 8192 bytes of the request body are forwarded to AWS WAF for inspection. To allow - // or block requests based on the length of the body, you can create a size - // constraint set. For more information, see CreateSizeConstraintSet. - // - // * - // SINGLE_QUERY_ARG: The parameter in the query string that you will inspect, such - // as UserName or SalesRegion. The maximum length for SINGLE_QUERY_ARG is 30 - // characters. - // - // * ALL_QUERY_ARGS: Similar to SINGLE_QUERY_ARG, but instead of - // inspecting a single parameter, AWS WAF inspects all parameters within the query - // string for the value or regex pattern that you specify in TargetString. - // - // If - // TargetString includes alphabetic characters A-Z and a-z, note that the value is - // case sensitive. If you're using the AWS WAF API Specify a base64-encoded version - // of the value. The maximum length of the value before you base64-encode it is 50 - // bytes. For example, suppose the value of Type is HEADER and the value of Data is + // * URI: The value that you want AWS WAF to search for in the part of + // a URL that identifies a resource, for example, /images/daily-ad.jpg. + // + // * BODY: + // The part of a request that contains any additional data that you want to send to + // your web server as the HTTP request body, such as data from a form. The request + // body immediately follows the request headers. Note that only the first 8192 + // bytes of the request body are forwarded to AWS WAF for inspection. To allow or + // block requests based on the length of the body, you can create a size constraint + // set. For more information, see CreateSizeConstraintSet. + // + // * SINGLE_QUERY_ARG: The + // parameter in the query string that you will inspect, such as UserName or + // SalesRegion. The maximum length for SINGLE_QUERY_ARG is 30 characters. + // + // * + // ALL_QUERY_ARGS: Similar to SINGLE_QUERY_ARG, but instead of inspecting a single + // parameter, AWS WAF inspects all parameters within the query string for the value + // or regex pattern that you specify in TargetString. + // + // If TargetString includes + // alphabetic characters A-Z and a-z, note that the value is case sensitive. If + // you're using the AWS WAF API Specify a base64-encoded version of the value. The + // maximum length of the value before you base64-encode it is 50 bytes. For + // example, suppose the value of Type is HEADER and the value of Data is // User-Agent. If you want to search the User-Agent header for the value BadBot, // you base64-encode BadBot using MIME base64-encoding and include the resulting // value, QmFkQm90, in the value of TargetString. 
If you're using the AWS CLI or @@ -310,64 +309,64 @@ type ByteMatchTuple struct { // command and using unusual formatting to disguise some or all of the command, use // this option to perform the following transformations: // - // * Delete the - // following characters: \ " ' ^ + // * Delete the following + // characters: \ " ' ^ // - // * Delete spaces before the following - // characters: / ( + // * Delete spaces before the following characters: / ( // - // * Replace the following characters with a space: , ; + // * + // Replace the following characters with a space: , ; // - // * - // Replace multiple spaces with one space + // * Replace multiple spaces + // with one space // - // * Convert uppercase letters (A-Z) to - // lowercase (a-z) + // * Convert uppercase letters (A-Z) to lowercase + // (a-z) // - // COMPRESS_WHITE_SPACE Use this option to replace the following - // characters with a space character (decimal 32): + // COMPRESS_WHITE_SPACE Use this option to replace the following characters + // with a space character (decimal 32): // - // * \f, formfeed, decimal - // 12 + // * \f, formfeed, decimal 12 // - // * \t, tab, decimal 9 + // * \t, tab, + // decimal 9 // - // * \n, newline, decimal 10 + // * \n, newline, decimal 10 // - // * \r, carriage - // return, decimal 13 + // * \r, carriage return, decimal 13 // - // * \v, vertical tab, decimal 11 + // * \v, + // vertical tab, decimal 11 // - // * non-breaking - // space, decimal 160 + // * non-breaking space, decimal + // 160 // - // COMPRESS_WHITE_SPACE also replaces multiple spaces with one - // space. HTML_ENTITY_DECODE Use this option to replace HTML-encoded characters - // with unencoded characters. HTML_ENTITY_DECODE performs the following - // operations: - // - // * Replaces (ampersand)quot; with " + // COMPRESS_WHITE_SPACE also replaces multiple spaces with one space. + // HTML_ENTITY_DECODE Use this option to replace HTML-encoded characters with + // unencoded characters. HTML_ENTITY_DECODE performs the following operations: // - // * Replaces - // (ampersand)nbsp; with a non-breaking space, decimal 160 + // * + // Replaces (ampersand)quot; with " // - // * Replaces - // (ampersand)lt; with a "less than" symbol + // * Replaces (ampersand)nbsp; with a + // non-breaking space, decimal 160 // - // * Replaces (ampersand)gt; with > + // * Replaces (ampersand)lt; with a "less than" + // symbol // + // * Replaces (ampersand)gt; with > // - // * Replaces characters that are represented in hexadecimal format, - // (ampersand)#xhhhh;, with the corresponding characters + // * Replaces characters that are + // represented in hexadecimal format, (ampersand)#xhhhh;, with the corresponding + // characters // - // * Replaces characters - // that are represented in decimal format, (ampersand)#nnnn;, with the - // corresponding characters + // * Replaces characters that are represented in decimal format, + // (ampersand)#nnnn;, with the corresponding characters // - // LOWERCASE Use this option to convert uppercase letters - // (A-Z) to lowercase (a-z). URL_DECODE Use this option to decode a URL-encoded - // value. NONE Specify NONE if you don't want to perform any text transformations. + // LOWERCASE Use this option + // to convert uppercase letters (A-Z) to lowercase (a-z). URL_DECODE Use this + // option to decode a URL-encoded value. NONE Specify NONE if you don't want to + // perform any text transformations. // // This member is required. 
TextTransformation TextTransformation @@ -402,38 +401,38 @@ type FieldToMatch struct { // The part of the web request that you want AWS WAF to search for a specified // string. Parts of a request that you can search include the following: // - // * - // HEADER: A specified request header, for example, the value of the User-Agent or - // Referer header. If you choose HEADER for the type, specify the name of the - // header in Data. + // * HEADER: + // A specified request header, for example, the value of the User-Agent or Referer + // header. If you choose HEADER for the type, specify the name of the header in + // Data. // - // * METHOD: The HTTP method, which indicated the type of - // operation that the request is asking the origin to perform. Amazon CloudFront - // supports the following methods: DELETE, GET, HEAD, OPTIONS, PATCH, POST, and - // PUT. + // * METHOD: The HTTP method, which indicated the type of operation that the + // request is asking the origin to perform. Amazon CloudFront supports the + // following methods: DELETE, GET, HEAD, OPTIONS, PATCH, POST, and PUT. // - // * QUERY_STRING: A query string, which is the part of a URL that - // appears after a ? character, if any. + // * + // QUERY_STRING: A query string, which is the part of a URL that appears after a ? + // character, if any. // - // * URI: The part of a web request that - // identifies a resource, for example, /images/daily-ad.jpg. + // * URI: The part of a web request that identifies a resource, + // for example, /images/daily-ad.jpg. // - // * BODY: The part - // of a request that contains any additional data that you want to send to your web - // server as the HTTP request body, such as data from a form. The request body - // immediately follows the request headers. Note that only the first 8192 bytes of - // the request body are forwarded to AWS WAF for inspection. To allow or block - // requests based on the length of the body, you can create a size constraint set. - // For more information, see CreateSizeConstraintSet. + // * BODY: The part of a request that contains + // any additional data that you want to send to your web server as the HTTP request + // body, such as data from a form. The request body immediately follows the request + // headers. Note that only the first 8192 bytes of the request body are forwarded + // to AWS WAF for inspection. To allow or block requests based on the length of the + // body, you can create a size constraint set. For more information, see + // CreateSizeConstraintSet. // - // * SINGLE_QUERY_ARG: The - // parameter in the query string that you will inspect, such as UserName or - // SalesRegion. The maximum length for SINGLE_QUERY_ARG is 30 characters. + // * SINGLE_QUERY_ARG: The parameter in the query string + // that you will inspect, such as UserName or SalesRegion. The maximum length for + // SINGLE_QUERY_ARG is 30 characters. // - // * - // ALL_QUERY_ARGS: Similar to SINGLE_QUERY_ARG, but rather than inspecting a single - // parameter, AWS WAF will inspect all parameters within the query for the value or - // regex pattern that you specify in TargetString. + // * ALL_QUERY_ARGS: Similar to + // SINGLE_QUERY_ARG, but rather than inspecting a single parameter, AWS WAF will + // inspect all parameters within the query for the value or regex pattern that you + // specify in TargetString. // // This member is required. 
Type MatchFieldType @@ -577,11 +576,11 @@ type HTTPRequest struct { // with a CloudFront distribution, this is the value of one of the following fields // in CloudFront access logs: // - // * c-ip, if the viewer did not use an HTTP proxy - // or a load balancer to send the request + // * c-ip, if the viewer did not use an HTTP proxy or a + // load balancer to send the request // - // * x-forwarded-for, if the viewer did - // use an HTTP proxy or a load balancer to send the request + // * x-forwarded-for, if the viewer did use an + // HTTP proxy or a load balancer to send the request ClientIP *string // The two-letter country code for the country that the request originated from. @@ -661,26 +660,26 @@ type IPSetDescriptor struct { // Specify an IPv4 address by using CIDR notation. For example: // - // * To configure - // AWS WAF to allow, block, or count requests that originated from the IP address + // * To configure AWS + // WAF to allow, block, or count requests that originated from the IP address // 192.0.2.44, specify 192.0.2.44/32. // - // * To configure AWS WAF to allow, block, - // or count requests that originated from IP addresses from 192.0.2.0 to - // 192.0.2.255, specify 192.0.2.0/24. + // * To configure AWS WAF to allow, block, or + // count requests that originated from IP addresses from 192.0.2.0 to 192.0.2.255, + // specify 192.0.2.0/24. // - // For more information about CIDR notation, - // see the Wikipedia entry Classless Inter-Domain Routing + // For more information about CIDR notation, see the + // Wikipedia entry Classless Inter-Domain Routing // (https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing). Specify an IPv6 // address by using CIDR notation. For example: // - // * To configure AWS WAF to - // allow, block, or count requests that originated from the IP address + // * To configure AWS WAF to allow, + // block, or count requests that originated from the IP address // 1111:0000:0000:0000:0000:0000:0000:0111, specify // 1111:0000:0000:0000:0000:0000:0000:0111/128. // - // * To configure AWS WAF to - // allow, block, or count requests that originated from IP addresses + // * To configure AWS WAF to allow, + // block, or count requests that originated from IP addresses // 1111:0000:0000:0000:0000:0000:0000:0000 to // 1111:0000:0000:0000:ffff:ffff:ffff:ffff, specify // 1111:0000:0000:0000:0000:0000:0000:0000/64. @@ -810,15 +809,15 @@ type Predicate struct { // seen from an attacker, you might create a RateBasedRule that includes the // following conditions: // -// * The requests come from 192.0.2.44. +// * The requests come from 192.0.2.44. // -// * They -// contain the value BadBot in the User-Agent header. +// * They contain the +// value BadBot in the User-Agent header. // -// In the rule, you also define -// the rate limit as 1,000. Requests that meet both of these conditions and exceed -// 1,000 requests every five minutes trigger the rule's action (block or count), -// which is defined in the web ACL. +// In the rule, you also define the rate +// limit as 1,000. Requests that meet both of these conditions and exceed 1,000 +// requests every five minutes trigger the rule's action (block or count), which is +// defined in the web ACL. type RateBasedRule struct { // The Predicates object contains one Predicate element for each ByteMatchSet, @@ -895,16 +894,16 @@ type RegexMatchSet struct { // Contains an array of RegexMatchTuple objects. 
Each RegexMatchTuple object // contains: // - // * The part of a web request that you want AWS WAF to inspect, - // such as a query string or the value of the User-Agent header. + // * The part of a web request that you want AWS WAF to inspect, such as + // a query string or the value of the User-Agent header. // - // * The - // identifier of the pattern (a regular expression) that you want AWS WAF to look - // for. For more information, see RegexPatternSet. + // * The identifier of the + // pattern (a regular expression) that you want AWS WAF to look for. For more + // information, see RegexPatternSet. // - // * Whether to perform any - // conversions on the request, such as converting it to lowercase, before - // inspecting it for the specified string. + // * Whether to perform any conversions on the + // request, such as converting it to lowercase, before inspecting it for the + // specified string. RegexMatchTuples []*RegexMatchTuple } @@ -969,16 +968,16 @@ type RegexMatchSetUpdate struct { // in web requests, the location in requests that you want AWS WAF to search, and // other settings. Each RegexMatchTuple object contains: // -// * The part of a web +// * The part of a web // request that you want AWS WAF to inspect, such as a query string or the value of // the User-Agent header. // -// * The identifier of the pattern (a regular -// expression) that you want AWS WAF to look for. For more information, see -// RegexPatternSet. +// * The identifier of the pattern (a regular expression) +// that you want AWS WAF to look for. For more information, see RegexPatternSet. // -// * Whether to perform any conversions on the request, such -// as converting it to lowercase, before inspecting it for the specified string. +// * +// Whether to perform any conversions on the request, such as converting it to +// lowercase, before inspecting it for the specified string. type RegexMatchTuple struct { // Specifies where in a web request to look for the RegexPatternSet. @@ -1005,64 +1004,64 @@ type RegexMatchTuple struct { // commandline command and using unusual formatting to disguise some or all of the // command, use this option to perform the following transformations: // - // * Delete - // the following characters: \ " ' ^ - // - // * Delete spaces before the following - // characters: / ( + // * Delete the + // following characters: \ " ' ^ // - // * Replace the following characters with a space: , ; + // * Delete spaces before the following characters: + // / ( // - // * - // Replace multiple spaces with one space + // * Replace the following characters with a space: , ; // - // * Convert uppercase letters (A-Z) to - // lowercase (a-z) + // * Replace multiple + // spaces with one space // - // COMPRESS_WHITE_SPACE Use this option to replace the following - // characters with a space character (decimal 32): + // * Convert uppercase letters (A-Z) to lowercase + // (a-z) // - // * \f, formfeed, decimal - // 12 + // COMPRESS_WHITE_SPACE Use this option to replace the following characters + // with a space character (decimal 32): // - // * \t, tab, decimal 9 + // * \f, formfeed, decimal 12 // - // * \n, newline, decimal 10 + // * \t, tab, + // decimal 9 // - // * \r, carriage - // return, decimal 13 + // * \n, newline, decimal 10 // - // * \v, vertical tab, decimal 11 + // * \r, carriage return, decimal 13 // - // * non-breaking - // space, decimal 160 + // * \v, + // vertical tab, decimal 11 // - // COMPRESS_WHITE_SPACE also replaces multiple spaces with one - // space. 
HTML_ENTITY_DECODE Use this option to replace HTML-encoded characters - // with unencoded characters. HTML_ENTITY_DECODE performs the following - // operations: + // * non-breaking space, decimal + // 160 // - // * Replaces (ampersand)quot; with " + // COMPRESS_WHITE_SPACE also replaces multiple spaces with one space. + // HTML_ENTITY_DECODE Use this option to replace HTML-encoded characters with + // unencoded characters. HTML_ENTITY_DECODE performs the following operations: // - // * Replaces - // (ampersand)nbsp; with a non-breaking space, decimal 160 + // * + // Replaces (ampersand)quot; with " // - // * Replaces - // (ampersand)lt; with a "less than" symbol + // * Replaces (ampersand)nbsp; with a + // non-breaking space, decimal 160 // - // * Replaces (ampersand)gt; with > + // * Replaces (ampersand)lt; with a "less than" + // symbol // + // * Replaces (ampersand)gt; with > // - // * Replaces characters that are represented in hexadecimal format, - // (ampersand)#xhhhh;, with the corresponding characters + // * Replaces characters that are + // represented in hexadecimal format, (ampersand)#xhhhh;, with the corresponding + // characters // - // * Replaces characters - // that are represented in decimal format, (ampersand)#nnnn;, with the - // corresponding characters + // * Replaces characters that are represented in decimal format, + // (ampersand)#nnnn;, with the corresponding characters // - // LOWERCASE Use this option to convert uppercase letters - // (A-Z) to lowercase (a-z). URL_DECODE Use this option to decode a URL-encoded - // value. NONE Specify NONE if you don't want to perform any text transformations. + // LOWERCASE Use this option + // to convert uppercase letters (A-Z) to lowercase (a-z). URL_DECODE Use this + // option to decode a URL-encoded value. NONE Specify NONE if you don't want to + // perform any text transformations. // // This member is required. TextTransformation TextTransformation @@ -1158,16 +1157,16 @@ type RegexPatternSetUpdate struct { // objects that identify the web requests that you want to allow, block, or count. // For example, you might create a Rule that includes the following predicates: // +// * +// An IPSet that causes AWS WAF to search for web requests that originate from the +// IP address 192.0.2.44 // -// * An IPSet that causes AWS WAF to search for web requests that originate from -// the IP address 192.0.2.44 -// -// * A ByteMatchSet that causes AWS WAF to search -// for web requests for which the value of the User-Agent header is BadBot. +// * A ByteMatchSet that causes AWS WAF to search for web +// requests for which the value of the User-Agent header is BadBot. // -// To -// match the settings in this Rule, a request must originate from 192.0.2.44 AND -// include a User-Agent header for which the value is BadBot. +// To match the +// settings in this Rule, a request must originate from 192.0.2.44 AND include a +// User-Agent header for which the value is BadBot. type Rule struct { // The Predicates object contains one Predicate element for each ByteMatchSet, @@ -1205,13 +1204,13 @@ type Rule struct { // global use. A collection of predefined rules that you can add to a web ACL. Rule // groups are subject to the following limits: // -// * Three rule groups per -// account. You can request an increase to this limit by contacting customer -// support. +// * Three rule groups per account. +// You can request an increase to this limit by contacting customer support. // -// * One rule group per web ACL. 
+// * One +// rule group per web ACL. // -// * Ten rules per rule group. +// * Ten rules per rule group. type RuleGroup struct { // A unique identifier for a RuleGroup. You use RuleGroupId to get more information @@ -1425,63 +1424,62 @@ type SizeConstraint struct { // some or all of the command, use this option to perform the following // transformations: // - // * Delete the following characters: \ " ' ^ - // - // * Delete - // spaces before the following characters: / ( + // * Delete the following characters: \ " ' ^ // - // * Replace the following - // characters with a space: , ; + // * Delete spaces + // before the following characters: / ( // - // * Replace multiple spaces with one space + // * Replace the following characters with a + // space: , ; // + // * Replace multiple spaces with one space // - // * Convert uppercase letters (A-Z) to lowercase (a-z) + // * Convert uppercase + // letters (A-Z) to lowercase (a-z) // - // COMPRESS_WHITE_SPACE Use - // this option to replace the following characters with a space character (decimal - // 32): + // COMPRESS_WHITE_SPACE Use this option to + // replace the following characters with a space character (decimal 32): // - // * \f, formfeed, decimal 12 + // * \f, + // formfeed, decimal 12 // - // * \t, tab, decimal 9 + // * \t, tab, decimal 9 // - // * \n, - // newline, decimal 10 + // * \n, newline, decimal 10 // - // * \r, carriage return, decimal 13 + // * \r, + // carriage return, decimal 13 // - // * \v, vertical - // tab, decimal 11 + // * \v, vertical tab, decimal 11 // - // * non-breaking space, decimal 160 - // - // COMPRESS_WHITE_SPACE - // also replaces multiple spaces with one space. HTML_ENTITY_DECODE Use this option - // to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE - // performs the following operations: + // * non-breaking + // space, decimal 160 // - // * Replaces (ampersand)quot; with " + // COMPRESS_WHITE_SPACE also replaces multiple spaces with one + // space. HTML_ENTITY_DECODE Use this option to replace HTML-encoded characters + // with unencoded characters. HTML_ENTITY_DECODE performs the following + // operations: // + // * Replaces (ampersand)quot; with " // - // * Replaces (ampersand)nbsp; with a non-breaking space, decimal 160 + // * Replaces (ampersand)nbsp; + // with a non-breaking space, decimal 160 // - // * - // Replaces (ampersand)lt; with a "less than" symbol + // * Replaces (ampersand)lt; with a "less + // than" symbol // - // * Replaces (ampersand)gt; - // with > + // * Replaces (ampersand)gt; with > // - // * Replaces characters that are represented in hexadecimal format, - // (ampersand)#xhhhh;, with the corresponding characters + // * Replaces characters that are + // represented in hexadecimal format, (ampersand)#xhhhh;, with the corresponding + // characters // - // * Replaces characters - // that are represented in decimal format, (ampersand)#nnnn;, with the - // corresponding characters + // * Replaces characters that are represented in decimal format, + // (ampersand)#nnnn;, with the corresponding characters // - // LOWERCASE Use this option to convert uppercase letters - // (A-Z) to lowercase (a-z). URL_DECODE Use this option to decode a URL-encoded - // value. + // LOWERCASE Use this option + // to convert uppercase letters (A-Z) to lowercase (a-z). URL_DECODE Use this + // option to decode a URL-encoded value. // // This member is required. 
TextTransformation TextTransformation @@ -1682,64 +1680,64 @@ type SqlInjectionMatchTuple struct { // command and using unusual formatting to disguise some or all of the command, use // this option to perform the following transformations: // - // * Delete the - // following characters: \ " ' ^ + // * Delete the following + // characters: \ " ' ^ // - // * Delete spaces before the following - // characters: / ( + // * Delete spaces before the following characters: / ( // - // * Replace the following characters with a space: , ; + // * + // Replace the following characters with a space: , ; // - // * - // Replace multiple spaces with one space + // * Replace multiple spaces + // with one space // - // * Convert uppercase letters (A-Z) to - // lowercase (a-z) + // * Convert uppercase letters (A-Z) to lowercase + // (a-z) // - // COMPRESS_WHITE_SPACE Use this option to replace the following - // characters with a space character (decimal 32): + // COMPRESS_WHITE_SPACE Use this option to replace the following characters + // with a space character (decimal 32): // - // * \f, formfeed, decimal - // 12 + // * \f, formfeed, decimal 12 // - // * \t, tab, decimal 9 + // * \t, tab, + // decimal 9 // - // * \n, newline, decimal 10 + // * \n, newline, decimal 10 // - // * \r, carriage - // return, decimal 13 + // * \r, carriage return, decimal 13 // - // * \v, vertical tab, decimal 11 + // * \v, + // vertical tab, decimal 11 // - // * non-breaking - // space, decimal 160 + // * non-breaking space, decimal + // 160 // - // COMPRESS_WHITE_SPACE also replaces multiple spaces with one - // space. HTML_ENTITY_DECODE Use this option to replace HTML-encoded characters - // with unencoded characters. HTML_ENTITY_DECODE performs the following - // operations: + // COMPRESS_WHITE_SPACE also replaces multiple spaces with one space. + // HTML_ENTITY_DECODE Use this option to replace HTML-encoded characters with + // unencoded characters. HTML_ENTITY_DECODE performs the following operations: // - // * Replaces (ampersand)quot; with " + // * + // Replaces (ampersand)quot; with " // - // * Replaces - // (ampersand)nbsp; with a non-breaking space, decimal 160 + // * Replaces (ampersand)nbsp; with a + // non-breaking space, decimal 160 // - // * Replaces - // (ampersand)lt; with a "less than" symbol + // * Replaces (ampersand)lt; with a "less than" + // symbol // - // * Replaces (ampersand)gt; with > + // * Replaces (ampersand)gt; with > // + // * Replaces characters that are + // represented in hexadecimal format, (ampersand)#xhhhh;, with the corresponding + // characters // - // * Replaces characters that are represented in hexadecimal format, - // (ampersand)#xhhhh;, with the corresponding characters + // * Replaces characters that are represented in decimal format, + // (ampersand)#nnnn;, with the corresponding characters // - // * Replaces characters - // that are represented in decimal format, (ampersand)#nnnn;, with the - // corresponding characters - // - // LOWERCASE Use this option to convert uppercase letters - // (A-Z) to lowercase (a-z). URL_DECODE Use this option to decode a URL-encoded - // value. NONE Specify NONE if you don't want to perform any text transformations. + // LOWERCASE Use this option + // to convert uppercase letters (A-Z) to lowercase (a-z). URL_DECODE Use this + // option to decode a URL-encoded value. NONE Specify NONE if you don't want to + // perform any text transformations. // // This member is required. 
TextTransformation TextTransformation @@ -1879,13 +1877,13 @@ type WafAction struct { // Specifies how you want AWS WAF to respond to requests that match the settings in // a Rule. Valid settings include the following: // - // * ALLOW: AWS WAF allows + // * ALLOW: AWS WAF allows // requests // - // * BLOCK: AWS WAF blocks requests + // * BLOCK: AWS WAF blocks requests // - // * COUNT: AWS WAF increments - // a counter of the requests that match all of the conditions in the rule. AWS WAF + // * COUNT: AWS WAF increments a + // counter of the requests that match all of the conditions in the rule. AWS WAF // then continues to inspect the web request based on the remaining rules in the // web ACL. You can't specify COUNT for the default action for a WebACL. // @@ -2117,64 +2115,64 @@ type XssMatchTuple struct { // command and using unusual formatting to disguise some or all of the command, use // this option to perform the following transformations: // - // * Delete the - // following characters: \ " ' ^ + // * Delete the following + // characters: \ " ' ^ // - // * Delete spaces before the following - // characters: / ( + // * Delete spaces before the following characters: / ( // - // * Replace the following characters with a space: , ; + // * + // Replace the following characters with a space: , ; // - // * - // Replace multiple spaces with one space + // * Replace multiple spaces + // with one space // - // * Convert uppercase letters (A-Z) to - // lowercase (a-z) + // * Convert uppercase letters (A-Z) to lowercase + // (a-z) // - // COMPRESS_WHITE_SPACE Use this option to replace the following - // characters with a space character (decimal 32): + // COMPRESS_WHITE_SPACE Use this option to replace the following characters + // with a space character (decimal 32): // - // * \f, formfeed, decimal - // 12 + // * \f, formfeed, decimal 12 // - // * \t, tab, decimal 9 + // * \t, tab, + // decimal 9 // - // * \n, newline, decimal 10 + // * \n, newline, decimal 10 // - // * \r, carriage - // return, decimal 13 + // * \r, carriage return, decimal 13 // - // * \v, vertical tab, decimal 11 + // * \v, + // vertical tab, decimal 11 // - // * non-breaking - // space, decimal 160 - // - // COMPRESS_WHITE_SPACE also replaces multiple spaces with one - // space. HTML_ENTITY_DECODE Use this option to replace HTML-encoded characters - // with unencoded characters. HTML_ENTITY_DECODE performs the following - // operations: + // * non-breaking space, decimal + // 160 // - // * Replaces (ampersand)quot; with " + // COMPRESS_WHITE_SPACE also replaces multiple spaces with one space. + // HTML_ENTITY_DECODE Use this option to replace HTML-encoded characters with + // unencoded characters. 
HTML_ENTITY_DECODE performs the following operations: // - // * Replaces - // (ampersand)nbsp; with a non-breaking space, decimal 160 + // * + // Replaces (ampersand)quot; with " // - // * Replaces - // (ampersand)lt; with a "less than" symbol + // * Replaces (ampersand)nbsp; with a + // non-breaking space, decimal 160 // - // * Replaces (ampersand)gt; with > + // * Replaces (ampersand)lt; with a "less than" + // symbol // + // * Replaces (ampersand)gt; with > // - // * Replaces characters that are represented in hexadecimal format, - // (ampersand)#xhhhh;, with the corresponding characters + // * Replaces characters that are + // represented in hexadecimal format, (ampersand)#xhhhh;, with the corresponding + // characters // - // * Replaces characters - // that are represented in decimal format, (ampersand)#nnnn;, with the - // corresponding characters + // * Replaces characters that are represented in decimal format, + // (ampersand)#nnnn;, with the corresponding characters // - // LOWERCASE Use this option to convert uppercase letters - // (A-Z) to lowercase (a-z). URL_DECODE Use this option to decode a URL-encoded - // value. NONE Specify NONE if you don't want to perform any text transformations. + // LOWERCASE Use this option + // to convert uppercase letters (A-Z) to lowercase (a-z). URL_DECODE Use this + // option to decode a URL-encoded value. NONE Specify NONE if you don't want to + // perform any text transformations. // // This member is required. TextTransformation TextTransformation diff --git a/service/wafregional/api_op_AssociateWebACL.go b/service/wafregional/api_op_AssociateWebACL.go index dc91ed3715b..6b50958e7a9 100644 --- a/service/wafregional/api_op_AssociateWebACL.go +++ b/service/wafregional/api_op_AssociateWebACL.go @@ -40,11 +40,11 @@ type AssociateWebACLInput struct { // application load balancer or Amazon API Gateway stage. The ARN should be in one // of the following formats: // - // * For an Application Load Balancer: + // * For an Application Load Balancer: // arn:aws:elasticloadbalancing:region:account-id:loadbalancer/app/load-balancer-name/load-balancer-id // - // - // * For an Amazon API Gateway stage: + // * + // For an Amazon API Gateway stage: // arn:aws:apigateway:region::/restapis/api-id/stages/stage-name // // This member is required. diff --git a/service/wafregional/api_op_CreateByteMatchSet.go b/service/wafregional/api_op_CreateByteMatchSet.go index 88250f0a9ce..3b522da842b 100644 --- a/service/wafregional/api_op_CreateByteMatchSet.go +++ b/service/wafregional/api_op_CreateByteMatchSet.go @@ -24,17 +24,17 @@ import ( // string BadBot. You can then configure AWS WAF to reject those requests. To // create and configure a ByteMatchSet, perform the following steps: // -// * Use +// * Use // GetChangeToken to get the change token that you provide in the ChangeToken // parameter of a CreateByteMatchSet request. // -// * Submit a CreateByteMatchSet +// * Submit a CreateByteMatchSet // request. // -// * Use GetChangeToken to get the change token that you provide in -// the ChangeToken parameter of an UpdateByteMatchSet request. +// * Use GetChangeToken to get the change token that you provide in the +// ChangeToken parameter of an UpdateByteMatchSet request. // -// * Submit an +// * Submit an // UpdateByteMatchSet request to specify the part of the request that you want AWS // WAF to inspect (for example, the header or the URI) and the value that you want // AWS WAF to watch for. 
diff --git a/service/wafregional/api_op_CreateGeoMatchSet.go b/service/wafregional/api_op_CreateGeoMatchSet.go index d6ce6f15030..3e26ff34f04 100644 --- a/service/wafregional/api_op_CreateGeoMatchSet.go +++ b/service/wafregional/api_op_CreateGeoMatchSet.go @@ -24,17 +24,17 @@ import ( // contains those countries and then configure AWS WAF to block the requests. To // create and configure a GeoMatchSet, perform the following steps: // -// * Use +// * Use // GetChangeToken to get the change token that you provide in the ChangeToken // parameter of a CreateGeoMatchSet request. // -// * Submit a CreateGeoMatchSet +// * Submit a CreateGeoMatchSet // request. // -// * Use GetChangeToken to get the change token that you provide in -// the ChangeToken parameter of an UpdateGeoMatchSet request. +// * Use GetChangeToken to get the change token that you provide in the +// ChangeToken parameter of an UpdateGeoMatchSet request. // -// * Submit an +// * Submit an // UpdateGeoMatchSetSet request to specify the countries that you want AWS WAF to // watch for. // diff --git a/service/wafregional/api_op_CreateIPSet.go b/service/wafregional/api_op_CreateIPSet.go index 545cd3660f7..2b7df1e8a45 100644 --- a/service/wafregional/api_op_CreateIPSet.go +++ b/service/wafregional/api_op_CreateIPSet.go @@ -25,21 +25,21 @@ import ( // then configure AWS WAF to block the requests. To create and configure an IPSet, // perform the following steps: // -// * Use GetChangeToken to get the change token -// that you provide in the ChangeToken parameter of a CreateIPSet request. +// * Use GetChangeToken to get the change token that +// you provide in the ChangeToken parameter of a CreateIPSet request. // -// * -// Submit a CreateIPSet request. +// * Submit a +// CreateIPSet request. // -// * Use GetChangeToken to get the change token -// that you provide in the ChangeToken parameter of an UpdateIPSet request. +// * Use GetChangeToken to get the change token that you +// provide in the ChangeToken parameter of an UpdateIPSet request. // -// * -// Submit an UpdateIPSet request to specify the IP addresses that you want AWS WAF -// to watch for. +// * Submit an +// UpdateIPSet request to specify the IP addresses that you want AWS WAF to watch +// for. // -// For more information about how to use the AWS WAF API to allow or -// block HTTP requests, see the AWS WAF Developer Guide +// For more information about how to use the AWS WAF API to allow or block +// HTTP requests, see the AWS WAF Developer Guide // (https://docs.aws.amazon.com/waf/latest/developerguide/). func (c *Client) CreateIPSet(ctx context.Context, params *CreateIPSetInput, optFns ...func(*Options)) (*CreateIPSetOutput, error) { if params == nil { diff --git a/service/wafregional/api_op_CreateRateBasedRule.go b/service/wafregional/api_op_CreateRateBasedRule.go index 73a1fffb9fe..d19bfcd2a99 100644 --- a/service/wafregional/api_op_CreateRateBasedRule.go +++ b/service/wafregional/api_op_CreateRateBasedRule.go @@ -26,60 +26,59 @@ import ( // exceed the RateLimit, but it also must match all the conditions to be counted or // blocked. For example, suppose you add the following to a RateBasedRule: // -// * -// An IPSet that matches the IP address 192.0.2.44/32 +// * An +// IPSet that matches the IP address 192.0.2.44/32 // -// * A ByteMatchSet that -// matches BadBot in the User-Agent header +// * A ByteMatchSet that matches +// BadBot in the User-Agent header // -// Further, you specify a RateLimit of -// 1,000. 
You then add the RateBasedRule to a WebACL and specify that you want to -// block requests that meet the conditions in the rule. For a request to be -// blocked, it must come from the IP address 192.0.2.44 and the User-Agent header -// in the request must contain the value BadBot. Further, requests that match these -// two conditions must be received at a rate of more than 1,000 requests every five +// Further, you specify a RateLimit of 1,000. You +// then add the RateBasedRule to a WebACL and specify that you want to block +// requests that meet the conditions in the rule. For a request to be blocked, it +// must come from the IP address 192.0.2.44 and the User-Agent header in the +// request must contain the value BadBot. Further, requests that match these two +// conditions must be received at a rate of more than 1,000 requests every five // minutes. If both conditions are met and the rate is exceeded, AWS WAF blocks the // requests. If the rate drops below 1,000 for a five-minute period, AWS WAF no // longer blocks the requests. As a second example, suppose you want to limit // requests to a particular page on your site. To do this, you could add the // following to a RateBasedRule: // -// * A ByteMatchSet with FieldToMatch of URI +// * A ByteMatchSet with FieldToMatch of URI // +// * A +// PositionalConstraint of STARTS_WITH // -// * A PositionalConstraint of STARTS_WITH +// * A TargetString of login // -// * A TargetString of login -// -// Further, -// you specify a RateLimit of 1,000. By adding this RateBasedRule to a WebACL, you +// Further, you +// specify a RateLimit of 1,000. By adding this RateBasedRule to a WebACL, you // could limit requests to your login page without affecting the rest of your site. // To create and configure a RateBasedRule, perform the following steps: // -// * -// Create and update the predicates that you want to include in the rule. For more +// * Create +// and update the predicates that you want to include in the rule. For more // information, see CreateByteMatchSet, CreateIPSet, and // CreateSqlInjectionMatchSet. // -// * Use GetChangeToken to get the change token -// that you provide in the ChangeToken parameter of a CreateRule request. +// * Use GetChangeToken to get the change token that +// you provide in the ChangeToken parameter of a CreateRule request. // -// * -// Submit a CreateRateBasedRule request. +// * Submit a +// CreateRateBasedRule request. // -// * Use GetChangeToken to get the -// change token that you provide in the ChangeToken parameter of an UpdateRule -// request. +// * Use GetChangeToken to get the change token that +// you provide in the ChangeToken parameter of an UpdateRule request. // -// * Submit an UpdateRateBasedRule request to specify the predicates -// that you want to include in the rule. +// * Submit an +// UpdateRateBasedRule request to specify the predicates that you want to include +// in the rule. // -// * Create and update a WebACL that -// contains the RateBasedRule. For more information, see CreateWebACL. +// * Create and update a WebACL that contains the RateBasedRule. For +// more information, see CreateWebACL. // -// For more -// information about how to use the AWS WAF API to allow or block HTTP requests, -// see the AWS WAF Developer Guide +// For more information about how to use the +// AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide // (https://docs.aws.amazon.com/waf/latest/developerguide/). 
func (c *Client) CreateRateBasedRule(ctx context.Context, params *CreateRateBasedRuleInput, optFns ...func(*Options)) (*CreateRateBasedRuleOutput, error) { if params == nil { diff --git a/service/wafregional/api_op_CreateRegexMatchSet.go b/service/wafregional/api_op_CreateRegexMatchSet.go index ed6fea4434d..972f4543759 100644 --- a/service/wafregional/api_op_CreateRegexMatchSet.go +++ b/service/wafregional/api_op_CreateRegexMatchSet.go @@ -25,24 +25,24 @@ import ( // can then configure AWS WAF to reject those requests. To create and configure a // RegexMatchSet, perform the following steps: // -// * Use GetChangeToken to get the +// * Use GetChangeToken to get the // change token that you provide in the ChangeToken parameter of a // CreateRegexMatchSet request. // -// * Submit a CreateRegexMatchSet request. +// * Submit a CreateRegexMatchSet request. // -// * -// Use GetChangeToken to get the change token that you provide in the ChangeToken +// * Use +// GetChangeToken to get the change token that you provide in the ChangeToken // parameter of an UpdateRegexMatchSet request. // -// * Submit an -// UpdateRegexMatchSet request to specify the part of the request that you want AWS -// WAF to inspect (for example, the header or the URI) and the value, using a -// RegexPatternSet, that you want AWS WAF to watch for. +// * Submit an UpdateRegexMatchSet +// request to specify the part of the request that you want AWS WAF to inspect (for +// example, the header or the URI) and the value, using a RegexPatternSet, that you +// want AWS WAF to watch for. // -// For more information about -// how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF -// Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/). +// For more information about how to use the AWS WAF +// API to allow or block HTTP requests, see the AWS WAF Developer Guide +// (https://docs.aws.amazon.com/waf/latest/developerguide/). func (c *Client) CreateRegexMatchSet(ctx context.Context, params *CreateRegexMatchSetInput, optFns ...func(*Options)) (*CreateRegexMatchSetOutput, error) { if params == nil { params = &CreateRegexMatchSetInput{} diff --git a/service/wafregional/api_op_CreateRegexPatternSet.go b/service/wafregional/api_op_CreateRegexPatternSet.go index aa7fa89c3d0..8734416dac7 100644 --- a/service/wafregional/api_op_CreateRegexPatternSet.go +++ b/service/wafregional/api_op_CreateRegexPatternSet.go @@ -23,21 +23,21 @@ import ( // requests. To create and configure a RegexPatternSet, perform the following // steps: // -// * Use GetChangeToken to get the change token that you provide in the +// * Use GetChangeToken to get the change token that you provide in the // ChangeToken parameter of a CreateRegexPatternSet request. // -// * Submit a +// * Submit a // CreateRegexPatternSet request. // -// * Use GetChangeToken to get the change token +// * Use GetChangeToken to get the change token // that you provide in the ChangeToken parameter of an UpdateRegexPatternSet // request. // -// * Submit an UpdateRegexPatternSet request to specify the string -// that you want AWS WAF to watch for. +// * Submit an UpdateRegexPatternSet request to specify the string that +// you want AWS WAF to watch for. 
// -// For more information about how to use the -// AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide +// For more information about how to use the AWS +// WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide // (https://docs.aws.amazon.com/waf/latest/developerguide/). func (c *Client) CreateRegexPatternSet(ctx context.Context, params *CreateRegexPatternSetInput, optFns ...func(*Options)) (*CreateRegexPatternSetOutput, error) { if params == nil { diff --git a/service/wafregional/api_op_CreateRule.go b/service/wafregional/api_op_CreateRule.go index 0a64e968c02..6f9f142d096 100644 --- a/service/wafregional/api_op_CreateRule.go +++ b/service/wafregional/api_op_CreateRule.go @@ -23,40 +23,40 @@ import ( // specifications to be allowed or blocked. For example, suppose that you add the // following to a Rule: // -// * An IPSet that matches the IP address 192.0.2.44/32 +// * An IPSet that matches the IP address 192.0.2.44/32 // +// * A +// ByteMatchSet that matches BadBot in the User-Agent header // -// * A ByteMatchSet that matches BadBot in the User-Agent header +// You then add the Rule +// to a WebACL and specify that you want to blocks requests that satisfy the Rule. +// For a request to be blocked, it must come from the IP address 192.0.2.44 and the +// User-Agent header in the request must contain the value BadBot. To create and +// configure a Rule, perform the following steps: // -// You then add the -// Rule to a WebACL and specify that you want to blocks requests that satisfy the -// Rule. For a request to be blocked, it must come from the IP address 192.0.2.44 -// and the User-Agent header in the request must contain the value BadBot. To -// create and configure a Rule, perform the following steps: +// * Create and update the +// predicates that you want to include in the Rule. For more information, see +// CreateByteMatchSet, CreateIPSet, and CreateSqlInjectionMatchSet. // -// * Create and -// update the predicates that you want to include in the Rule. For more -// information, see CreateByteMatchSet, CreateIPSet, and -// CreateSqlInjectionMatchSet. +// * Use +// GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of a CreateRule request. // -// * Use GetChangeToken to get the change token -// that you provide in the ChangeToken parameter of a CreateRule request. +// * Submit a CreateRule request. // -// * -// Submit a CreateRule request. +// * Use +// GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of an UpdateRule request. // -// * Use GetChangeToken to get the change token -// that you provide in the ChangeToken parameter of an UpdateRule request. +// * Submit an UpdateRule request to specify +// the predicates that you want to include in the Rule. // -// * -// Submit an UpdateRule request to specify the predicates that you want to include -// in the Rule. +// * Create and update a +// WebACL that contains the Rule. For more information, see CreateWebACL. // -// * Create and update a WebACL that contains the Rule. For more -// information, see CreateWebACL. -// -// For more information about how to use the AWS -// WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide +// For more +// information about how to use the AWS WAF API to allow or block HTTP requests, +// see the AWS WAF Developer Guide // (https://docs.aws.amazon.com/waf/latest/developerguide/). 
func (c *Client) CreateRule(ctx context.Context, params *CreateRuleInput, optFns ...func(*Options)) (*CreateRuleOutput, error) { if params == nil { diff --git a/service/wafregional/api_op_CreateRuleGroup.go b/service/wafregional/api_op_CreateRuleGroup.go index 7215b098991..aa3e2fe2828 100644 --- a/service/wafregional/api_op_CreateRuleGroup.go +++ b/service/wafregional/api_op_CreateRuleGroup.go @@ -21,13 +21,13 @@ import ( // rules that you add to a web ACL. You use UpdateRuleGroup to add rules to the // rule group. Rule groups are subject to the following limits: // -// * Three rule +// * Three rule // groups per account. You can request an increase to this limit by contacting // customer support. // -// * One rule group per web ACL. +// * One rule group per web ACL. // -// * Ten rules per rule +// * Ten rules per rule // group. // // For more information about how to use the AWS WAF API to allow or block diff --git a/service/wafregional/api_op_CreateSizeConstraintSet.go b/service/wafregional/api_op_CreateSizeConstraintSet.go index 886433094ec..24d0937453a 100644 --- a/service/wafregional/api_op_CreateSizeConstraintSet.go +++ b/service/wafregional/api_op_CreateSizeConstraintSet.go @@ -25,23 +25,23 @@ import ( // WAF to reject those requests. To create and configure a SizeConstraintSet, // perform the following steps: // -// * Use GetChangeToken to get the change token -// that you provide in the ChangeToken parameter of a CreateSizeConstraintSet +// * Use GetChangeToken to get the change token that +// you provide in the ChangeToken parameter of a CreateSizeConstraintSet // request. // -// * Submit a CreateSizeConstraintSet request. +// * Submit a CreateSizeConstraintSet request. // -// * Use -// GetChangeToken to get the change token that you provide in the ChangeToken -// parameter of an UpdateSizeConstraintSet request. +// * Use GetChangeToken to +// get the change token that you provide in the ChangeToken parameter of an +// UpdateSizeConstraintSet request. // -// * Submit an -// UpdateSizeConstraintSet request to specify the part of the request that you want -// AWS WAF to inspect (for example, the header or the URI) and the value that you -// want AWS WAF to watch for. +// * Submit an UpdateSizeConstraintSet request to +// specify the part of the request that you want AWS WAF to inspect (for example, +// the header or the URI) and the value that you want AWS WAF to watch for. // -// For more information about how to use the AWS WAF -// API to allow or block HTTP requests, see the AWS WAF Developer Guide +// For +// more information about how to use the AWS WAF API to allow or block HTTP +// requests, see the AWS WAF Developer Guide // (https://docs.aws.amazon.com/waf/latest/developerguide/). func (c *Client) CreateSizeConstraintSet(ctx context.Context, params *CreateSizeConstraintSetInput, optFns ...func(*Options)) (*CreateSizeConstraintSetOutput, error) { if params == nil { diff --git a/service/wafregional/api_op_CreateSqlInjectionMatchSet.go b/service/wafregional/api_op_CreateSqlInjectionMatchSet.go index 05070791828..7fbaace88ec 100644 --- a/service/wafregional/api_op_CreateSqlInjectionMatchSet.go +++ b/service/wafregional/api_op_CreateSqlInjectionMatchSet.go @@ -23,17 +23,17 @@ import ( // malicious strings. To create and configure a SqlInjectionMatchSet, perform the // following steps: // -// * Use GetChangeToken to get the change token that you -// provide in the ChangeToken parameter of a CreateSqlInjectionMatchSet request. 
+// * Use GetChangeToken to get the change token that you provide +// in the ChangeToken parameter of a CreateSqlInjectionMatchSet request. // +// * Submit +// a CreateSqlInjectionMatchSet request. // -// * Submit a CreateSqlInjectionMatchSet request. -// -// * Use GetChangeToken to get -// the change token that you provide in the ChangeToken parameter of an +// * Use GetChangeToken to get the change +// token that you provide in the ChangeToken parameter of an // UpdateSqlInjectionMatchSet request. // -// * Submit an UpdateSqlInjectionMatchSet +// * Submit an UpdateSqlInjectionMatchSet // request to specify the parts of web requests in which you want to allow, block, // or count malicious SQL code. // diff --git a/service/wafregional/api_op_CreateWebACL.go b/service/wafregional/api_op_CreateWebACL.go index 8b1392738f7..a71a7f5ffa1 100644 --- a/service/wafregional/api_op_CreateWebACL.go +++ b/service/wafregional/api_op_CreateWebACL.go @@ -24,26 +24,26 @@ import ( // any of the Rules in a WebACL, AWS WAF responds to the request with the default // action. To create and configure a WebACL, perform the following steps: // -// * -// Create and update the ByteMatchSet objects and other predicates that you want to +// * Create +// and update the ByteMatchSet objects and other predicates that you want to // include in Rules. For more information, see CreateByteMatchSet, // UpdateByteMatchSet, CreateIPSet, UpdateIPSet, CreateSqlInjectionMatchSet, and // UpdateSqlInjectionMatchSet. // -// * Create and update the Rules that you want to +// * Create and update the Rules that you want to // include in the WebACL. For more information, see CreateRule and UpdateRule. // -// -// * Use GetChangeToken to get the change token that you provide in the ChangeToken +// * +// Use GetChangeToken to get the change token that you provide in the ChangeToken // parameter of a CreateWebACL request. // -// * Submit a CreateWebACL request. -// +// * Submit a CreateWebACL request. // -// * Use GetChangeToken to get the change token that you provide in the ChangeToken +// * Use +// GetChangeToken to get the change token that you provide in the ChangeToken // parameter of an UpdateWebACL request. // -// * Submit an UpdateWebACL request to +// * Submit an UpdateWebACL request to // specify the Rules that you want to include in the WebACL, to specify the default // action, and to associate the WebACL with a CloudFront distribution. // diff --git a/service/wafregional/api_op_CreateWebACLMigrationStack.go b/service/wafregional/api_op_CreateWebACLMigrationStack.go index b08994e0349..55e59c17955 100644 --- a/service/wafregional/api_op_CreateWebACLMigrationStack.go +++ b/service/wafregional/api_op_CreateWebACLMigrationStack.go @@ -49,17 +49,17 @@ type CreateWebACLMigrationStackInput struct { // The name of the Amazon S3 bucket to store the CloudFormation template in. The S3 // bucket must be configured as follows for the migration: // - // * The bucket name - // must start with aws-waf-migration-. For example, aws-waf-migration-my-web-acl. + // * The bucket name must + // start with aws-waf-migration-. For example, aws-waf-migration-my-web-acl. // + // * The + // bucket must be in the Region where you are deploying the template. For example, + // for a web ACL in us-west-2, you must use an Amazon S3 bucket in us-west-2 and + // you must deploy the template stack to us-west-2. // - // * The bucket must be in the Region where you are deploying the template. 
For - // example, for a web ACL in us-west-2, you must use an Amazon S3 bucket in - // us-west-2 and you must deploy the template stack to us-west-2. - // - // * The bucket - // policies must permit the migration process to write data. For listings of the - // bucket policies, see the Examples section. + // * The bucket policies must + // permit the migration process to write data. For listings of the bucket policies, + // see the Examples section. // // This member is required. S3BucketName *string diff --git a/service/wafregional/api_op_CreateXssMatchSet.go b/service/wafregional/api_op_CreateXssMatchSet.go index 3434e61a716..151358b8423 100644 --- a/service/wafregional/api_op_CreateXssMatchSet.go +++ b/service/wafregional/api_op_CreateXssMatchSet.go @@ -23,17 +23,17 @@ import ( // malicious strings. To create and configure an XssMatchSet, perform the following // steps: // -// * Use GetChangeToken to get the change token that you provide in the +// * Use GetChangeToken to get the change token that you provide in the // ChangeToken parameter of a CreateXssMatchSet request. // -// * Submit a +// * Submit a // CreateXssMatchSet request. // -// * Use GetChangeToken to get the change token -// that you provide in the ChangeToken parameter of an UpdateXssMatchSet request. +// * Use GetChangeToken to get the change token that +// you provide in the ChangeToken parameter of an UpdateXssMatchSet request. // -// -// * Submit an UpdateXssMatchSet request to specify the parts of web requests in +// * +// Submit an UpdateXssMatchSet request to specify the parts of web requests in // which you want to allow, block, or count cross-site scripting attacks. // // For more diff --git a/service/wafregional/api_op_DeleteByteMatchSet.go b/service/wafregional/api_op_DeleteByteMatchSet.go index 439213810bf..7a778b69d53 100644 --- a/service/wafregional/api_op_DeleteByteMatchSet.go +++ b/service/wafregional/api_op_DeleteByteMatchSet.go @@ -22,14 +22,14 @@ import ( // use UpdateRule. To permanently delete a ByteMatchSet, perform the following // steps: // -// * Update the ByteMatchSet to remove filters, if any. For more +// * Update the ByteMatchSet to remove filters, if any. For more // information, see UpdateByteMatchSet. // -// * Use GetChangeToken to get the change +// * Use GetChangeToken to get the change // token that you provide in the ChangeToken parameter of a DeleteByteMatchSet // request. // -// * Submit a DeleteByteMatchSet request. +// * Submit a DeleteByteMatchSet request. func (c *Client) DeleteByteMatchSet(ctx context.Context, params *DeleteByteMatchSetInput, optFns ...func(*Options)) (*DeleteByteMatchSetOutput, error) { if params == nil { params = &DeleteByteMatchSetInput{} diff --git a/service/wafregional/api_op_DeleteGeoMatchSet.go b/service/wafregional/api_op_DeleteGeoMatchSet.go index b839b3286b5..850fb91c897 100644 --- a/service/wafregional/api_op_DeleteGeoMatchSet.go +++ b/service/wafregional/api_op_DeleteGeoMatchSet.go @@ -21,15 +21,15 @@ import ( // want to remove a GeoMatchSet from a Rule, use UpdateRule. To permanently delete // a GeoMatchSet from AWS WAF, perform the following steps: // -// * Update the +// * Update the // GeoMatchSet to remove any countries. For more information, see // UpdateGeoMatchSet. // -// * Use GetChangeToken to get the change token that you +// * Use GetChangeToken to get the change token that you // provide in the ChangeToken parameter of a DeleteGeoMatchSet request. // -// * -// Submit a DeleteGeoMatchSet request. 
+// * Submit a +// DeleteGeoMatchSet request. func (c *Client) DeleteGeoMatchSet(ctx context.Context, params *DeleteGeoMatchSetInput, optFns ...func(*Options)) (*DeleteGeoMatchSetOutput, error) { if params == nil { params = &DeleteGeoMatchSetInput{} diff --git a/service/wafregional/api_op_DeleteIPSet.go b/service/wafregional/api_op_DeleteIPSet.go index 01e8a51d2d3..c91a6dd30f2 100644 --- a/service/wafregional/api_op_DeleteIPSet.go +++ b/service/wafregional/api_op_DeleteIPSet.go @@ -21,14 +21,14 @@ import ( // want to remove an IPSet from a Rule, use UpdateRule. To permanently delete an // IPSet from AWS WAF, perform the following steps: // -// * Update the IPSet to -// remove IP address ranges, if any. For more information, see UpdateIPSet. +// * Update the IPSet to remove +// IP address ranges, if any. For more information, see UpdateIPSet. // -// * -// Use GetChangeToken to get the change token that you provide in the ChangeToken +// * Use +// GetChangeToken to get the change token that you provide in the ChangeToken // parameter of a DeleteIPSet request. // -// * Submit a DeleteIPSet request. +// * Submit a DeleteIPSet request. func (c *Client) DeleteIPSet(ctx context.Context, params *DeleteIPSetInput, optFns ...func(*Options)) (*DeleteIPSetOutput, error) { if params == nil { params = &DeleteIPSetInput{} diff --git a/service/wafregional/api_op_DeleteRateBasedRule.go b/service/wafregional/api_op_DeleteRateBasedRule.go index 34512efefab..6f0b375ff96 100644 --- a/service/wafregional/api_op_DeleteRateBasedRule.go +++ b/service/wafregional/api_op_DeleteRateBasedRule.go @@ -22,14 +22,14 @@ import ( // UpdateWebACL. To permanently delete a RateBasedRule from AWS WAF, perform the // following steps: // -// * Update the RateBasedRule to remove predicates, if any. -// For more information, see UpdateRateBasedRule. +// * Update the RateBasedRule to remove predicates, if any. For +// more information, see UpdateRateBasedRule. // -// * Use GetChangeToken to get -// the change token that you provide in the ChangeToken parameter of a +// * Use GetChangeToken to get the +// change token that you provide in the ChangeToken parameter of a // DeleteRateBasedRule request. // -// * Submit a DeleteRateBasedRule request. +// * Submit a DeleteRateBasedRule request. func (c *Client) DeleteRateBasedRule(ctx context.Context, params *DeleteRateBasedRuleInput, optFns ...func(*Options)) (*DeleteRateBasedRuleOutput, error) { if params == nil { params = &DeleteRateBasedRuleInput{} diff --git a/service/wafregional/api_op_DeleteRegexMatchSet.go b/service/wafregional/api_op_DeleteRegexMatchSet.go index e5c80c38cf2..831180f0bf4 100644 --- a/service/wafregional/api_op_DeleteRegexMatchSet.go +++ b/service/wafregional/api_op_DeleteRegexMatchSet.go @@ -22,14 +22,14 @@ import ( // RegexMatchSet from a Rule, use UpdateRule. To permanently delete a // RegexMatchSet, perform the following steps: // -// * Update the RegexMatchSet to +// * Update the RegexMatchSet to // remove filters, if any. For more information, see UpdateRegexMatchSet. // -// * -// Use GetChangeToken to get the change token that you provide in the ChangeToken +// * Use +// GetChangeToken to get the change token that you provide in the ChangeToken // parameter of a DeleteRegexMatchSet request. // -// * Submit a DeleteRegexMatchSet +// * Submit a DeleteRegexMatchSet // request. 
func (c *Client) DeleteRegexMatchSet(ctx context.Context, params *DeleteRegexMatchSetInput, optFns ...func(*Options)) (*DeleteRegexMatchSetOutput, error) { if params == nil { diff --git a/service/wafregional/api_op_DeleteRule.go b/service/wafregional/api_op_DeleteRule.go index fbfbce9abf0..f3d1d0bca06 100644 --- a/service/wafregional/api_op_DeleteRule.go +++ b/service/wafregional/api_op_DeleteRule.go @@ -22,14 +22,14 @@ import ( // UpdateWebACL. To permanently delete a Rule from AWS WAF, perform the following // steps: // -// * Update the Rule to remove predicates, if any. For more -// information, see UpdateRule. +// * Update the Rule to remove predicates, if any. For more information, +// see UpdateRule. // -// * Use GetChangeToken to get the change token -// that you provide in the ChangeToken parameter of a DeleteRule request. +// * Use GetChangeToken to get the change token that you provide +// in the ChangeToken parameter of a DeleteRule request. // -// * -// Submit a DeleteRule request. +// * Submit a DeleteRule +// request. func (c *Client) DeleteRule(ctx context.Context, params *DeleteRuleInput, optFns ...func(*Options)) (*DeleteRuleOutput, error) { if params == nil { params = &DeleteRuleInput{} diff --git a/service/wafregional/api_op_DeleteRuleGroup.go b/service/wafregional/api_op_DeleteRuleGroup.go index ee3dff91e36..fce1ae55c05 100644 --- a/service/wafregional/api_op_DeleteRuleGroup.go +++ b/service/wafregional/api_op_DeleteRuleGroup.go @@ -21,14 +21,14 @@ import ( // just want to remove a RuleGroup from a WebACL, use UpdateWebACL. To permanently // delete a RuleGroup from AWS WAF, perform the following steps: // -// * Update the +// * Update the // RuleGroup to remove rules, if any. For more information, see UpdateRuleGroup. // -// -// * Use GetChangeToken to get the change token that you provide in the ChangeToken +// * +// Use GetChangeToken to get the change token that you provide in the ChangeToken // parameter of a DeleteRuleGroup request. // -// * Submit a DeleteRuleGroup request. +// * Submit a DeleteRuleGroup request. func (c *Client) DeleteRuleGroup(ctx context.Context, params *DeleteRuleGroupInput, optFns ...func(*Options)) (*DeleteRuleGroupOutput, error) { if params == nil { params = &DeleteRuleGroupInput{} diff --git a/service/wafregional/api_op_DeleteSizeConstraintSet.go b/service/wafregional/api_op_DeleteSizeConstraintSet.go index e829a8f17cc..23f6c242f8c 100644 --- a/service/wafregional/api_op_DeleteSizeConstraintSet.go +++ b/service/wafregional/api_op_DeleteSizeConstraintSet.go @@ -22,15 +22,15 @@ import ( // SizeConstraintSet from a Rule, use UpdateRule. To permanently delete a // SizeConstraintSet, perform the following steps: // -// * Update the -// SizeConstraintSet to remove filters, if any. For more information, see -// UpdateSizeConstraintSet. +// * Update the SizeConstraintSet +// to remove filters, if any. For more information, see UpdateSizeConstraintSet. // -// * Use GetChangeToken to get the change token that -// you provide in the ChangeToken parameter of a DeleteSizeConstraintSet request. +// * +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of a DeleteSizeConstraintSet request. // -// -// * Submit a DeleteSizeConstraintSet request. +// * Submit a +// DeleteSizeConstraintSet request. 
func (c *Client) DeleteSizeConstraintSet(ctx context.Context, params *DeleteSizeConstraintSetInput, optFns ...func(*Options)) (*DeleteSizeConstraintSetOutput, error) { if params == nil { params = &DeleteSizeConstraintSetInput{} diff --git a/service/wafregional/api_op_DeleteSqlInjectionMatchSet.go b/service/wafregional/api_op_DeleteSqlInjectionMatchSet.go index 55ceeb2815a..0b966b42082 100644 --- a/service/wafregional/api_op_DeleteSqlInjectionMatchSet.go +++ b/service/wafregional/api_op_DeleteSqlInjectionMatchSet.go @@ -22,15 +22,15 @@ import ( // SqlInjectionMatchSet from a Rule, use UpdateRule. To permanently delete a // SqlInjectionMatchSet from AWS WAF, perform the following steps: // -// * Update -// the SqlInjectionMatchSet to remove filters, if any. For more information, see +// * Update the +// SqlInjectionMatchSet to remove filters, if any. For more information, see // UpdateSqlInjectionMatchSet. // -// * Use GetChangeToken to get the change token -// that you provide in the ChangeToken parameter of a DeleteSqlInjectionMatchSet +// * Use GetChangeToken to get the change token that +// you provide in the ChangeToken parameter of a DeleteSqlInjectionMatchSet // request. // -// * Submit a DeleteSqlInjectionMatchSet request. +// * Submit a DeleteSqlInjectionMatchSet request. func (c *Client) DeleteSqlInjectionMatchSet(ctx context.Context, params *DeleteSqlInjectionMatchSetInput, optFns ...func(*Options)) (*DeleteSqlInjectionMatchSetOutput, error) { if params == nil { params = &DeleteSqlInjectionMatchSetInput{} diff --git a/service/wafregional/api_op_DeleteWebACL.go b/service/wafregional/api_op_DeleteWebACL.go index 333ca6fef05..ea4043dd040 100644 --- a/service/wafregional/api_op_DeleteWebACL.go +++ b/service/wafregional/api_op_DeleteWebACL.go @@ -19,15 +19,14 @@ import ( // global use. Permanently deletes a WebACL. You can't delete a WebACL if it still // contains any Rules. To delete a WebACL, perform the following steps: // -// * -// Update the WebACL to remove Rules, if any. For more information, see -// UpdateWebACL. +// * Update +// the WebACL to remove Rules, if any. For more information, see UpdateWebACL. // -// * Use GetChangeToken to get the change token that you provide -// in the ChangeToken parameter of a DeleteWebACL request. +// * +// Use GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of a DeleteWebACL request. // -// * Submit a -// DeleteWebACL request. +// * Submit a DeleteWebACL request. func (c *Client) DeleteWebACL(ctx context.Context, params *DeleteWebACLInput, optFns ...func(*Options)) (*DeleteWebACLOutput, error) { if params == nil { params = &DeleteWebACLInput{} diff --git a/service/wafregional/api_op_DeleteXssMatchSet.go b/service/wafregional/api_op_DeleteXssMatchSet.go index 14378765633..8721c45d38d 100644 --- a/service/wafregional/api_op_DeleteXssMatchSet.go +++ b/service/wafregional/api_op_DeleteXssMatchSet.go @@ -22,14 +22,14 @@ import ( // To permanently delete an XssMatchSet from AWS WAF, perform the following // steps: // -// * Update the XssMatchSet to remove filters, if any. For more +// * Update the XssMatchSet to remove filters, if any. For more // information, see UpdateXssMatchSet. // -// * Use GetChangeToken to get the change +// * Use GetChangeToken to get the change // token that you provide in the ChangeToken parameter of a DeleteXssMatchSet // request. // -// * Submit a DeleteXssMatchSet request. +// * Submit a DeleteXssMatchSet request. 
func (c *Client) DeleteXssMatchSet(ctx context.Context, params *DeleteXssMatchSetInput, optFns ...func(*Options)) (*DeleteXssMatchSetOutput, error) { if params == nil { params = &DeleteXssMatchSetInput{} diff --git a/service/wafregional/api_op_DisassociateWebACL.go b/service/wafregional/api_op_DisassociateWebACL.go index 892ef0f9480..b8e9150df90 100644 --- a/service/wafregional/api_op_DisassociateWebACL.go +++ b/service/wafregional/api_op_DisassociateWebACL.go @@ -40,12 +40,12 @@ type DisassociateWebACLInput struct { // removed, either an application load balancer or Amazon API Gateway stage. The // ARN should be in one of the following formats: // - // * For an Application Load + // * For an Application Load // Balancer: // arn:aws:elasticloadbalancing:region:account-id:loadbalancer/app/load-balancer-name/load-balancer-id // - // - // * For an Amazon API Gateway stage: + // * + // For an Amazon API Gateway stage: // arn:aws:apigateway:region::/restapis/api-id/stages/stage-name // // This member is required. diff --git a/service/wafregional/api_op_GetByteMatchSet.go b/service/wafregional/api_op_GetByteMatchSet.go index 4a71eafd451..db2266e53ce 100644 --- a/service/wafregional/api_op_GetByteMatchSet.go +++ b/service/wafregional/api_op_GetByteMatchSet.go @@ -47,14 +47,14 @@ type GetByteMatchSetOutput struct { // Information about the ByteMatchSet that you specified in the GetByteMatchSet // request. For more information, see the following topics: // - // * ByteMatchSet: + // * ByteMatchSet: // Contains ByteMatchSetId, ByteMatchTuples, and Name // - // * ByteMatchTuples: - // Contains an array of ByteMatchTuple objects. Each ByteMatchTuple object contains + // * ByteMatchTuples: Contains + // an array of ByteMatchTuple objects. Each ByteMatchTuple object contains // FieldToMatch, PositionalConstraint, TargetString, and TextTransformation // - // * + // * // FieldToMatch: Contains Data and Type ByteMatchSet *types.ByteMatchSet diff --git a/service/wafregional/api_op_GetChangeTokenStatus.go b/service/wafregional/api_op_GetChangeTokenStatus.go index f82743e5ce4..0e573558180 100644 --- a/service/wafregional/api_op_GetChangeTokenStatus.go +++ b/service/wafregional/api_op_GetChangeTokenStatus.go @@ -20,15 +20,15 @@ import ( // global use. Returns the status of a ChangeToken that you got by calling // GetChangeToken. ChangeTokenStatus is one of the following values: // -// * +// * // PROVISIONED: You requested the change token by calling GetChangeToken, but you // haven't used it yet in a call to create, update, or delete an AWS WAF object. // +// * +// PENDING: AWS WAF is propagating the create, update, or delete request to all AWS +// WAF servers. // -// * PENDING: AWS WAF is propagating the create, update, or delete request to all -// AWS WAF servers. -// -// * INSYNC: Propagation is complete. +// * INSYNC: Propagation is complete. func (c *Client) GetChangeTokenStatus(ctx context.Context, params *GetChangeTokenStatusInput, optFns ...func(*Options)) (*GetChangeTokenStatusOutput, error) { if params == nil { params = &GetChangeTokenStatusInput{} diff --git a/service/wafregional/api_op_GetIPSet.go b/service/wafregional/api_op_GetIPSet.go index 68ef9c01a38..55077201234 100644 --- a/service/wafregional/api_op_GetIPSet.go +++ b/service/wafregional/api_op_GetIPSet.go @@ -47,10 +47,10 @@ type GetIPSetOutput struct { // Information about the IPSet that you specified in the GetIPSet request. 
For more // information, see the following topics: // - // * IPSet: Contains IPSetDescriptors, + // * IPSet: Contains IPSetDescriptors, // IPSetId, and Name // - // * IPSetDescriptors: Contains an array of IPSetDescriptor + // * IPSetDescriptors: Contains an array of IPSetDescriptor // objects. Each IPSetDescriptor object contains Type and Value IPSet *types.IPSet diff --git a/service/wafregional/api_op_GetRule.go b/service/wafregional/api_op_GetRule.go index 994a1e2fe45..6e1de4077a1 100644 --- a/service/wafregional/api_op_GetRule.go +++ b/service/wafregional/api_op_GetRule.go @@ -48,11 +48,11 @@ type GetRuleOutput struct { // Information about the Rule that you specified in the GetRule request. For more // information, see the following topics: // - // * Rule: Contains MetricName, Name, - // an array of Predicate objects, and RuleId + // * Rule: Contains MetricName, Name, an + // array of Predicate objects, and RuleId // - // * Predicate: Each Predicate - // object contains DataId, Negated, and Type + // * Predicate: Each Predicate object + // contains DataId, Negated, and Type Rule *types.Rule // Metadata pertaining to the operation's result. diff --git a/service/wafregional/api_op_GetSampledRequests.go b/service/wafregional/api_op_GetSampledRequests.go index 7a62bbf72dd..b1d46287b55 100644 --- a/service/wafregional/api_op_GetSampledRequests.go +++ b/service/wafregional/api_op_GetSampledRequests.go @@ -54,13 +54,12 @@ type GetSampledRequestsInput struct { // RuleId is one of three values: // - // * The RuleId of the Rule or the RuleGroupId - // of the RuleGroup for which you want GetSampledRequests to return a sample of + // * The RuleId of the Rule or the RuleGroupId of + // the RuleGroup for which you want GetSampledRequests to return a sample of // requests. // - // * Default_Action, which causes GetSampledRequests to return a - // sample of the requests that didn't match any of the rules in the specified - // WebACL. + // * Default_Action, which causes GetSampledRequests to return a sample + // of the requests that didn't match any of the rules in the specified WebACL. // // This member is required. RuleId *string diff --git a/service/wafregional/api_op_GetSizeConstraintSet.go b/service/wafregional/api_op_GetSizeConstraintSet.go index bfe03b162b4..5b80a4c0b69 100644 --- a/service/wafregional/api_op_GetSizeConstraintSet.go +++ b/service/wafregional/api_op_GetSizeConstraintSet.go @@ -48,15 +48,15 @@ type GetSizeConstraintSetOutput struct { // Information about the SizeConstraintSet that you specified in the // GetSizeConstraintSet request. For more information, see the following topics: // + // * + // SizeConstraintSet: Contains SizeConstraintSetId, SizeConstraints, and Name // - // * SizeConstraintSet: Contains SizeConstraintSetId, SizeConstraints, and Name - // - // - // * SizeConstraints: Contains an array of SizeConstraint objects. Each + // * + // SizeConstraints: Contains an array of SizeConstraint objects. Each // SizeConstraint object contains FieldToMatch, TextTransformation, // ComparisonOperator, and Size // - // * FieldToMatch: Contains Data and Type + // * FieldToMatch: Contains Data and Type SizeConstraintSet *types.SizeConstraintSet // Metadata pertaining to the operation's result. 
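The Get* outputs documented above are plain data shapes. A minimal sketch of reading one back follows, assuming an already-configured *wafregional.Client and an existing SizeConstraintSetId; the field names follow the shapes these comments list, while the function and variable names are illustrative.

// Hedged usage sketch (not part of the generated diff).
package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/wafregional"
)

// describeSizeConstraints prints, for each SizeConstraint in the set, the part
// of the request it inspects and the comparison operator it applies.
func describeSizeConstraints(ctx context.Context, svc *wafregional.Client, setID string) error {
	out, err := svc.GetSizeConstraintSet(ctx, &wafregional.GetSizeConstraintSetInput{
		SizeConstraintSetId: aws.String(setID),
	})
	if err != nil {
		return err
	}
	for _, sc := range out.SizeConstraintSet.SizeConstraints {
		fmt.Printf("inspect %s, operator %s\n", sc.FieldToMatch.Type, sc.ComparisonOperator)
	}
	return nil
}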
diff --git a/service/wafregional/api_op_GetSqlInjectionMatchSet.go b/service/wafregional/api_op_GetSqlInjectionMatchSet.go index c8cdbfffd9a..a9badf38e87 100644 --- a/service/wafregional/api_op_GetSqlInjectionMatchSet.go +++ b/service/wafregional/api_op_GetSqlInjectionMatchSet.go @@ -52,14 +52,14 @@ type GetSqlInjectionMatchSetOutput struct { // GetSqlInjectionMatchSet request. For more information, see the following // topics: // - // * SqlInjectionMatchSet: Contains Name, SqlInjectionMatchSetId, and - // an array of SqlInjectionMatchTuple objects + // * SqlInjectionMatchSet: Contains Name, SqlInjectionMatchSetId, and an + // array of SqlInjectionMatchTuple objects // - // * SqlInjectionMatchTuple: Each + // * SqlInjectionMatchTuple: Each // SqlInjectionMatchTuple object contains FieldToMatch and TextTransformation // - // - // * FieldToMatch: Contains Data and Type + // * + // FieldToMatch: Contains Data and Type SqlInjectionMatchSet *types.SqlInjectionMatchSet // Metadata pertaining to the operation's result. diff --git a/service/wafregional/api_op_GetWebACL.go b/service/wafregional/api_op_GetWebACL.go index 4cba29a0ce6..ee29d40a5f3 100644 --- a/service/wafregional/api_op_GetWebACL.go +++ b/service/wafregional/api_op_GetWebACL.go @@ -47,15 +47,14 @@ type GetWebACLOutput struct { // Information about the WebACL that you specified in the GetWebACL request. For // more information, see the following topics: // - // * WebACL: Contains - // DefaultAction, MetricName, Name, an array of Rule objects, and WebACLId + // * WebACL: Contains DefaultAction, + // MetricName, Name, an array of Rule objects, and WebACLId // - // * - // DefaultAction (Data type is WafAction): Contains Type - // - // * Rules: Contains an - // array of ActivatedRule objects, which contain Action, Priority, and RuleId + // * DefaultAction (Data + // type is WafAction): Contains Type // + // * Rules: Contains an array of ActivatedRule + // objects, which contain Action, Priority, and RuleId // // * Action: Contains Type WebACL *types.WebACL diff --git a/service/wafregional/api_op_GetWebACLForResource.go b/service/wafregional/api_op_GetWebACLForResource.go index 1e36cdbd0a8..6e8de4e7b80 100644 --- a/service/wafregional/api_op_GetWebACLForResource.go +++ b/service/wafregional/api_op_GetWebACLForResource.go @@ -41,11 +41,11 @@ type GetWebACLForResourceInput struct { // either an application load balancer or Amazon API Gateway stage. The ARN should // be in one of the following formats: // - // * For an Application Load Balancer: + // * For an Application Load Balancer: // arn:aws:elasticloadbalancing:region:account-id:loadbalancer/app/load-balancer-name/load-balancer-id // - // - // * For an Amazon API Gateway stage: + // * + // For an Amazon API Gateway stage: // arn:aws:apigateway:region::/restapis/api-id/stages/stage-name // // This member is required. diff --git a/service/wafregional/api_op_GetXssMatchSet.go b/service/wafregional/api_op_GetXssMatchSet.go index 050dcbce13c..801e023b9d0 100644 --- a/service/wafregional/api_op_GetXssMatchSet.go +++ b/service/wafregional/api_op_GetXssMatchSet.go @@ -49,14 +49,14 @@ type GetXssMatchSetOutput struct { // Information about the XssMatchSet that you specified in the GetXssMatchSet // request. 
For more information, see the following topics: // - // * XssMatchSet: + // * XssMatchSet: // Contains Name, XssMatchSetId, and an array of XssMatchTuple objects // - // * + // * // XssMatchTuple: Each XssMatchTuple object contains FieldToMatch and // TextTransformation // - // * FieldToMatch: Contains Data and Type + // * FieldToMatch: Contains Data and Type XssMatchSet *types.XssMatchSet // Metadata pertaining to the operation's result. diff --git a/service/wafregional/api_op_PutLoggingConfiguration.go b/service/wafregional/api_op_PutLoggingConfiguration.go index d4e83941e0f..1f5d1897b2c 100644 --- a/service/wafregional/api_op_PutLoggingConfiguration.go +++ b/service/wafregional/api_op_PutLoggingConfiguration.go @@ -21,13 +21,13 @@ import ( // access information about all traffic that AWS WAF inspects using the following // steps: // -// * Create an Amazon Kinesis Data Firehose. Create the data firehose -// with a PUT source and in the region that you are operating. However, if you are +// * Create an Amazon Kinesis Data Firehose. Create the data firehose with +// a PUT source and in the region that you are operating. However, if you are // capturing logs for Amazon CloudFront, always create the firehose in US East (N. // Virginia). Do not create the data firehose using a Kinesis stream as your // source. // -// * Associate that firehose to your web ACL using a +// * Associate that firehose to your web ACL using a // PutLoggingConfiguration request. // // When you successfully enable logging using a diff --git a/service/wafregional/api_op_PutPermissionPolicy.go b/service/wafregional/api_op_PutPermissionPolicy.go index b83f09c7035..a32750f2752 100644 --- a/service/wafregional/api_op_PutPermissionPolicy.go +++ b/service/wafregional/api_op_PutPermissionPolicy.go @@ -20,33 +20,32 @@ import ( // use for this action is to share a RuleGroup across accounts. The // PutPermissionPolicy is subject to the following restrictions: // -// * You can -// attach only one policy with each PutPermissionPolicy request. +// * You can attach +// only one policy with each PutPermissionPolicy request. // -// * The policy -// must include an Effect, Action and Principal. +// * The policy must +// include an Effect, Action and Principal. // -// * Effect must specify -// Allow. +// * Effect must specify Allow. // -// * The Action in the policy must be waf:UpdateWebACL, -// waf-regional:UpdateWebACL, waf:GetRuleGroup and waf-regional:GetRuleGroup . Any -// extra or wildcard actions in the policy will be rejected. +// * The +// Action in the policy must be waf:UpdateWebACL, waf-regional:UpdateWebACL, +// waf:GetRuleGroup and waf-regional:GetRuleGroup . Any extra or wildcard actions +// in the policy will be rejected. // -// * The policy -// cannot include a Resource parameter. +// * The policy cannot include a Resource +// parameter. // -// * The ARN in the request must be a -// valid WAF RuleGroup ARN and the RuleGroup must exist in the same region. +// * The ARN in the request must be a valid WAF RuleGroup ARN and the +// RuleGroup must exist in the same region. // -// * -// The user making the request must be the owner of the RuleGroup. +// * The user making the request must be +// the owner of the RuleGroup. // -// * Your -// policy must be composed using IAM Policy version 2012-10-17. +// * Your policy must be composed using IAM Policy +// version 2012-10-17. 
// -// For more -// information, see IAM Policies +// For more information, see IAM Policies // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html). An // example of a valid policy parameter is shown in the Examples section below. func (c *Client) PutPermissionPolicy(ctx context.Context, params *PutPermissionPolicyInput, optFns ...func(*Options)) (*PutPermissionPolicyOutput, error) { diff --git a/service/wafregional/api_op_UpdateByteMatchSet.go b/service/wafregional/api_op_UpdateByteMatchSet.go index a06be2d5df5..df0902dccbe 100644 --- a/service/wafregional/api_op_UpdateByteMatchSet.go +++ b/service/wafregional/api_op_UpdateByteMatchSet.go @@ -21,45 +21,44 @@ import ( // ByteMatchSet. For each ByteMatchTuple object, you specify the following // values: // -// * Whether to insert or delete the object from the array. If you -// want to change a ByteMatchSetUpdate object, you delete the existing object and -// add a new one. +// * Whether to insert or delete the object from the array. If you want to +// change a ByteMatchSetUpdate object, you delete the existing object and add a new +// one. // -// * The part of a web request that you want AWS WAF to -// inspect, such as a query string or the value of the User-Agent header. +// * The part of a web request that you want AWS WAF to inspect, such as a +// query string or the value of the User-Agent header. // -// * -// The bytes (typically a string that corresponds with ASCII characters) that you -// want AWS WAF to look for. For more information, including how you specify the -// values for the AWS WAF API and the AWS CLI or SDKs, see TargetString in the -// ByteMatchTuple data type. +// * The bytes (typically a +// string that corresponds with ASCII characters) that you want AWS WAF to look +// for. For more information, including how you specify the values for the AWS WAF +// API and the AWS CLI or SDKs, see TargetString in the ByteMatchTuple data +// type. // -// * Where to look, such as at the beginning or the -// end of a query string. -// -// * Whether to perform any conversions on the request, -// such as converting it to lowercase, before inspecting it for the specified +// * Where to look, such as at the beginning or the end of a query // string. // -// For example, you can add a ByteMatchSetUpdate object that matches web -// requests in which User-Agent headers contain the string BadBot. You can then -// configure AWS WAF to block those requests. To create and configure a -// ByteMatchSet, perform the following steps: +// * Whether to perform any conversions on the request, such as converting +// it to lowercase, before inspecting it for the specified string. +// +// For example, +// you can add a ByteMatchSetUpdate object that matches web requests in which +// User-Agent headers contain the string BadBot. You can then configure AWS WAF to +// block those requests. To create and configure a ByteMatchSet, perform the +// following steps: // -// * Create a ByteMatchSet. For -// more information, see CreateByteMatchSet. +// * Create a ByteMatchSet. For more information, see +// CreateByteMatchSet. // -// * Use GetChangeToken to get the -// change token that you provide in the ChangeToken parameter of an -// UpdateByteMatchSet request. +// * Use GetChangeToken to get the change token that you +// provide in the ChangeToken parameter of an UpdateByteMatchSet request. 
// -// * Submit an UpdateByteMatchSet request to -// specify the part of the request that you want AWS WAF to inspect (for example, -// the header or the URI) and the value that you want AWS WAF to watch for. +// * Submit +// an UpdateByteMatchSet request to specify the part of the request that you want +// AWS WAF to inspect (for example, the header or the URI) and the value that you +// want AWS WAF to watch for. // -// For -// more information about how to use the AWS WAF API to allow or block HTTP -// requests, see the AWS WAF Developer Guide +// For more information about how to use the AWS WAF +// API to allow or block HTTP requests, see the AWS WAF Developer Guide // (https://docs.aws.amazon.com/waf/latest/developerguide/). func (c *Client) UpdateByteMatchSet(ctx context.Context, params *UpdateByteMatchSetInput, optFns ...func(*Options)) (*UpdateByteMatchSetOutput, error) { if params == nil { @@ -92,14 +91,14 @@ type UpdateByteMatchSetInput struct { // An array of ByteMatchSetUpdate objects that you want to insert into or delete // from a ByteMatchSet. For more information, see the applicable data types: // - // * + // * // ByteMatchSetUpdate: Contains Action and ByteMatchTuple // - // * ByteMatchTuple: + // * ByteMatchTuple: // Contains FieldToMatch, PositionalConstraint, TargetString, and // TextTransformation // - // * FieldToMatch: Contains Data and Type + // * FieldToMatch: Contains Data and Type // // This member is required. Updates []*types.ByteMatchSetUpdate diff --git a/service/wafregional/api_op_UpdateGeoMatchSet.go b/service/wafregional/api_op_UpdateGeoMatchSet.go index dadddf9c04b..5393654f0de 100644 --- a/service/wafregional/api_op_UpdateGeoMatchSet.go +++ b/service/wafregional/api_op_UpdateGeoMatchSet.go @@ -20,27 +20,27 @@ import ( // global use. Inserts or deletes GeoMatchConstraint objects in an GeoMatchSet. For // each GeoMatchConstraint object, you specify the following values: // -// * Whether -// to insert or delete the object from the array. If you want to change an +// * Whether to +// insert or delete the object from the array. If you want to change an // GeoMatchConstraint object, you delete the existing object and add a new one. // +// * +// The Type. The only valid value for Type is Country. // -// * The Type. The only valid value for Type is Country. -// -// * The Value, which is -// a two character code for the country to add to the GeoMatchConstraint object. -// Valid codes are listed in GeoMatchConstraint$Value. +// * The Value, which is a two +// character code for the country to add to the GeoMatchConstraint object. Valid +// codes are listed in GeoMatchConstraint$Value. // // To create and configure an // GeoMatchSet, perform the following steps: // -// * Submit a CreateGeoMatchSet +// * Submit a CreateGeoMatchSet // request. // -// * Use GetChangeToken to get the change token that you provide in -// the ChangeToken parameter of an UpdateGeoMatchSet request. +// * Use GetChangeToken to get the change token that you provide in the +// ChangeToken parameter of an UpdateGeoMatchSet request. // -// * Submit an +// * Submit an // UpdateGeoMatchSet request to specify the country that you want AWS WAF to watch // for. // @@ -80,12 +80,12 @@ type UpdateGeoMatchSetInput struct { // An array of GeoMatchSetUpdate objects that you want to insert into or delete // from an GeoMatchSet. 
For more information, see the applicable data types: // - // * + // * // GeoMatchSetUpdate: Contains Action and GeoMatchConstraint // - // * - // GeoMatchConstraint: Contains Type and Value You can have only one Type and Value - // per GeoMatchConstraint. To add multiple countries, include multiple + // * GeoMatchConstraint: + // Contains Type and Value You can have only one Type and Value per + // GeoMatchConstraint. To add multiple countries, include multiple // GeoMatchSetUpdate objects in your request. // // This member is required. diff --git a/service/wafregional/api_op_UpdateIPSet.go b/service/wafregional/api_op_UpdateIPSet.go index 97d61049c1e..9f2b0fe4e8b 100644 --- a/service/wafregional/api_op_UpdateIPSet.go +++ b/service/wafregional/api_op_UpdateIPSet.go @@ -20,50 +20,50 @@ import ( // global use. Inserts or deletes IPSetDescriptor objects in an IPSet. For each // IPSetDescriptor object, you specify the following values: // -// * Whether to -// insert or delete the object from the array. If you want to change an -// IPSetDescriptor object, you delete the existing object and add a new one. +// * Whether to insert +// or delete the object from the array. If you want to change an IPSetDescriptor +// object, you delete the existing object and add a new one. // -// * -// The IP address version, IPv4 or IPv6. +// * The IP address +// version, IPv4 or IPv6. // -// * The IP address in CIDR notation, -// for example, 192.0.2.0/24 (for the range of IP addresses from 192.0.2.0 to -// 192.0.2.255) or 192.0.2.44/32 (for the individual IP address 192.0.2.44). +// * The IP address in CIDR notation, for example, +// 192.0.2.0/24 (for the range of IP addresses from 192.0.2.0 to 192.0.2.255) or +// 192.0.2.44/32 (for the individual IP address 192.0.2.44). // -// AWS -// WAF supports IPv4 address ranges: /8 and any range between /16 through /32. AWS -// WAF supports IPv6 address ranges: /24, /32, /48, /56, /64, and /128. For more -// information about CIDR notation, see the Wikipedia entry Classless Inter-Domain -// Routing (https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing). IPv6 -// addresses can be represented using any of the following formats: +// AWS WAF supports IPv4 +// address ranges: /8 and any range between /16 through /32. AWS WAF supports IPv6 +// address ranges: /24, /32, /48, /56, /64, and /128. For more information about +// CIDR notation, see the Wikipedia entry Classless Inter-Domain Routing +// (https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing). IPv6 addresses +// can be represented using any of the following formats: // -// * +// * // 1111:0000:0000:0000:0000:0000:0000:0111/128 // -// * 1111:0:0:0:0:0:0:0111/128 +// * 1111:0:0:0:0:0:0:0111/128 // +// * +// 1111::0111/128 // -// * 1111::0111/128 +// * 1111::111/128 // -// * 1111::111/128 -// -// You use an IPSet to specify which web -// requests you want to allow or block based on the IP addresses that the requests +// You use an IPSet to specify which web requests +// you want to allow or block based on the IP addresses that the requests // originated from. For example, if you're receiving a lot of requests from one or // a small number of IP addresses and you want to block the requests, you can // create an IPSet that specifies those IP addresses, and then configure AWS WAF to // block the requests. To create and configure an IPSet, perform the following // steps: // -// * Submit a CreateIPSet request. +// * Submit a CreateIPSet request. 
// -// * Use GetChangeToken to get the -// change token that you provide in the ChangeToken parameter of an UpdateIPSet +// * Use GetChangeToken to get the change +// token that you provide in the ChangeToken parameter of an UpdateIPSet // request. // -// * Submit an UpdateIPSet request to specify the IP addresses that -// you want AWS WAF to watch for. +// * Submit an UpdateIPSet request to specify the IP addresses that you +// want AWS WAF to watch for. // // When you update an IPSet, you specify the IP // addresses that you want to add and/or the IP addresses that you want to delete. @@ -103,10 +103,10 @@ type UpdateIPSetInput struct { // An array of IPSetUpdate objects that you want to insert into or delete from an // IPSet. For more information, see the applicable data types: // - // * IPSetUpdate: + // * IPSetUpdate: // Contains Action and IPSetDescriptor // - // * IPSetDescriptor: Contains Type and + // * IPSetDescriptor: Contains Type and // Value // // You can insert a maximum of 1000 addresses in a single request. diff --git a/service/wafregional/api_op_UpdateRateBasedRule.go b/service/wafregional/api_op_UpdateRateBasedRule.go index bae89e16bfc..9ad4b28d35c 100644 --- a/service/wafregional/api_op_UpdateRateBasedRule.go +++ b/service/wafregional/api_op_UpdateRateBasedRule.go @@ -25,29 +25,29 @@ import ( // request must match all the predicates and exceed the RateLimit to be counted or // blocked. For example, suppose you add the following to a RateBasedRule: // -// * -// An IPSet that matches the IP address 192.0.2.44/32 +// * An +// IPSet that matches the IP address 192.0.2.44/32 // -// * A ByteMatchSet that -// matches BadBot in the User-Agent header +// * A ByteMatchSet that matches +// BadBot in the User-Agent header // -// Further, you specify a RateLimit of -// 1,000. You then add the RateBasedRule to a WebACL and specify that you want to -// block requests that satisfy the rule. For a request to be blocked, it must come -// from the IP address 192.0.2.44 and the User-Agent header in the request must -// contain the value BadBot. Further, requests that match these two conditions much -// be received at a rate of more than 1,000 every five minutes. If the rate drops +// Further, you specify a RateLimit of 1,000. You +// then add the RateBasedRule to a WebACL and specify that you want to block +// requests that satisfy the rule. For a request to be blocked, it must come from +// the IP address 192.0.2.44 and the User-Agent header in the request must contain +// the value BadBot. Further, requests that match these two conditions much be +// received at a rate of more than 1,000 every five minutes. If the rate drops // below this limit, AWS WAF no longer blocks the requests. As a second example, // suppose you want to limit requests to a particular page on your site. To do // this, you could add the following to a RateBasedRule: // -// * A ByteMatchSet with +// * A ByteMatchSet with // FieldToMatch of URI // -// * A PositionalConstraint of STARTS_WITH +// * A PositionalConstraint of STARTS_WITH // -// * A -// TargetString of login +// * A TargetString +// of login // // Further, you specify a RateLimit of 1,000. 
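The UpdateIPSet hunk above reflows the same CreateIPSet → GetChangeToken → UpdateIPSet sequence that recurs throughout this package (the RateBasedRule walkthrough continues below). As a minimal sketch of that sequence against the regenerated client: it assumes an already-constructed *wafregional.Client, the WAF API field names (IPSetId, ChangeToken, and a ChangeToken field on the GetChangeToken output), and infers the ChangeActionInsert and IPSetDescriptorTypeIpv4 constant names from the renaming pattern in this patch; none of those names are confirmed by these hunks.

package wafexamples

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/wafregional"
	"github.com/aws/aws-sdk-go-v2/service/wafregional/types"
)

// addIPToIPSet inserts one IPv4 descriptor into an existing IPSet using the
// change-token sequence described in the doc comments above.
func addIPToIPSet(ctx context.Context, client *wafregional.Client, ipSetID, cidr string) error {
	// Step 1: get the change token to pass in the ChangeToken parameter.
	token, err := client.GetChangeToken(ctx, &wafregional.GetChangeTokenInput{})
	if err != nil {
		return fmt.Errorf("get change token: %w", err)
	}
	// Step 2: submit the UpdateIPSet request with a single insert update.
	_, err = client.UpdateIPSet(ctx, &wafregional.UpdateIPSetInput{
		ChangeToken: token.ChangeToken,
		IPSetId:     aws.String(ipSetID),
		Updates: []*types.IPSetUpdate{{
			Action: types.ChangeActionInsert, // assumed constant name for "INSERT"
			IPSetDescriptor: &types.IPSetDescriptor{
				Type:  types.IPSetDescriptorTypeIpv4, // assumed constant name for "IPV4"
				Value: aws.String(cidr),              // e.g. "192.0.2.44/32"
			},
		}},
	})
	return err
}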
By adding this // RateBasedRule to a WebACL, you could limit requests to your login page without diff --git a/service/wafregional/api_op_UpdateRegexMatchSet.go b/service/wafregional/api_op_UpdateRegexMatchSet.go index 790f199cf75..2d56bae7412 100644 --- a/service/wafregional/api_op_UpdateRegexMatchSet.go +++ b/service/wafregional/api_op_UpdateRegexMatchSet.go @@ -21,41 +21,40 @@ import ( // RegexMatchSet. For each RegexMatchSetUpdate object, you specify the following // values: // -// * Whether to insert or delete the object from the array. If you -// want to change a RegexMatchSetUpdate object, you delete the existing object and -// add a new one. +// * Whether to insert or delete the object from the array. If you want to +// change a RegexMatchSetUpdate object, you delete the existing object and add a +// new one. // -// * The part of a web request that you want AWS WAF to -// inspectupdate, such as a query string or the value of the User-Agent header. +// * The part of a web request that you want AWS WAF to inspectupdate, +// such as a query string or the value of the User-Agent header. // +// * The identifier +// of the pattern (a regular expression) that you want AWS WAF to look for. For +// more information, see RegexPatternSet. // -// * The identifier of the pattern (a regular expression) that you want AWS WAF to -// look for. For more information, see RegexPatternSet. +// * Whether to perform any conversions on +// the request, such as converting it to lowercase, before inspecting it for the +// specified string. // -// * Whether to perform -// any conversions on the request, such as converting it to lowercase, before -// inspecting it for the specified string. +// For example, you can create a RegexPatternSet that matches +// any requests with User-Agent headers that contain the string B[a@]dB[o0]t. You +// can then configure AWS WAF to reject those requests. To create and configure a +// RegexMatchSet, perform the following steps: // -// For example, you can create a -// RegexPatternSet that matches any requests with User-Agent headers that contain -// the string B[a@]dB[o0]t. You can then configure AWS WAF to reject those -// requests. To create and configure a RegexMatchSet, perform the following -// steps: +// * Create a RegexMatchSet. For more +// information, see CreateRegexMatchSet. // -// * Create a RegexMatchSet. For more information, see -// CreateRegexMatchSet. +// * Use GetChangeToken to get the change +// token that you provide in the ChangeToken parameter of an UpdateRegexMatchSet +// request. // -// * Use GetChangeToken to get the change token that you -// provide in the ChangeToken parameter of an UpdateRegexMatchSet request. +// * Submit an UpdateRegexMatchSet request to specify the part of the +// request that you want AWS WAF to inspect (for example, the header or the URI) +// and the identifier of the RegexPatternSet that contain the regular expression +// patters you want AWS WAF to watch for. // -// * -// Submit an UpdateRegexMatchSet request to specify the part of the request that -// you want AWS WAF to inspect (for example, the header or the URI) and the -// identifier of the RegexPatternSet that contain the regular expression patters -// you want AWS WAF to watch for. 
-// -// For more information about how to use the AWS -// WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide +// For more information about how to use +// the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide // (https://docs.aws.amazon.com/waf/latest/developerguide/). func (c *Client) UpdateRegexMatchSet(ctx context.Context, params *UpdateRegexMatchSetInput, optFns ...func(*Options)) (*UpdateRegexMatchSetOutput, error) { if params == nil { diff --git a/service/wafregional/api_op_UpdateRegexPatternSet.go b/service/wafregional/api_op_UpdateRegexPatternSet.go index 63fb16f822d..b71e0108082 100644 --- a/service/wafregional/api_op_UpdateRegexPatternSet.go +++ b/service/wafregional/api_op_UpdateRegexPatternSet.go @@ -20,42 +20,41 @@ import ( // global use. Inserts or deletes RegexPatternString objects in a RegexPatternSet. // For each RegexPatternString object, you specify the following values: // -// * -// Whether to insert or delete the RegexPatternString. +// * Whether +// to insert or delete the RegexPatternString. // -// * The regular -// expression pattern that you want to insert or delete. For more information, see +// * The regular expression pattern +// that you want to insert or delete. For more information, see // RegexPatternSet. // // For example, you can create a RegexPatternString such as // B[a@]dB[o0]t. AWS WAF will match this RegexPatternString to: // -// * BadBot +// * BadBot // +// * +// BadB0t // -// * BadB0t +// * B@dBot // -// * B@dBot +// * B@dB0t // -// * B@dB0t +// To create and configure a RegexPatternSet, perform +// the following steps: // -// To create and configure a RegexPatternSet, -// perform the following steps: +// * Create a RegexPatternSet. For more information, see +// CreateRegexPatternSet. // -// * Create a RegexPatternSet. For more -// information, see CreateRegexPatternSet. +// * Use GetChangeToken to get the change token that you +// provide in the ChangeToken parameter of an UpdateRegexPatternSet request. // -// * Use GetChangeToken to get the -// change token that you provide in the ChangeToken parameter of an -// UpdateRegexPatternSet request. +// * +// Submit an UpdateRegexPatternSet request to specify the regular expression +// pattern that you want AWS WAF to watch for. // -// * Submit an UpdateRegexPatternSet request to -// specify the regular expression pattern that you want AWS WAF to watch for. -// -// For -// more information about how to use the AWS WAF API to allow or block HTTP -// requests, see the AWS WAF Developer Guide -// (https://docs.aws.amazon.com/waf/latest/developerguide/). +// For more information about how to +// use the AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer +// Guide (https://docs.aws.amazon.com/waf/latest/developerguide/). func (c *Client) UpdateRegexPatternSet(ctx context.Context, params *UpdateRegexPatternSetInput, optFns ...func(*Options)) (*UpdateRegexPatternSetOutput, error) { if params == nil { params = &UpdateRegexPatternSetInput{} diff --git a/service/wafregional/api_op_UpdateRule.go b/service/wafregional/api_op_UpdateRule.go index 11e853a3d95..c021263565b 100644 --- a/service/wafregional/api_op_UpdateRule.go +++ b/service/wafregional/api_op_UpdateRule.go @@ -24,37 +24,37 @@ import ( // specifications to be allowed, blocked, or counted. 
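Before the Rule walkthrough resumes below, a sketch of the UpdateRegexPatternSet steps listed in the preceding hunk, reusing the package, imports, and client conventions from the earlier UpdateIPSet sketch. The RegexPatternSetUpdate type, its Action and RegexPatternString fields, and the RegexPatternSetId input field follow the WAF API shape but are assumptions here, not something these hunks show.

// addRegexPattern inserts the B[a@]dB[o0]t pattern from the doc comment above
// into an existing RegexPatternSet.
func addRegexPattern(ctx context.Context, client *wafregional.Client, patternSetID string) error {
	token, err := client.GetChangeToken(ctx, &wafregional.GetChangeTokenInput{})
	if err != nil {
		return err
	}
	_, err = client.UpdateRegexPatternSet(ctx, &wafregional.UpdateRegexPatternSetInput{
		ChangeToken:       token.ChangeToken,
		RegexPatternSetId: aws.String(patternSetID), // assumed field name
		Updates: []*types.RegexPatternSetUpdate{{
			Action:             types.ChangeActionInsert, // assumed constant name for "INSERT"
			RegexPatternString: aws.String("B[a@]dB[o0]t"),
		}},
	})
	return err
}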
For example, suppose that you // add the following to a Rule: // -// * A ByteMatchSet that matches the value BadBot -// in the User-Agent header +// * A ByteMatchSet that matches the value BadBot in +// the User-Agent header // -// * An IPSet that matches the IP address -// 192.0.2.44 +// * An IPSet that matches the IP address 192.0.2.44 // -// You then add the Rule to a WebACL and specify that you want to block -// requests that satisfy the Rule. For a request to be blocked, the User-Agent -// header in the request must contain the value BadBot and the request must -// originate from the IP address 192.0.2.44. To create and configure a Rule, -// perform the following steps: +// You +// then add the Rule to a WebACL and specify that you want to block requests that +// satisfy the Rule. For a request to be blocked, the User-Agent header in the +// request must contain the value BadBot and the request must originate from the IP +// address 192.0.2.44. To create and configure a Rule, perform the following +// steps: // -// * Create and update the predicates that you -// want to include in the Rule. +// * Create and update the predicates that you want to include in the +// Rule. // -// * Create the Rule. See CreateRule. +// * Create the Rule. See CreateRule. // -// * Use -// GetChangeToken to get the change token that you provide in the ChangeToken -// parameter of an UpdateRule request. +// * Use GetChangeToken to get the +// change token that you provide in the ChangeToken parameter of an UpdateRule +// request. // -// * Submit an UpdateRule request to add -// predicates to the Rule. +// * Submit an UpdateRule request to add predicates to the Rule. // -// * Create and update a WebACL that contains the -// Rule. See CreateWebACL. +// * +// Create and update a WebACL that contains the Rule. See CreateWebACL. // -// If you want to replace one ByteMatchSet or IPSet with -// another, you delete the existing one and add the new one. For more information -// about how to use the AWS WAF API to allow or block HTTP requests, see the AWS -// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/). +// If you +// want to replace one ByteMatchSet or IPSet with another, you delete the existing +// one and add the new one. For more information about how to use the AWS WAF API +// to allow or block HTTP requests, see the AWS WAF Developer Guide +// (https://docs.aws.amazon.com/waf/latest/developerguide/). func (c *Client) UpdateRule(ctx context.Context, params *UpdateRuleInput, optFns ...func(*Options)) (*UpdateRuleOutput, error) { if params == nil { params = &UpdateRuleInput{} @@ -86,13 +86,13 @@ type UpdateRuleInput struct { // An array of RuleUpdate objects that you want to insert into or delete from a // Rule. For more information, see the applicable data types: // - // * RuleUpdate: + // * RuleUpdate: // Contains Action and Predicate // - // * Predicate: Contains DataId, Negated, and + // * Predicate: Contains DataId, Negated, and // Type // - // * FieldToMatch: Contains Data and Type + // * FieldToMatch: Contains Data and Type // // This member is required. Updates []*types.RuleUpdate diff --git a/service/wafregional/api_op_UpdateRuleGroup.go b/service/wafregional/api_op_UpdateRuleGroup.go index 0d33ed1918e..326c487084e 100644 --- a/service/wafregional/api_op_UpdateRuleGroup.go +++ b/service/wafregional/api_op_UpdateRuleGroup.go @@ -22,22 +22,22 @@ import ( // per rule group. 
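A sketch of the UpdateRule steps documented in the hunk above (the RuleGroup steps continue below), again reusing the earlier sketch's imports and client. DataId, Negated, and Type are the Predicate fields named in the hunk; RuleId, the ChangeActionInsert constant, and Predicate being a pointer field on RuleUpdate are assumptions.

// addPredicateToRule adds an IPMatch predicate that points at an existing
// IPSet, so the Rule matches requests originating from that IPSet.
func addPredicateToRule(ctx context.Context, client *wafregional.Client, ruleID, ipSetID string) error {
	token, err := client.GetChangeToken(ctx, &wafregional.GetChangeTokenInput{})
	if err != nil {
		return err
	}
	_, err = client.UpdateRule(ctx, &wafregional.UpdateRuleInput{
		ChangeToken: token.ChangeToken,
		RuleId:      aws.String(ruleID), // assumed field name
		Updates: []*types.RuleUpdate{{
			Action: types.ChangeActionInsert, // assumed constant name for "INSERT"
			Predicate: &types.Predicate{
				DataId:  aws.String(ipSetID), // the IPSet to match against
				Negated: aws.Bool(false),
				Type:    types.PredicateTypeIpMatch, // renamed constant from this patch
			},
		}},
	})
	return err
}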
To create and configure a RuleGroup, perform the following // steps: // -// * Create and update the Rules that you want to include in the -// RuleGroup. See CreateRule. +// * Create and update the Rules that you want to include in the RuleGroup. +// See CreateRule. // -// * Use GetChangeToken to get the change token -// that you provide in the ChangeToken parameter of an UpdateRuleGroup request. +// * Use GetChangeToken to get the change token that you provide +// in the ChangeToken parameter of an UpdateRuleGroup request. // +// * Submit an +// UpdateRuleGroup request to add Rules to the RuleGroup. // -// * Submit an UpdateRuleGroup request to add Rules to the RuleGroup. +// * Create and update a +// WebACL that contains the RuleGroup. See CreateWebACL. // -// * Create -// and update a WebACL that contains the RuleGroup. See CreateWebACL. -// -// If you want -// to replace one Rule with another, you delete the existing one and add the new -// one. For more information about how to use the AWS WAF API to allow or block -// HTTP requests, see the AWS WAF Developer Guide +// If you want to replace +// one Rule with another, you delete the existing one and add the new one. For more +// information about how to use the AWS WAF API to allow or block HTTP requests, +// see the AWS WAF Developer Guide // (https://docs.aws.amazon.com/waf/latest/developerguide/). func (c *Client) UpdateRuleGroup(ctx context.Context, params *UpdateRuleGroupInput, optFns ...func(*Options)) (*UpdateRuleGroupOutput, error) { if params == nil { diff --git a/service/wafregional/api_op_UpdateSizeConstraintSet.go b/service/wafregional/api_op_UpdateSizeConstraintSet.go index 1aef061d7f9..f2bdd4c1d1f 100644 --- a/service/wafregional/api_op_UpdateSizeConstraintSet.go +++ b/service/wafregional/api_op_UpdateSizeConstraintSet.go @@ -21,48 +21,48 @@ import ( // SizeConstraintSet. For each SizeConstraint object, you specify the following // values: // -// * Whether to insert or delete the object from the array. If you -// want to change a SizeConstraintSetUpdate object, you delete the existing object -// and add a new one. +// * Whether to insert or delete the object from the array. If you want to +// change a SizeConstraintSetUpdate object, you delete the existing object and add +// a new one. // -// * The part of a web request that you want AWS WAF to -// evaluate, such as the length of a query string or the length of the User-Agent -// header. +// * The part of a web request that you want AWS WAF to evaluate, such +// as the length of a query string or the length of the User-Agent header. // -// * Whether to perform any transformations on the request, such as -// converting it to lowercase, before checking its length. Note that -// transformations of the request body are not supported because the AWS resource -// forwards only the first 8192 bytes of your request to AWS WAF. You can only -// specify a single type of TextTransformation. +// * +// Whether to perform any transformations on the request, such as converting it to +// lowercase, before checking its length. Note that transformations of the request +// body are not supported because the AWS resource forwards only the first 8192 +// bytes of your request to AWS WAF. You can only specify a single type of +// TextTransformation. // -// * A ComparisonOperator used -// for evaluating the selected part of the request against the specified Size, such -// as equals, greater than, less than, and so on. 
+// * A ComparisonOperator used for evaluating the selected +// part of the request against the specified Size, such as equals, greater than, +// less than, and so on. // -// * The length, in bytes, that -// you want AWS WAF to watch for in selected part of the request. The length is -// computed after applying the transformation. +// * The length, in bytes, that you want AWS WAF to watch +// for in selected part of the request. The length is computed after applying the +// transformation. // -// For example, you can add a -// SizeConstraintSetUpdate object that matches web requests in which the length of -// the User-Agent header is greater than 100 bytes. You can then configure AWS WAF -// to block those requests. To create and configure a SizeConstraintSet, perform -// the following steps: +// For example, you can add a SizeConstraintSetUpdate object that +// matches web requests in which the length of the User-Agent header is greater +// than 100 bytes. You can then configure AWS WAF to block those requests. To +// create and configure a SizeConstraintSet, perform the following steps: // -// * Create a SizeConstraintSet. For more information, -// see CreateSizeConstraintSet. +// * Create +// a SizeConstraintSet. For more information, see CreateSizeConstraintSet. // -// * Use GetChangeToken to get the change token -// that you provide in the ChangeToken parameter of an UpdateSizeConstraintSet -// request. +// * Use +// GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of an UpdateSizeConstraintSet request. // -// * Submit an UpdateSizeConstraintSet request to specify the part of -// the request that you want AWS WAF to inspect (for example, the header or the -// URI) and the value that you want AWS WAF to watch for. +// * Submit an +// UpdateSizeConstraintSet request to specify the part of the request that you want +// AWS WAF to inspect (for example, the header or the URI) and the value that you +// want AWS WAF to watch for. // -// For more information -// about how to use the AWS WAF API to allow or block HTTP requests, see the AWS -// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/). +// For more information about how to use the AWS WAF +// API to allow or block HTTP requests, see the AWS WAF Developer Guide +// (https://docs.aws.amazon.com/waf/latest/developerguide/). func (c *Client) UpdateSizeConstraintSet(ctx context.Context, params *UpdateSizeConstraintSetInput, optFns ...func(*Options)) (*UpdateSizeConstraintSetOutput, error) { if params == nil { params = &UpdateSizeConstraintSetInput{} @@ -96,13 +96,13 @@ type UpdateSizeConstraintSetInput struct { // delete from a SizeConstraintSet. For more information, see the applicable data // types: // - // * SizeConstraintSetUpdate: Contains Action and SizeConstraint + // * SizeConstraintSetUpdate: Contains Action and SizeConstraint // - // * + // * // SizeConstraint: Contains FieldToMatch, TextTransformation, ComparisonOperator, // and Size // - // * FieldToMatch: Contains Data and Type + // * FieldToMatch: Contains Data and Type // // This member is required. Updates []*types.SizeConstraintSetUpdate diff --git a/service/wafregional/api_op_UpdateSqlInjectionMatchSet.go b/service/wafregional/api_op_UpdateSqlInjectionMatchSet.go index dfeef69eb21..d56ce301d92 100644 --- a/service/wafregional/api_op_UpdateSqlInjectionMatchSet.go +++ b/service/wafregional/api_op_UpdateSqlInjectionMatchSet.go @@ -21,15 +21,15 @@ import ( // SqlInjectionMatchSet. 
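A sketch of the SizeConstraintSet example given in the hunk above (block requests whose User-Agent header is longer than 100 bytes), before the SqlInjectionMatchSet doc continues below. It reuses the earlier sketch's imports and client; SizeConstraintSetId, the ComparisonOperatorGt constant name, and Size being an *int64 are assumptions, while SizeConstraint's FieldToMatch, TextTransformation, ComparisonOperator, and Size fields are the ones the hunk lists.

// addSizeConstraint flags requests with a User-Agent header longer than 100 bytes.
func addSizeConstraint(ctx context.Context, client *wafregional.Client, setID string) error {
	token, err := client.GetChangeToken(ctx, &wafregional.GetChangeTokenInput{})
	if err != nil {
		return err
	}
	_, err = client.UpdateSizeConstraintSet(ctx, &wafregional.UpdateSizeConstraintSetInput{
		ChangeToken:         token.ChangeToken,
		SizeConstraintSetId: aws.String(setID), // assumed field name
		Updates: []*types.SizeConstraintSetUpdate{{
			Action: types.ChangeActionInsert, // assumed constant name for "INSERT"
			SizeConstraint: &types.SizeConstraint{
				FieldToMatch: &types.FieldToMatch{
					Type: types.MatchFieldTypeHeader,
					Data: aws.String("User-Agent"),
				},
				TextTransformation: types.TextTransformationNone,
				ComparisonOperator: types.ComparisonOperatorGt, // assumed constant name for "GT"
				Size:               aws.Int64(100),             // assumed numeric type
			},
		}},
	})
	return err
}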
For each SqlInjectionMatchTuple object, you specify the // following values: // -// * Action: Whether to insert the object into or delete the +// * Action: Whether to insert the object into or delete the // object from the array. To change a SqlInjectionMatchTuple, you delete the // existing object and add a new one. // -// * FieldToMatch: The part of web requests +// * FieldToMatch: The part of web requests // that you want AWS WAF to inspect and, if you want AWS WAF to inspect a header or // custom query parameter, the name of the header or parameter. // -// * +// * // TextTransformation: Which text transformation, if any, to perform on the web // request before inspecting the request for snippets of malicious SQL code. You // can only specify a single type of TextTransformation. @@ -42,19 +42,19 @@ import ( // and then configure AWS WAF to block the requests. To create and configure a // SqlInjectionMatchSet, perform the following steps: // -// * Submit a +// * Submit a // CreateSqlInjectionMatchSet request. // -// * Use GetChangeToken to get the change -// token that you provide in the ChangeToken parameter of an UpdateIPSet request. +// * Use GetChangeToken to get the change +// token that you provide in the ChangeToken parameter of an UpdateIPSet +// request. // +// * Submit an UpdateSqlInjectionMatchSet request to specify the parts of +// web requests that you want AWS WAF to inspect for snippets of SQL code. // -// * Submit an UpdateSqlInjectionMatchSet request to specify the parts of web -// requests that you want AWS WAF to inspect for snippets of SQL code. -// -// For more -// information about how to use the AWS WAF API to allow or block HTTP requests, -// see the AWS WAF Developer Guide +// For +// more information about how to use the AWS WAF API to allow or block HTTP +// requests, see the AWS WAF Developer Guide // (https://docs.aws.amazon.com/waf/latest/developerguide/). func (c *Client) UpdateSqlInjectionMatchSet(ctx context.Context, params *UpdateSqlInjectionMatchSetInput, optFns ...func(*Options)) (*UpdateSqlInjectionMatchSetOutput, error) { if params == nil { @@ -90,13 +90,13 @@ type UpdateSqlInjectionMatchSetInput struct { // delete from a SqlInjectionMatchSet. For more information, see the applicable // data types: // - // * SqlInjectionMatchSetUpdate: Contains Action and + // * SqlInjectionMatchSetUpdate: Contains Action and // SqlInjectionMatchTuple // - // * SqlInjectionMatchTuple: Contains FieldToMatch and + // * SqlInjectionMatchTuple: Contains FieldToMatch and // TextTransformation // - // * FieldToMatch: Contains Data and Type + // * FieldToMatch: Contains Data and Type // // This member is required. Updates []*types.SqlInjectionMatchSetUpdate diff --git a/service/wafregional/api_op_UpdateWebACL.go b/service/wafregional/api_op_UpdateWebACL.go index 4cb3ccc75f8..2945e62ff35 100644 --- a/service/wafregional/api_op_UpdateWebACL.go +++ b/service/wafregional/api_op_UpdateWebACL.go @@ -21,61 +21,61 @@ import ( // identifies web requests that you want to allow, block, or count. When you update // a WebACL, you specify the following values: // -// * A default action for the -// WebACL, either ALLOW or BLOCK. AWS WAF performs the default action if a request -// doesn't match the criteria in any of the Rules in a WebACL. +// * A default action for the WebACL, +// either ALLOW or BLOCK. AWS WAF performs the default action if a request doesn't +// match the criteria in any of the Rules in a WebACL. // -// * The Rules -// that you want to add or delete. 
If you want to replace one Rule with another, -// you delete the existing Rule and add the new one. +// * The Rules that you want +// to add or delete. If you want to replace one Rule with another, you delete the +// existing Rule and add the new one. // -// * For each Rule, whether -// you want AWS WAF to allow requests, block requests, or count requests that match -// the conditions in the Rule. +// * For each Rule, whether you want AWS WAF to +// allow requests, block requests, or count requests that match the conditions in +// the Rule. // -// * The order in which you want AWS WAF to -// evaluate the Rules in a WebACL. If you add more than one Rule to a WebACL, AWS -// WAF evaluates each request against the Rules in order based on the value of -// Priority. (The Rule that has the lowest value for Priority is evaluated first.) -// When a web request matches all the predicates (such as ByteMatchSets and IPSets) -// in a Rule, AWS WAF immediately takes the corresponding action, allow or block, -// and doesn't evaluate the request against the remaining Rules in the WebACL, if -// any. +// * The order in which you want AWS WAF to evaluate the Rules in a +// WebACL. If you add more than one Rule to a WebACL, AWS WAF evaluates each +// request against the Rules in order based on the value of Priority. (The Rule +// that has the lowest value for Priority is evaluated first.) When a web request +// matches all the predicates (such as ByteMatchSets and IPSets) in a Rule, AWS WAF +// immediately takes the corresponding action, allow or block, and doesn't evaluate +// the request against the remaining Rules in the WebACL, if any. // -// To create and configure a WebACL, perform the following steps: +// To create and +// configure a WebACL, perform the following steps: // -// * -// Create and update the predicates that you want to include in Rules. For more -// information, see CreateByteMatchSet, UpdateByteMatchSet, CreateIPSet, -// UpdateIPSet, CreateSqlInjectionMatchSet, and UpdateSqlInjectionMatchSet. +// * Create and update the +// predicates that you want to include in Rules. For more information, see +// CreateByteMatchSet, UpdateByteMatchSet, CreateIPSet, UpdateIPSet, +// CreateSqlInjectionMatchSet, and UpdateSqlInjectionMatchSet. // -// * -// Create and update the Rules that you want to include in the WebACL. For more -// information, see CreateRule and UpdateRule. +// * Create and update +// the Rules that you want to include in the WebACL. For more information, see +// CreateRule and UpdateRule. // -// * Create a WebACL. See -// CreateWebACL. +// * Create a WebACL. See CreateWebACL. // -// * Use GetChangeToken to get the change token that you provide -// in the ChangeToken parameter of an UpdateWebACL request. +// * Use +// GetChangeToken to get the change token that you provide in the ChangeToken +// parameter of an UpdateWebACL request. // -// * Submit an -// UpdateWebACL request to specify the Rules that you want to include in the -// WebACL, to specify the default action, and to associate the WebACL with a -// CloudFront distribution. The ActivatedRule can be a rule group. If you specify a -// rule group as your ActivatedRule , you can exclude specific rules from that rule -// group. If you already have a rule group associated with a web ACL and want to -// submit an UpdateWebACL request to exclude certain rules from that rule group, -// you must first remove the rule group from the web ACL, the re-insert it again, -// specifying the excluded rules. 
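A sketch of the final UpdateWebACL step described above (the RATE_BASED caveat continues below), reusing the earlier sketch's imports and client. WebACLId, DefaultAction, Priority's numeric type, and the WafActionTypeAllow/WafActionTypeBlock constant names are assumptions; WebACLUpdate's Action and ActivatedRule fields, and ActivatedRule's Action, OverrideAction, Priority, RuleId, and Type, come from the hunks in this file.

// addRuleToWebACL inserts a regular Rule into a WebACL that allows by default
// and blocks requests matching the Rule.
func addRuleToWebACL(ctx context.Context, client *wafregional.Client, webACLID, ruleID string) error {
	token, err := client.GetChangeToken(ctx, &wafregional.GetChangeTokenInput{})
	if err != nil {
		return err
	}
	_, err = client.UpdateWebACL(ctx, &wafregional.UpdateWebACLInput{
		ChangeToken:   token.ChangeToken,
		WebACLId:      aws.String(webACLID),                             // assumed field name
		DefaultAction: &types.WafAction{Type: types.WafActionTypeAllow}, // assumed constant name for "ALLOW"
		Updates: []*types.WebACLUpdate{{
			Action: types.ChangeActionInsert, // assumed constant name for "INSERT"
			ActivatedRule: &types.ActivatedRule{
				Priority: aws.Int32(1), // lowest value is evaluated first
				RuleId:   aws.String(ruleID),
				Action:   &types.WafAction{Type: types.WafActionTypeBlock}, // assumed constant name for "BLOCK"
				Type:     types.WafRuleTypeRegular,                         // renamed constant from this patch
			},
		}},
	})
	return err
}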
For details, see ActivatedRule$ExcludedRules -// . +// * Submit an UpdateWebACL request to +// specify the Rules that you want to include in the WebACL, to specify the default +// action, and to associate the WebACL with a CloudFront distribution. The +// ActivatedRule can be a rule group. If you specify a rule group as your +// ActivatedRule , you can exclude specific rules from that rule group. If you +// already have a rule group associated with a web ACL and want to submit an +// UpdateWebACL request to exclude certain rules from that rule group, you must +// first remove the rule group from the web ACL, the re-insert it again, specifying +// the excluded rules. For details, see ActivatedRule$ExcludedRules . // -// Be aware that if you try to add a RATE_BASED rule to a web ACL without -// setting the rule type when first creating the rule, the UpdateWebACL request -// will fail because the request tries to add a REGULAR rule (the default rule -// type) with the specified ID, which does not exist. For more information about -// how to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF -// Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/). +// Be aware +// that if you try to add a RATE_BASED rule to a web ACL without setting the rule +// type when first creating the rule, the UpdateWebACL request will fail because +// the request tries to add a REGULAR rule (the default rule type) with the +// specified ID, which does not exist. For more information about how to use the +// AWS WAF API to allow or block HTTP requests, see the AWS WAF Developer Guide +// (https://docs.aws.amazon.com/waf/latest/developerguide/). func (c *Client) UpdateWebACL(ctx context.Context, params *UpdateWebACLInput, optFns ...func(*Options)) (*UpdateWebACLOutput, error) { if params == nil { params = &UpdateWebACLInput{} @@ -113,16 +113,16 @@ type UpdateWebACLInput struct { // you want to insert into or delete from a WebACL. For more information, see the // applicable data types: // - // * WebACLUpdate: Contains Action and ActivatedRule + // * WebACLUpdate: Contains Action and ActivatedRule // - // - // * ActivatedRule: Contains Action, OverrideAction, Priority, RuleId, and Type. + // * + // ActivatedRule: Contains Action, OverrideAction, Priority, RuleId, and Type. // ActivatedRule|OverrideAction applies only when updating or adding a RuleGroup to // a WebACL. In this case, you do not use ActivatedRule|Action. For all other // update requests, ActivatedRule|Action is used instead of // ActivatedRule|OverrideAction. // - // * WafAction: Contains Type + // * WafAction: Contains Type Updates []*types.WebACLUpdate } diff --git a/service/wafregional/api_op_UpdateXssMatchSet.go b/service/wafregional/api_op_UpdateXssMatchSet.go index 5cf78a3aaf6..9d51d45388a 100644 --- a/service/wafregional/api_op_UpdateXssMatchSet.go +++ b/service/wafregional/api_op_UpdateXssMatchSet.go @@ -20,18 +20,18 @@ import ( // global use. Inserts or deletes XssMatchTuple objects (filters) in an // XssMatchSet. For each XssMatchTuple object, you specify the following values: // -// -// * Action: Whether to insert the object into or delete the object from the array. +// * +// Action: Whether to insert the object into or delete the object from the array. // To change an XssMatchTuple, you delete the existing object and add a new one. 
// +// * +// FieldToMatch: The part of web requests that you want AWS WAF to inspect and, if +// you want AWS WAF to inspect a header or custom query parameter, the name of the +// header or parameter. // -// * FieldToMatch: The part of web requests that you want AWS WAF to inspect and, -// if you want AWS WAF to inspect a header or custom query parameter, the name of -// the header or parameter. -// -// * TextTransformation: Which text transformation, -// if any, to perform on the web request before inspecting the request for -// cross-site scripting attacks. You can only specify a single type of +// * TextTransformation: Which text transformation, if any, +// to perform on the web request before inspecting the request for cross-site +// scripting attacks. You can only specify a single type of // TextTransformation. // // You use XssMatchSet objects to specify which CloudFront @@ -41,20 +41,19 @@ import ( // applicable settings, and then configure AWS WAF to block the requests. To create // and configure an XssMatchSet, perform the following steps: // -// * Submit a +// * Submit a // CreateXssMatchSet request. // -// * Use GetChangeToken to get the change token -// that you provide in the ChangeToken parameter of an UpdateIPSet request. +// * Use GetChangeToken to get the change token that +// you provide in the ChangeToken parameter of an UpdateIPSet request. // -// * -// Submit an UpdateXssMatchSet request to specify the parts of web requests that -// you want AWS WAF to inspect for cross-site scripting attacks. +// * Submit an +// UpdateXssMatchSet request to specify the parts of web requests that you want AWS +// WAF to inspect for cross-site scripting attacks. // -// For more -// information about how to use the AWS WAF API to allow or block HTTP requests, -// see the AWS WAF Developer Guide -// (https://docs.aws.amazon.com/waf/latest/developerguide/). +// For more information about how +// to use the AWS WAF API to allow or block HTTP requests, see the AWS WAF +// Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/). func (c *Client) UpdateXssMatchSet(ctx context.Context, params *UpdateXssMatchSetInput, optFns ...func(*Options)) (*UpdateXssMatchSetOutput, error) { if params == nil { params = &UpdateXssMatchSetInput{} @@ -81,14 +80,13 @@ type UpdateXssMatchSetInput struct { // An array of XssMatchSetUpdate objects that you want to insert into or delete // from an XssMatchSet. For more information, see the applicable data types: // - // * + // * // XssMatchSetUpdate: Contains Action and XssMatchTuple // - // * XssMatchTuple: - // Contains FieldToMatch and TextTransformation + // * XssMatchTuple: Contains + // FieldToMatch and TextTransformation // - // * FieldToMatch: Contains Data - // and Type + // * FieldToMatch: Contains Data and Type // // This member is required. 
Updates []*types.XssMatchSetUpdate diff --git a/service/wafregional/types/enums.go b/service/wafregional/types/enums.go index 3b54caa791e..cdc053c7fff 100644 --- a/service/wafregional/types/enums.go +++ b/service/wafregional/types/enums.go @@ -616,13 +616,13 @@ type MatchFieldType string // Enum values for MatchFieldType const ( - MatchFieldTypeUri MatchFieldType = "URI" - MatchFieldTypeQuery_string MatchFieldType = "QUERY_STRING" - MatchFieldTypeHeader MatchFieldType = "HEADER" - MatchFieldTypeMethod MatchFieldType = "METHOD" - MatchFieldTypeBody MatchFieldType = "BODY" - MatchFieldTypeSingle_query_arg MatchFieldType = "SINGLE_QUERY_ARG" - MatchFieldTypeAll_query_args MatchFieldType = "ALL_QUERY_ARGS" + MatchFieldTypeUri MatchFieldType = "URI" + MatchFieldTypeQueryString MatchFieldType = "QUERY_STRING" + MatchFieldTypeHeader MatchFieldType = "HEADER" + MatchFieldTypeMethod MatchFieldType = "METHOD" + MatchFieldTypeBody MatchFieldType = "BODY" + MatchFieldTypeSingleQueryArg MatchFieldType = "SINGLE_QUERY_ARG" + MatchFieldTypeAllQueryArgs MatchFieldType = "ALL_QUERY_ARGS" ) // Values returns all known values for MatchFieldType. Note that this can be @@ -644,13 +644,13 @@ type MigrationErrorType string // Enum values for MigrationErrorType const ( - MigrationErrorTypeEntity_not_supported MigrationErrorType = "ENTITY_NOT_SUPPORTED" - MigrationErrorTypeEntity_not_found MigrationErrorType = "ENTITY_NOT_FOUND" - MigrationErrorTypeS3_bucket_no_permission MigrationErrorType = "S3_BUCKET_NO_PERMISSION" - MigrationErrorTypeS3_bucket_not_accessible MigrationErrorType = "S3_BUCKET_NOT_ACCESSIBLE" - MigrationErrorTypeS3_bucket_not_found MigrationErrorType = "S3_BUCKET_NOT_FOUND" - MigrationErrorTypeS3_bucket_invalid_region MigrationErrorType = "S3_BUCKET_INVALID_REGION" - MigrationErrorTypeS3_internal_error MigrationErrorType = "S3_INTERNAL_ERROR" + MigrationErrorTypeEntityNotSupported MigrationErrorType = "ENTITY_NOT_SUPPORTED" + MigrationErrorTypeEntityNotFound MigrationErrorType = "ENTITY_NOT_FOUND" + MigrationErrorTypeS3BucketNoPermission MigrationErrorType = "S3_BUCKET_NO_PERMISSION" + MigrationErrorTypeS3BucketNotAccessible MigrationErrorType = "S3_BUCKET_NOT_ACCESSIBLE" + MigrationErrorTypeS3BucketNotFound MigrationErrorType = "S3_BUCKET_NOT_FOUND" + MigrationErrorTypeS3BucketInvalidRegion MigrationErrorType = "S3_BUCKET_INVALID_REGION" + MigrationErrorTypeS3InternalError MigrationErrorType = "S3_INTERNAL_ERROR" ) // Values returns all known values for MigrationErrorType. 
Note that this can be @@ -672,24 +672,24 @@ type ParameterExceptionField string // Enum values for ParameterExceptionField const ( - ParameterExceptionFieldChange_action ParameterExceptionField = "CHANGE_ACTION" - ParameterExceptionFieldWaf_action ParameterExceptionField = "WAF_ACTION" - ParameterExceptionFieldWaf_override_action ParameterExceptionField = "WAF_OVERRIDE_ACTION" - ParameterExceptionFieldPredicate_type ParameterExceptionField = "PREDICATE_TYPE" - ParameterExceptionFieldIpset_type ParameterExceptionField = "IPSET_TYPE" - ParameterExceptionFieldByte_match_field_type ParameterExceptionField = "BYTE_MATCH_FIELD_TYPE" - ParameterExceptionFieldSql_injection_match_field_type ParameterExceptionField = "SQL_INJECTION_MATCH_FIELD_TYPE" - ParameterExceptionFieldByte_match_text_transformation ParameterExceptionField = "BYTE_MATCH_TEXT_TRANSFORMATION" - ParameterExceptionFieldByte_match_positional_constraint ParameterExceptionField = "BYTE_MATCH_POSITIONAL_CONSTRAINT" - ParameterExceptionFieldSize_constraint_comparison_operator ParameterExceptionField = "SIZE_CONSTRAINT_COMPARISON_OPERATOR" - ParameterExceptionFieldGeo_match_location_type ParameterExceptionField = "GEO_MATCH_LOCATION_TYPE" - ParameterExceptionFieldGeo_match_location_value ParameterExceptionField = "GEO_MATCH_LOCATION_VALUE" - ParameterExceptionFieldRate_key ParameterExceptionField = "RATE_KEY" - ParameterExceptionFieldRule_type ParameterExceptionField = "RULE_TYPE" - ParameterExceptionFieldNext_marker ParameterExceptionField = "NEXT_MARKER" - ParameterExceptionFieldResource_arn ParameterExceptionField = "RESOURCE_ARN" - ParameterExceptionFieldTags ParameterExceptionField = "TAGS" - ParameterExceptionFieldTag_keys ParameterExceptionField = "TAG_KEYS" + ParameterExceptionFieldChangeAction ParameterExceptionField = "CHANGE_ACTION" + ParameterExceptionFieldWafAction ParameterExceptionField = "WAF_ACTION" + ParameterExceptionFieldWafOverrideAction ParameterExceptionField = "WAF_OVERRIDE_ACTION" + ParameterExceptionFieldPredicateType ParameterExceptionField = "PREDICATE_TYPE" + ParameterExceptionFieldIpsetType ParameterExceptionField = "IPSET_TYPE" + ParameterExceptionFieldByteMatchFieldType ParameterExceptionField = "BYTE_MATCH_FIELD_TYPE" + ParameterExceptionFieldSqlInjectionMatchFieldType ParameterExceptionField = "SQL_INJECTION_MATCH_FIELD_TYPE" + ParameterExceptionFieldByteMatchTextTransformation ParameterExceptionField = "BYTE_MATCH_TEXT_TRANSFORMATION" + ParameterExceptionFieldByteMatchPositionalConstraint ParameterExceptionField = "BYTE_MATCH_POSITIONAL_CONSTRAINT" + ParameterExceptionFieldSizeConstraintComparisonOperator ParameterExceptionField = "SIZE_CONSTRAINT_COMPARISON_OPERATOR" + ParameterExceptionFieldGeoMatchLocationType ParameterExceptionField = "GEO_MATCH_LOCATION_TYPE" + ParameterExceptionFieldGeoMatchLocationValue ParameterExceptionField = "GEO_MATCH_LOCATION_VALUE" + ParameterExceptionFieldRateKey ParameterExceptionField = "RATE_KEY" + ParameterExceptionFieldRuleType ParameterExceptionField = "RULE_TYPE" + ParameterExceptionFieldNextMarker ParameterExceptionField = "NEXT_MARKER" + ParameterExceptionFieldResourceArn ParameterExceptionField = "RESOURCE_ARN" + ParameterExceptionFieldTags ParameterExceptionField = "TAGS" + ParameterExceptionFieldTagKeys ParameterExceptionField = "TAG_KEYS" ) // Values returns all known values for ParameterExceptionField. 
Note that this can @@ -722,10 +722,10 @@ type ParameterExceptionReason string // Enum values for ParameterExceptionReason const ( - ParameterExceptionReasonInvalid_option ParameterExceptionReason = "INVALID_OPTION" - ParameterExceptionReasonIllegal_combination ParameterExceptionReason = "ILLEGAL_COMBINATION" - ParameterExceptionReasonIllegal_argument ParameterExceptionReason = "ILLEGAL_ARGUMENT" - ParameterExceptionReasonInvalid_tag_key ParameterExceptionReason = "INVALID_TAG_KEY" + ParameterExceptionReasonInvalidOption ParameterExceptionReason = "INVALID_OPTION" + ParameterExceptionReasonIllegalCombination ParameterExceptionReason = "ILLEGAL_COMBINATION" + ParameterExceptionReasonIllegalArgument ParameterExceptionReason = "ILLEGAL_ARGUMENT" + ParameterExceptionReasonInvalidTagKey ParameterExceptionReason = "INVALID_TAG_KEY" ) // Values returns all known values for ParameterExceptionReason. Note that this can @@ -744,11 +744,11 @@ type PositionalConstraint string // Enum values for PositionalConstraint const ( - PositionalConstraintExactly PositionalConstraint = "EXACTLY" - PositionalConstraintStarts_with PositionalConstraint = "STARTS_WITH" - PositionalConstraintEnds_with PositionalConstraint = "ENDS_WITH" - PositionalConstraintContains PositionalConstraint = "CONTAINS" - PositionalConstraintContains_word PositionalConstraint = "CONTAINS_WORD" + PositionalConstraintExactly PositionalConstraint = "EXACTLY" + PositionalConstraintStartsWith PositionalConstraint = "STARTS_WITH" + PositionalConstraintEndsWith PositionalConstraint = "ENDS_WITH" + PositionalConstraintContains PositionalConstraint = "CONTAINS" + PositionalConstraintContainsWord PositionalConstraint = "CONTAINS_WORD" ) // Values returns all known values for PositionalConstraint. Note that this can be @@ -768,13 +768,13 @@ type PredicateType string // Enum values for PredicateType const ( - PredicateTypeIp_match PredicateType = "IPMatch" - PredicateTypeByte_match PredicateType = "ByteMatch" - PredicateTypeSql_injection_match PredicateType = "SqlInjectionMatch" - PredicateTypeGeo_match PredicateType = "GeoMatch" - PredicateTypeSize_constraint PredicateType = "SizeConstraint" - PredicateTypeXss_match PredicateType = "XssMatch" - PredicateTypeRegex_match PredicateType = "RegexMatch" + PredicateTypeIpMatch PredicateType = "IPMatch" + PredicateTypeByteMatch PredicateType = "ByteMatch" + PredicateTypeSqlInjectionMatch PredicateType = "SqlInjectionMatch" + PredicateTypeGeoMatch PredicateType = "GeoMatch" + PredicateTypeSizeConstraint PredicateType = "SizeConstraint" + PredicateTypeXssMatch PredicateType = "XssMatch" + PredicateTypeRegexMatch PredicateType = "RegexMatch" ) // Values returns all known values for PredicateType. Note that this can be @@ -812,8 +812,8 @@ type ResourceType string // Enum values for ResourceType const ( - ResourceTypeApplication_load_balancer ResourceType = "APPLICATION_LOAD_BALANCER" - ResourceTypeApi_gateway ResourceType = "API_GATEWAY" + ResourceTypeApplicationLoadBalancer ResourceType = "APPLICATION_LOAD_BALANCER" + ResourceTypeApiGateway ResourceType = "API_GATEWAY" ) // Values returns all known values for ResourceType. 
Note that this can be expanded @@ -830,12 +830,12 @@ type TextTransformation string // Enum values for TextTransformation const ( - TextTransformationNone TextTransformation = "NONE" - TextTransformationCompress_white_space TextTransformation = "COMPRESS_WHITE_SPACE" - TextTransformationHtml_entity_decode TextTransformation = "HTML_ENTITY_DECODE" - TextTransformationLowercase TextTransformation = "LOWERCASE" - TextTransformationCmd_line TextTransformation = "CMD_LINE" - TextTransformationUrl_decode TextTransformation = "URL_DECODE" + TextTransformationNone TextTransformation = "NONE" + TextTransformationCompressWhiteSpace TextTransformation = "COMPRESS_WHITE_SPACE" + TextTransformationHtmlEntityDecode TextTransformation = "HTML_ENTITY_DECODE" + TextTransformationLowercase TextTransformation = "LOWERCASE" + TextTransformationCmdLine TextTransformation = "CMD_LINE" + TextTransformationUrlDecode TextTransformation = "URL_DECODE" ) // Values returns all known values for TextTransformation. Note that this can be @@ -894,9 +894,9 @@ type WafRuleType string // Enum values for WafRuleType const ( - WafRuleTypeRegular WafRuleType = "REGULAR" - WafRuleTypeRate_based WafRuleType = "RATE_BASED" - WafRuleTypeGroup WafRuleType = "GROUP" + WafRuleTypeRegular WafRuleType = "REGULAR" + WafRuleTypeRateBased WafRuleType = "RATE_BASED" + WafRuleTypeGroup WafRuleType = "GROUP" ) // Values returns all known values for WafRuleType. Note that this can be expanded diff --git a/service/wafregional/types/errors.go b/service/wafregional/types/errors.go index ad4c83d86f0..b47558794d4 100644 --- a/service/wafregional/types/errors.go +++ b/service/wafregional/types/errors.go @@ -44,28 +44,28 @@ func (e *WAFDisallowedNameException) ErrorFault() smithy.ErrorFault { return smi // The operation failed due to a problem with the migration. The failure cause is // provided in the exception, in the MigrationErrorType: // -// * -// ENTITY_NOT_SUPPORTED - The web ACL has an unsupported entity but the -// IgnoreUnsupportedType is not set to true. +// * ENTITY_NOT_SUPPORTED - +// The web ACL has an unsupported entity but the IgnoreUnsupportedType is not set +// to true. // -// * ENTITY_NOT_FOUND - The web ACL -// doesn't exist. +// * ENTITY_NOT_FOUND - The web ACL doesn't exist. // -// * S3_BUCKET_NO_PERMISSION - You don't have permission to -// perform the PutObject action to the specified Amazon S3 bucket. +// * +// S3_BUCKET_NO_PERMISSION - You don't have permission to perform the PutObject +// action to the specified Amazon S3 bucket. // -// * -// S3_BUCKET_NOT_ACCESSIBLE - The bucket policy doesn't allow AWS WAF to perform -// the PutObject action in the bucket. +// * S3_BUCKET_NOT_ACCESSIBLE - The +// bucket policy doesn't allow AWS WAF to perform the PutObject action in the +// bucket. // -// * S3_BUCKET_NOT_FOUND - The S3 bucket -// doesn't exist. +// * S3_BUCKET_NOT_FOUND - The S3 bucket doesn't exist. // -// * S3_BUCKET_INVALID_REGION - The S3 bucket is not in the -// same Region as the web ACL. +// * +// S3_BUCKET_INVALID_REGION - The S3 bucket is not in the same Region as the web +// ACL. // -// * S3_INTERNAL_ERROR - AWS WAF failed to create -// the template in the S3 bucket for another reason. +// * S3_INTERNAL_ERROR - AWS WAF failed to create the template in the S3 +// bucket for another reason. type WAFEntityMigrationException struct { Message *string @@ -123,23 +123,22 @@ func (e *WAFInvalidAccountException) ErrorFault() smithy.ErrorFault { return smi // The operation failed because there was nothing to do. 
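The enums.go hunks above only rename the exported Go constants; the string values sent on the wire are unchanged (the WAFInvalidOperationException examples continue below). A tiny snippet, assuming the fmt import alongside the types package, showing how the renamed constants read in calling code:

func printRenamedConstants() {
	pc := types.PositionalConstraintStartsWith // previously types.PositionalConstraintStarts_with
	tt := types.TextTransformationCmdLine      // previously types.TextTransformationCmd_line
	fmt.Println(string(pc), string(tt))        // prints: STARTS_WITH CMD_LINE
}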
For example: // -// * You -// tried to remove a Rule from a WebACL, but the Rule isn't in the specified -// WebACL. +// * You tried +// to remove a Rule from a WebACL, but the Rule isn't in the specified WebACL. // -// * You tried to remove an IP address from an IPSet, but the IP -// address isn't in the specified IPSet. +// * +// You tried to remove an IP address from an IPSet, but the IP address isn't in the +// specified IPSet. // -// * You tried to remove a -// ByteMatchTuple from a ByteMatchSet, but the ByteMatchTuple isn't in the -// specified WebACL. +// * You tried to remove a ByteMatchTuple from a ByteMatchSet, +// but the ByteMatchTuple isn't in the specified WebACL. // -// * You tried to add a Rule to a WebACL, but the Rule -// already exists in the specified WebACL. +// * You tried to add a Rule +// to a WebACL, but the Rule already exists in the specified WebACL. // -// * You tried to add a ByteMatchTuple -// to a ByteMatchSet, but the ByteMatchTuple already exists in the specified -// WebACL. +// * You tried +// to add a ByteMatchTuple to a ByteMatchSet, but the ByteMatchTuple already exists +// in the specified WebACL. type WAFInvalidOperationException struct { Message *string } @@ -159,33 +158,33 @@ func (e *WAFInvalidOperationException) ErrorFault() smithy.ErrorFault { return s // The operation failed because AWS WAF didn't recognize a parameter in the // request. For example: // -// * You specified an invalid parameter name. +// * You specified an invalid parameter name. // -// * You +// * You // specified an invalid value. // -// * You tried to update an object (ByteMatchSet, +// * You tried to update an object (ByteMatchSet, // IPSet, Rule, or WebACL) using an action other than INSERT or DELETE. // -// * You +// * You // tried to create a WebACL with a DefaultActionType other than ALLOW, BLOCK, or // COUNT. // -// * You tried to create a RateBasedRule with a RateKey value other -// than IP. +// * You tried to create a RateBasedRule with a RateKey value other than +// IP. // -// * You tried to update a WebACL with a WafActionType other than -// ALLOW, BLOCK, or COUNT. +// * You tried to update a WebACL with a WafActionType other than ALLOW, +// BLOCK, or COUNT. // -// * You tried to update a ByteMatchSet with a -// FieldToMatchType other than HEADER, METHOD, QUERY_STRING, URI, or BODY. +// * You tried to update a ByteMatchSet with a FieldToMatchType +// other than HEADER, METHOD, QUERY_STRING, URI, or BODY. // -// * -// You tried to update a ByteMatchSet with a Field of HEADER but no value for -// Data. +// * You tried to update a +// ByteMatchSet with a Field of HEADER but no value for Data. // -// * Your request references an ARN that is malformed, or corresponds to -// a resource with which a web ACL cannot be associated. +// * Your request +// references an ARN that is malformed, or corresponds to a resource with which a +// web ACL cannot be associated. type WAFInvalidParameterException struct { Message *string @@ -209,30 +208,29 @@ func (e *WAFInvalidParameterException) ErrorFault() smithy.ErrorFault { return s // The operation failed because the specified policy is not in the proper format. // The policy is subject to the following restrictions: // -// * You can attach only -// one policy with each PutPermissionPolicy request. +// * You can attach only one +// policy with each PutPermissionPolicy request. // -// * The policy must include -// an Effect, Action and Principal. +// * The policy must include an +// Effect, Action and Principal. 
// -// * Effect must specify Allow. +// * Effect must specify Allow. // -// * The -// Action in the policy must be waf:UpdateWebACL, waf-regional:UpdateWebACL, -// waf:GetRuleGroup and waf-regional:GetRuleGroup . Any extra or wildcard actions -// in the policy will be rejected. +// * The Action in the +// policy must be waf:UpdateWebACL, waf-regional:UpdateWebACL, waf:GetRuleGroup and +// waf-regional:GetRuleGroup . Any extra or wildcard actions in the policy will be +// rejected. // -// * The policy cannot include a Resource -// parameter. +// * The policy cannot include a Resource parameter. // -// * The ARN in the request must be a valid WAF RuleGroup ARN and -// the RuleGroup must exist in the same region. +// * The ARN in the +// request must be a valid WAF RuleGroup ARN and the RuleGroup must exist in the +// same region. // -// * The user making the request -// must be the owner of the RuleGroup. +// * The user making the request must be the owner of the +// RuleGroup. // -// * Your policy must be composed using -// IAM Policy version 2012-10-17. +// * Your policy must be composed using IAM Policy version 2012-10-17. type WAFInvalidPermissionPolicyException struct { Message *string } @@ -295,17 +293,17 @@ func (e *WAFLimitsExceededException) ErrorFault() smithy.ErrorFault { return smi // The operation failed because you tried to delete an object that isn't empty. For // example: // -// * You tried to delete a WebACL that still contains one or more -// Rule objects. +// * You tried to delete a WebACL that still contains one or more Rule +// objects. // -// * You tried to delete a Rule that still contains one or more +// * You tried to delete a Rule that still contains one or more // ByteMatchSet objects or other predicates. // -// * You tried to delete a -// ByteMatchSet that contains one or more ByteMatchTuple objects. +// * You tried to delete a ByteMatchSet +// that contains one or more ByteMatchTuple objects. // -// * You tried -// to delete an IPSet that references one or more IP addresses. +// * You tried to delete an +// IPSet that references one or more IP addresses. type WAFNonEmptyEntityException struct { Message *string } @@ -325,18 +323,18 @@ func (e *WAFNonEmptyEntityException) ErrorFault() smithy.ErrorFault { return smi // The operation failed because you tried to add an object to or delete an object // from another object that doesn't exist. For example: // -// * You tried to add a -// Rule to or delete a Rule from a WebACL that doesn't exist. +// * You tried to add a Rule +// to or delete a Rule from a WebACL that doesn't exist. // -// * You tried to -// add a ByteMatchSet to or delete a ByteMatchSet from a Rule that doesn't exist. +// * You tried to add a +// ByteMatchSet to or delete a ByteMatchSet from a Rule that doesn't exist. // +// * You +// tried to add an IP address to or delete an IP address from an IPSet that doesn't +// exist. // -// * You tried to add an IP address to or delete an IP address from an IPSet that -// doesn't exist. -// -// * You tried to add a ByteMatchTuple to or delete a -// ByteMatchTuple from a ByteMatchSet that doesn't exist. +// * You tried to add a ByteMatchTuple to or delete a ByteMatchTuple from a +// ByteMatchSet that doesn't exist. type WAFNonexistentContainerException struct { Message *string } @@ -375,11 +373,10 @@ func (e *WAFNonexistentItemException) ErrorFault() smithy.ErrorFault { return sm // The operation failed because you tried to delete an object that is still in use. 
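A sketch of how a caller might distinguish the typed errors reflowed in these errors.go hunks (the WAFReferencedItemException examples continue below). It assumes the standard errors package plus the types import from the earlier sketches, and that err came back from a wafregional delete call such as DeleteRule; the concrete pointer types are the ones declared in this file.

// classifyDeleteError maps the documented delete-time failures to a short hint.
func classifyDeleteError(err error) string {
	var nonEmpty *types.WAFNonEmptyEntityException
	var referenced *types.WAFReferencedItemException
	switch {
	case err == nil:
		return "ok"
	case errors.As(err, &nonEmpty):
		return "the object still contains rules or predicates; empty it before deleting"
	case errors.As(err, &referenced):
		return "the object is still referenced by a Rule or WebACL; remove the reference first"
	default:
		return "some other failure"
	}
}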
// For example: // -// * You tried to delete a ByteMatchSet that is still referenced -// by a Rule. +// * You tried to delete a ByteMatchSet that is still referenced by a +// Rule. // -// * You tried to delete a Rule that is still referenced by a -// WebACL. +// * You tried to delete a Rule that is still referenced by a WebACL. type WAFReferencedItemException struct { Message *string } diff --git a/service/wafregional/types/types.go b/service/wafregional/types/types.go index 679c79b3922..6f810019e46 100644 --- a/service/wafregional/types/types.go +++ b/service/wafregional/types/types.go @@ -38,15 +38,15 @@ type ActivatedRule struct { // Specifies the action that CloudFront or AWS WAF takes when a web request matches // the conditions in the Rule. Valid values for Action include the following: // + // * + // ALLOW: CloudFront responds with the requested object. // - // * ALLOW: CloudFront responds with the requested object. - // - // * BLOCK: CloudFront + // * BLOCK: CloudFront // responds with an HTTP 403 (Forbidden) status code. // - // * COUNT: AWS WAF - // increments a counter of requests that match the conditions in the rule and then - // continues to inspect the web request based on the remaining rules in the web + // * COUNT: AWS WAF increments + // a counter of requests that match the conditions in the rule and then continues + // to inspect the web request based on the remaining rules in the web // ACL. // // ActivatedRule|OverrideAction applies only when updating or adding a @@ -68,25 +68,24 @@ type ActivatedRule struct { // metrics for each ExcludedRule. If you want to exclude rules from a rule group // that is already associated with a web ACL, perform the following steps: // - // * - // Use the AWS WAF logs to identify the IDs of the rules that you want to exclude. - // For more information about the logs, see Logging Web ACL Traffic Information + // * Use + // the AWS WAF logs to identify the IDs of the rules that you want to exclude. For + // more information about the logs, see Logging Web ACL Traffic Information // (https://docs.aws.amazon.com/waf/latest/developerguide/logging.html). // - // * - // Submit an UpdateWebACL request that has two actions: + // * Submit + // an UpdateWebACL request that has two actions: // - // * The first action - // deletes the existing rule group from the web ACL. That is, in the UpdateWebACL - // request, the first Updates:Action should be DELETE and - // Updates:ActivatedRule:RuleId should be the rule group that contains the rules - // that you want to exclude. + // * The first action deletes the + // existing rule group from the web ACL. That is, in the UpdateWebACL request, the + // first Updates:Action should be DELETE and Updates:ActivatedRule:RuleId should be + // the rule group that contains the rules that you want to exclude. // - // * The second action inserts the same rule - // group back in, but specifying the rules to exclude. That is, the second - // Updates:Action should be INSERT, Updates:ActivatedRule:RuleId should be the rule - // group that you just removed, and ExcludedRules should contain the rules that you - // want to exclude. + // * The second + // action inserts the same rule group back in, but specifying the rules to exclude. + // That is, the second Updates:Action should be INSERT, + // Updates:ActivatedRule:RuleId should be the rule group that you just removed, and + // ExcludedRules should contain the rules that you want to exclude. ExcludedRules []*ExcludedRule // Use the OverrideAction to test your RuleGroup. 
Any rule in a RuleGroup can @@ -224,21 +223,21 @@ type ByteMatchTuple struct { // underscore (A-Z, a-z, 0-9, or _). In addition, TargetString must be a word, // which means one of the following: // - // * TargetString exactly matches the value - // of the specified part of the web request, such as the value of a header. + // * TargetString exactly matches the value of + // the specified part of the web request, such as the value of a header. // - // * + // * // TargetString is at the beginning of the specified part of the web request and is // followed by a character other than an alphanumeric character or underscore (_), // for example, BadBot;. // - // * TargetString is at the end of the specified part of - // the web request and is preceded by a character other than an alphanumeric - // character or underscore (_), for example, ;BadBot. + // * TargetString is at the end of the specified part of the + // web request and is preceded by a character other than an alphanumeric character + // or underscore (_), for example, ;BadBot. // - // * TargetString is in the - // middle of the specified part of the web request and is preceded and followed by - // characters other than alphanumeric characters or underscore (_), for example, + // * TargetString is in the middle of the + // specified part of the web request and is preceded and followed by characters + // other than alphanumeric characters or underscore (_), for example, // -BadBot;. // // EXACTLY The value of the specified part of the web request must @@ -255,44 +254,44 @@ type ByteMatchTuple struct { // The maximum length of the value is 50 bytes. Valid values depend on the values // that you specified for FieldToMatch: // - // * HEADER: The value that you want AWS - // WAF to search for in the request header that you specified in FieldToMatch, for + // * HEADER: The value that you want AWS WAF + // to search for in the request header that you specified in FieldToMatch, for // example, the value of the User-Agent or Referer header. // - // * METHOD: The HTTP + // * METHOD: The HTTP // method, which indicates the type of operation specified in the request. // CloudFront supports the following methods: DELETE, GET, HEAD, OPTIONS, PATCH, // POST, and PUT. // - // * QUERY_STRING: The value that you want AWS WAF to search - // for in the query string, which is the part of a URL that appears after a ? + // * QUERY_STRING: The value that you want AWS WAF to search for in + // the query string, which is the part of a URL that appears after a ? // character. // - // * URI: The value that you want AWS WAF to search for in the part - // of a URL that identifies a resource, for example, /images/daily-ad.jpg. - // - // * - // BODY: The part of a request that contains any additional data that you want to - // send to your web server as the HTTP request body, such as data from a form. The - // request body immediately follows the request headers. Note that only the first - // 8192 bytes of the request body are forwarded to AWS WAF for inspection. To allow - // or block requests based on the length of the body, you can create a size - // constraint set. For more information, see CreateSizeConstraintSet. - // - // * - // SINGLE_QUERY_ARG: The parameter in the query string that you will inspect, such - // as UserName or SalesRegion. The maximum length for SINGLE_QUERY_ARG is 30 - // characters. 
- // - // * ALL_QUERY_ARGS: Similar to SINGLE_QUERY_ARG, but instead of - // inspecting a single parameter, AWS WAF inspects all parameters within the query - // string for the value or regex pattern that you specify in TargetString. - // - // If - // TargetString includes alphabetic characters A-Z and a-z, note that the value is - // case sensitive. If you're using the AWS WAF API Specify a base64-encoded version - // of the value. The maximum length of the value before you base64-encode it is 50 - // bytes. For example, suppose the value of Type is HEADER and the value of Data is + // * URI: The value that you want AWS WAF to search for in the part of + // a URL that identifies a resource, for example, /images/daily-ad.jpg. + // + // * BODY: + // The part of a request that contains any additional data that you want to send to + // your web server as the HTTP request body, such as data from a form. The request + // body immediately follows the request headers. Note that only the first 8192 + // bytes of the request body are forwarded to AWS WAF for inspection. To allow or + // block requests based on the length of the body, you can create a size constraint + // set. For more information, see CreateSizeConstraintSet. + // + // * SINGLE_QUERY_ARG: The + // parameter in the query string that you will inspect, such as UserName or + // SalesRegion. The maximum length for SINGLE_QUERY_ARG is 30 characters. + // + // * + // ALL_QUERY_ARGS: Similar to SINGLE_QUERY_ARG, but instead of inspecting a single + // parameter, AWS WAF inspects all parameters within the query string for the value + // or regex pattern that you specify in TargetString. + // + // If TargetString includes + // alphabetic characters A-Z and a-z, note that the value is case sensitive. If + // you're using the AWS WAF API Specify a base64-encoded version of the value. The + // maximum length of the value before you base64-encode it is 50 bytes. For + // example, suppose the value of Type is HEADER and the value of Data is // User-Agent. If you want to search the User-Agent header for the value BadBot, // you base64-encode BadBot using MIME base64-encoding and include the resulting // value, QmFkQm90, in the value of TargetString. 
If you're using the AWS CLI or @@ -310,64 +309,64 @@ type ByteMatchTuple struct { // command and using unusual formatting to disguise some or all of the command, use // this option to perform the following transformations: // - // * Delete the - // following characters: \ " ' ^ + // * Delete the following + // characters: \ " ' ^ // - // * Delete spaces before the following - // characters: / ( + // * Delete spaces before the following characters: / ( // - // * Replace the following characters with a space: , ; + // * + // Replace the following characters with a space: , ; // - // * - // Replace multiple spaces with one space + // * Replace multiple spaces + // with one space // - // * Convert uppercase letters (A-Z) to - // lowercase (a-z) + // * Convert uppercase letters (A-Z) to lowercase + // (a-z) // - // COMPRESS_WHITE_SPACE Use this option to replace the following - // characters with a space character (decimal 32): + // COMPRESS_WHITE_SPACE Use this option to replace the following characters + // with a space character (decimal 32): // - // * \f, formfeed, decimal - // 12 + // * \f, formfeed, decimal 12 // - // * \t, tab, decimal 9 + // * \t, tab, + // decimal 9 // - // * \n, newline, decimal 10 + // * \n, newline, decimal 10 // - // * \r, carriage - // return, decimal 13 + // * \r, carriage return, decimal 13 // - // * \v, vertical tab, decimal 11 + // * \v, + // vertical tab, decimal 11 // - // * non-breaking - // space, decimal 160 + // * non-breaking space, decimal + // 160 // - // COMPRESS_WHITE_SPACE also replaces multiple spaces with one - // space. HTML_ENTITY_DECODE Use this option to replace HTML-encoded characters - // with unencoded characters. HTML_ENTITY_DECODE performs the following - // operations: - // - // * Replaces (ampersand)quot; with " + // COMPRESS_WHITE_SPACE also replaces multiple spaces with one space. + // HTML_ENTITY_DECODE Use this option to replace HTML-encoded characters with + // unencoded characters. HTML_ENTITY_DECODE performs the following operations: // - // * Replaces - // (ampersand)nbsp; with a non-breaking space, decimal 160 + // * + // Replaces (ampersand)quot; with " // - // * Replaces - // (ampersand)lt; with a "less than" symbol + // * Replaces (ampersand)nbsp; with a + // non-breaking space, decimal 160 // - // * Replaces (ampersand)gt; with > + // * Replaces (ampersand)lt; with a "less than" + // symbol // + // * Replaces (ampersand)gt; with > // - // * Replaces characters that are represented in hexadecimal format, - // (ampersand)#xhhhh;, with the corresponding characters + // * Replaces characters that are + // represented in hexadecimal format, (ampersand)#xhhhh;, with the corresponding + // characters // - // * Replaces characters - // that are represented in decimal format, (ampersand)#nnnn;, with the - // corresponding characters + // * Replaces characters that are represented in decimal format, + // (ampersand)#nnnn;, with the corresponding characters // - // LOWERCASE Use this option to convert uppercase letters - // (A-Z) to lowercase (a-z). URL_DECODE Use this option to decode a URL-encoded - // value. NONE Specify NONE if you don't want to perform any text transformations. + // LOWERCASE Use this option + // to convert uppercase letters (A-Z) to lowercase (a-z). URL_DECODE Use this + // option to decode a URL-encoded value. NONE Specify NONE if you don't want to + // perform any text transformations. // // This member is required. 
TextTransformation TextTransformation @@ -402,38 +401,38 @@ type FieldToMatch struct { // The part of the web request that you want AWS WAF to search for a specified // string. Parts of a request that you can search include the following: // - // * - // HEADER: A specified request header, for example, the value of the User-Agent or - // Referer header. If you choose HEADER for the type, specify the name of the - // header in Data. + // * HEADER: + // A specified request header, for example, the value of the User-Agent or Referer + // header. If you choose HEADER for the type, specify the name of the header in + // Data. // - // * METHOD: The HTTP method, which indicated the type of - // operation that the request is asking the origin to perform. Amazon CloudFront - // supports the following methods: DELETE, GET, HEAD, OPTIONS, PATCH, POST, and - // PUT. + // * METHOD: The HTTP method, which indicated the type of operation that the + // request is asking the origin to perform. Amazon CloudFront supports the + // following methods: DELETE, GET, HEAD, OPTIONS, PATCH, POST, and PUT. // - // * QUERY_STRING: A query string, which is the part of a URL that - // appears after a ? character, if any. + // * + // QUERY_STRING: A query string, which is the part of a URL that appears after a ? + // character, if any. // - // * URI: The part of a web request that - // identifies a resource, for example, /images/daily-ad.jpg. + // * URI: The part of a web request that identifies a resource, + // for example, /images/daily-ad.jpg. // - // * BODY: The part - // of a request that contains any additional data that you want to send to your web - // server as the HTTP request body, such as data from a form. The request body - // immediately follows the request headers. Note that only the first 8192 bytes of - // the request body are forwarded to AWS WAF for inspection. To allow or block - // requests based on the length of the body, you can create a size constraint set. - // For more information, see CreateSizeConstraintSet. + // * BODY: The part of a request that contains + // any additional data that you want to send to your web server as the HTTP request + // body, such as data from a form. The request body immediately follows the request + // headers. Note that only the first 8192 bytes of the request body are forwarded + // to AWS WAF for inspection. To allow or block requests based on the length of the + // body, you can create a size constraint set. For more information, see + // CreateSizeConstraintSet. // - // * SINGLE_QUERY_ARG: The - // parameter in the query string that you will inspect, such as UserName or - // SalesRegion. The maximum length for SINGLE_QUERY_ARG is 30 characters. + // * SINGLE_QUERY_ARG: The parameter in the query string + // that you will inspect, such as UserName or SalesRegion. The maximum length for + // SINGLE_QUERY_ARG is 30 characters. // - // * - // ALL_QUERY_ARGS: Similar to SINGLE_QUERY_ARG, but rather than inspecting a single - // parameter, AWS WAF will inspect all parameters within the query for the value or - // regex pattern that you specify in TargetString. + // * ALL_QUERY_ARGS: Similar to + // SINGLE_QUERY_ARG, but rather than inspecting a single parameter, AWS WAF will + // inspect all parameters within the query for the value or regex pattern that you + // specify in TargetString. // // This member is required. 
Type MatchFieldType @@ -577,11 +576,11 @@ type HTTPRequest struct { // with a CloudFront distribution, this is the value of one of the following fields // in CloudFront access logs: // - // * c-ip, if the viewer did not use an HTTP proxy - // or a load balancer to send the request + // * c-ip, if the viewer did not use an HTTP proxy or a + // load balancer to send the request // - // * x-forwarded-for, if the viewer did - // use an HTTP proxy or a load balancer to send the request + // * x-forwarded-for, if the viewer did use an + // HTTP proxy or a load balancer to send the request ClientIP *string // The two-letter country code for the country that the request originated from. @@ -661,26 +660,26 @@ type IPSetDescriptor struct { // Specify an IPv4 address by using CIDR notation. For example: // - // * To configure - // AWS WAF to allow, block, or count requests that originated from the IP address + // * To configure AWS + // WAF to allow, block, or count requests that originated from the IP address // 192.0.2.44, specify 192.0.2.44/32. // - // * To configure AWS WAF to allow, block, - // or count requests that originated from IP addresses from 192.0.2.0 to - // 192.0.2.255, specify 192.0.2.0/24. + // * To configure AWS WAF to allow, block, or + // count requests that originated from IP addresses from 192.0.2.0 to 192.0.2.255, + // specify 192.0.2.0/24. // - // For more information about CIDR notation, - // see the Wikipedia entry Classless Inter-Domain Routing + // For more information about CIDR notation, see the + // Wikipedia entry Classless Inter-Domain Routing // (https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing). Specify an IPv6 // address by using CIDR notation. For example: // - // * To configure AWS WAF to - // allow, block, or count requests that originated from the IP address + // * To configure AWS WAF to allow, + // block, or count requests that originated from the IP address // 1111:0000:0000:0000:0000:0000:0000:0111, specify // 1111:0000:0000:0000:0000:0000:0000:0111/128. // - // * To configure AWS WAF to - // allow, block, or count requests that originated from IP addresses + // * To configure AWS WAF to allow, + // block, or count requests that originated from IP addresses // 1111:0000:0000:0000:0000:0000:0000:0000 to // 1111:0000:0000:0000:ffff:ffff:ffff:ffff, specify // 1111:0000:0000:0000:0000:0000:0000:0000/64. @@ -810,15 +809,15 @@ type Predicate struct { // seen from an attacker, you might create a RateBasedRule that includes the // following conditions: // -// * The requests come from 192.0.2.44. +// * The requests come from 192.0.2.44. // -// * They -// contain the value BadBot in the User-Agent header. +// * They contain the +// value BadBot in the User-Agent header. // -// In the rule, you also define -// the rate limit as 1,000. Requests that meet both of these conditions and exceed -// 1,000 requests every five minutes trigger the rule's action (block or count), -// which is defined in the web ACL. +// In the rule, you also define the rate +// limit as 1,000. Requests that meet both of these conditions and exceed 1,000 +// requests every five minutes trigger the rule's action (block or count), which is +// defined in the web ACL. type RateBasedRule struct { // The Predicates object contains one Predicate element for each ByteMatchSet, @@ -895,16 +894,16 @@ type RegexMatchSet struct { // Contains an array of RegexMatchTuple objects. 
Each RegexMatchTuple object // contains: // - // * The part of a web request that you want AWS WAF to inspect, - // such as a query string or the value of the User-Agent header. + // * The part of a web request that you want AWS WAF to inspect, such as + // a query string or the value of the User-Agent header. // - // * The - // identifier of the pattern (a regular expression) that you want AWS WAF to look - // for. For more information, see RegexPatternSet. + // * The identifier of the + // pattern (a regular expression) that you want AWS WAF to look for. For more + // information, see RegexPatternSet. // - // * Whether to perform any - // conversions on the request, such as converting it to lowercase, before - // inspecting it for the specified string. + // * Whether to perform any conversions on the + // request, such as converting it to lowercase, before inspecting it for the + // specified string. RegexMatchTuples []*RegexMatchTuple } @@ -969,16 +968,16 @@ type RegexMatchSetUpdate struct { // in web requests, the location in requests that you want AWS WAF to search, and // other settings. Each RegexMatchTuple object contains: // -// * The part of a web +// * The part of a web // request that you want AWS WAF to inspect, such as a query string or the value of // the User-Agent header. // -// * The identifier of the pattern (a regular -// expression) that you want AWS WAF to look for. For more information, see -// RegexPatternSet. +// * The identifier of the pattern (a regular expression) +// that you want AWS WAF to look for. For more information, see RegexPatternSet. // -// * Whether to perform any conversions on the request, such -// as converting it to lowercase, before inspecting it for the specified string. +// * +// Whether to perform any conversions on the request, such as converting it to +// lowercase, before inspecting it for the specified string. type RegexMatchTuple struct { // Specifies where in a web request to look for the RegexPatternSet. @@ -1005,64 +1004,64 @@ type RegexMatchTuple struct { // commandline command and using unusual formatting to disguise some or all of the // command, use this option to perform the following transformations: // - // * Delete - // the following characters: \ " ' ^ - // - // * Delete spaces before the following - // characters: / ( + // * Delete the + // following characters: \ " ' ^ // - // * Replace the following characters with a space: , ; + // * Delete spaces before the following characters: + // / ( // - // * - // Replace multiple spaces with one space + // * Replace the following characters with a space: , ; // - // * Convert uppercase letters (A-Z) to - // lowercase (a-z) + // * Replace multiple + // spaces with one space // - // COMPRESS_WHITE_SPACE Use this option to replace the following - // characters with a space character (decimal 32): + // * Convert uppercase letters (A-Z) to lowercase + // (a-z) // - // * \f, formfeed, decimal - // 12 + // COMPRESS_WHITE_SPACE Use this option to replace the following characters + // with a space character (decimal 32): // - // * \t, tab, decimal 9 + // * \f, formfeed, decimal 12 // - // * \n, newline, decimal 10 + // * \t, tab, + // decimal 9 // - // * \r, carriage - // return, decimal 13 + // * \n, newline, decimal 10 // - // * \v, vertical tab, decimal 11 + // * \r, carriage return, decimal 13 // - // * non-breaking - // space, decimal 160 + // * \v, + // vertical tab, decimal 11 // - // COMPRESS_WHITE_SPACE also replaces multiple spaces with one - // space. 
HTML_ENTITY_DECODE Use this option to replace HTML-encoded characters - // with unencoded characters. HTML_ENTITY_DECODE performs the following - // operations: + // * non-breaking space, decimal + // 160 // - // * Replaces (ampersand)quot; with " + // COMPRESS_WHITE_SPACE also replaces multiple spaces with one space. + // HTML_ENTITY_DECODE Use this option to replace HTML-encoded characters with + // unencoded characters. HTML_ENTITY_DECODE performs the following operations: // - // * Replaces - // (ampersand)nbsp; with a non-breaking space, decimal 160 + // * + // Replaces (ampersand)quot; with " // - // * Replaces - // (ampersand)lt; with a "less than" symbol + // * Replaces (ampersand)nbsp; with a + // non-breaking space, decimal 160 // - // * Replaces (ampersand)gt; with > + // * Replaces (ampersand)lt; with a "less than" + // symbol // + // * Replaces (ampersand)gt; with > // - // * Replaces characters that are represented in hexadecimal format, - // (ampersand)#xhhhh;, with the corresponding characters + // * Replaces characters that are + // represented in hexadecimal format, (ampersand)#xhhhh;, with the corresponding + // characters // - // * Replaces characters - // that are represented in decimal format, (ampersand)#nnnn;, with the - // corresponding characters + // * Replaces characters that are represented in decimal format, + // (ampersand)#nnnn;, with the corresponding characters // - // LOWERCASE Use this option to convert uppercase letters - // (A-Z) to lowercase (a-z). URL_DECODE Use this option to decode a URL-encoded - // value. NONE Specify NONE if you don't want to perform any text transformations. + // LOWERCASE Use this option + // to convert uppercase letters (A-Z) to lowercase (a-z). URL_DECODE Use this + // option to decode a URL-encoded value. NONE Specify NONE if you don't want to + // perform any text transformations. // // This member is required. TextTransformation TextTransformation @@ -1158,16 +1157,16 @@ type RegexPatternSetUpdate struct { // objects that identify the web requests that you want to allow, block, or count. // For example, you might create a Rule that includes the following predicates: // +// * +// An IPSet that causes AWS WAF to search for web requests that originate from the +// IP address 192.0.2.44 // -// * An IPSet that causes AWS WAF to search for web requests that originate from -// the IP address 192.0.2.44 -// -// * A ByteMatchSet that causes AWS WAF to search -// for web requests for which the value of the User-Agent header is BadBot. +// * A ByteMatchSet that causes AWS WAF to search for web +// requests for which the value of the User-Agent header is BadBot. // -// To -// match the settings in this Rule, a request must originate from 192.0.2.44 AND -// include a User-Agent header for which the value is BadBot. +// To match the +// settings in this Rule, a request must originate from 192.0.2.44 AND include a +// User-Agent header for which the value is BadBot. type Rule struct { // The Predicates object contains one Predicate element for each ByteMatchSet, @@ -1205,13 +1204,13 @@ type Rule struct { // global use. A collection of predefined rules that you can add to a web ACL. Rule // groups are subject to the following limits: // -// * Three rule groups per -// account. You can request an increase to this limit by contacting customer -// support. +// * Three rule groups per account. +// You can request an increase to this limit by contacting customer support. // -// * One rule group per web ACL. 
+// * One +// rule group per web ACL. // -// * Ten rules per rule group. +// * Ten rules per rule group. type RuleGroup struct { // A unique identifier for a RuleGroup. You use RuleGroupId to get more information @@ -1425,63 +1424,62 @@ type SizeConstraint struct { // some or all of the command, use this option to perform the following // transformations: // - // * Delete the following characters: \ " ' ^ - // - // * Delete - // spaces before the following characters: / ( + // * Delete the following characters: \ " ' ^ // - // * Replace the following - // characters with a space: , ; + // * Delete spaces + // before the following characters: / ( // - // * Replace multiple spaces with one space + // * Replace the following characters with a + // space: , ; // + // * Replace multiple spaces with one space // - // * Convert uppercase letters (A-Z) to lowercase (a-z) + // * Convert uppercase + // letters (A-Z) to lowercase (a-z) // - // COMPRESS_WHITE_SPACE Use - // this option to replace the following characters with a space character (decimal - // 32): + // COMPRESS_WHITE_SPACE Use this option to + // replace the following characters with a space character (decimal 32): // - // * \f, formfeed, decimal 12 + // * \f, + // formfeed, decimal 12 // - // * \t, tab, decimal 9 + // * \t, tab, decimal 9 // - // * \n, - // newline, decimal 10 + // * \n, newline, decimal 10 // - // * \r, carriage return, decimal 13 + // * \r, + // carriage return, decimal 13 // - // * \v, vertical - // tab, decimal 11 + // * \v, vertical tab, decimal 11 // - // * non-breaking space, decimal 160 - // - // COMPRESS_WHITE_SPACE - // also replaces multiple spaces with one space. HTML_ENTITY_DECODE Use this option - // to replace HTML-encoded characters with unencoded characters. HTML_ENTITY_DECODE - // performs the following operations: + // * non-breaking + // space, decimal 160 // - // * Replaces (ampersand)quot; with " + // COMPRESS_WHITE_SPACE also replaces multiple spaces with one + // space. HTML_ENTITY_DECODE Use this option to replace HTML-encoded characters + // with unencoded characters. HTML_ENTITY_DECODE performs the following + // operations: // + // * Replaces (ampersand)quot; with " // - // * Replaces (ampersand)nbsp; with a non-breaking space, decimal 160 + // * Replaces (ampersand)nbsp; + // with a non-breaking space, decimal 160 // - // * - // Replaces (ampersand)lt; with a "less than" symbol + // * Replaces (ampersand)lt; with a "less + // than" symbol // - // * Replaces (ampersand)gt; - // with > + // * Replaces (ampersand)gt; with > // - // * Replaces characters that are represented in hexadecimal format, - // (ampersand)#xhhhh;, with the corresponding characters + // * Replaces characters that are + // represented in hexadecimal format, (ampersand)#xhhhh;, with the corresponding + // characters // - // * Replaces characters - // that are represented in decimal format, (ampersand)#nnnn;, with the - // corresponding characters + // * Replaces characters that are represented in decimal format, + // (ampersand)#nnnn;, with the corresponding characters // - // LOWERCASE Use this option to convert uppercase letters - // (A-Z) to lowercase (a-z). URL_DECODE Use this option to decode a URL-encoded - // value. + // LOWERCASE Use this option + // to convert uppercase letters (A-Z) to lowercase (a-z). URL_DECODE Use this + // option to decode a URL-encoded value. // // This member is required. 
TextTransformation TextTransformation @@ -1682,64 +1680,64 @@ type SqlInjectionMatchTuple struct { // command and using unusual formatting to disguise some or all of the command, use // this option to perform the following transformations: // - // * Delete the - // following characters: \ " ' ^ + // * Delete the following + // characters: \ " ' ^ // - // * Delete spaces before the following - // characters: / ( + // * Delete spaces before the following characters: / ( // - // * Replace the following characters with a space: , ; + // * + // Replace the following characters with a space: , ; // - // * - // Replace multiple spaces with one space + // * Replace multiple spaces + // with one space // - // * Convert uppercase letters (A-Z) to - // lowercase (a-z) + // * Convert uppercase letters (A-Z) to lowercase + // (a-z) // - // COMPRESS_WHITE_SPACE Use this option to replace the following - // characters with a space character (decimal 32): + // COMPRESS_WHITE_SPACE Use this option to replace the following characters + // with a space character (decimal 32): // - // * \f, formfeed, decimal - // 12 + // * \f, formfeed, decimal 12 // - // * \t, tab, decimal 9 + // * \t, tab, + // decimal 9 // - // * \n, newline, decimal 10 + // * \n, newline, decimal 10 // - // * \r, carriage - // return, decimal 13 + // * \r, carriage return, decimal 13 // - // * \v, vertical tab, decimal 11 + // * \v, + // vertical tab, decimal 11 // - // * non-breaking - // space, decimal 160 + // * non-breaking space, decimal + // 160 // - // COMPRESS_WHITE_SPACE also replaces multiple spaces with one - // space. HTML_ENTITY_DECODE Use this option to replace HTML-encoded characters - // with unencoded characters. HTML_ENTITY_DECODE performs the following - // operations: + // COMPRESS_WHITE_SPACE also replaces multiple spaces with one space. + // HTML_ENTITY_DECODE Use this option to replace HTML-encoded characters with + // unencoded characters. HTML_ENTITY_DECODE performs the following operations: // - // * Replaces (ampersand)quot; with " + // * + // Replaces (ampersand)quot; with " // - // * Replaces - // (ampersand)nbsp; with a non-breaking space, decimal 160 + // * Replaces (ampersand)nbsp; with a + // non-breaking space, decimal 160 // - // * Replaces - // (ampersand)lt; with a "less than" symbol + // * Replaces (ampersand)lt; with a "less than" + // symbol // - // * Replaces (ampersand)gt; with > + // * Replaces (ampersand)gt; with > // + // * Replaces characters that are + // represented in hexadecimal format, (ampersand)#xhhhh;, with the corresponding + // characters // - // * Replaces characters that are represented in hexadecimal format, - // (ampersand)#xhhhh;, with the corresponding characters + // * Replaces characters that are represented in decimal format, + // (ampersand)#nnnn;, with the corresponding characters // - // * Replaces characters - // that are represented in decimal format, (ampersand)#nnnn;, with the - // corresponding characters - // - // LOWERCASE Use this option to convert uppercase letters - // (A-Z) to lowercase (a-z). URL_DECODE Use this option to decode a URL-encoded - // value. NONE Specify NONE if you don't want to perform any text transformations. + // LOWERCASE Use this option + // to convert uppercase letters (A-Z) to lowercase (a-z). URL_DECODE Use this + // option to decode a URL-encoded value. NONE Specify NONE if you don't want to + // perform any text transformations. // // This member is required. 
TextTransformation TextTransformation @@ -1879,13 +1877,13 @@ type WafAction struct { // Specifies how you want AWS WAF to respond to requests that match the settings in // a Rule. Valid settings include the following: // - // * ALLOW: AWS WAF allows + // * ALLOW: AWS WAF allows // requests // - // * BLOCK: AWS WAF blocks requests + // * BLOCK: AWS WAF blocks requests // - // * COUNT: AWS WAF increments - // a counter of the requests that match all of the conditions in the rule. AWS WAF + // * COUNT: AWS WAF increments a + // counter of the requests that match all of the conditions in the rule. AWS WAF // then continues to inspect the web request based on the remaining rules in the // web ACL. You can't specify COUNT for the default action for a WebACL. // @@ -2117,64 +2115,64 @@ type XssMatchTuple struct { // command and using unusual formatting to disguise some or all of the command, use // this option to perform the following transformations: // - // * Delete the - // following characters: \ " ' ^ + // * Delete the following + // characters: \ " ' ^ // - // * Delete spaces before the following - // characters: / ( + // * Delete spaces before the following characters: / ( // - // * Replace the following characters with a space: , ; + // * + // Replace the following characters with a space: , ; // - // * - // Replace multiple spaces with one space + // * Replace multiple spaces + // with one space // - // * Convert uppercase letters (A-Z) to - // lowercase (a-z) + // * Convert uppercase letters (A-Z) to lowercase + // (a-z) // - // COMPRESS_WHITE_SPACE Use this option to replace the following - // characters with a space character (decimal 32): + // COMPRESS_WHITE_SPACE Use this option to replace the following characters + // with a space character (decimal 32): // - // * \f, formfeed, decimal - // 12 + // * \f, formfeed, decimal 12 // - // * \t, tab, decimal 9 + // * \t, tab, + // decimal 9 // - // * \n, newline, decimal 10 + // * \n, newline, decimal 10 // - // * \r, carriage - // return, decimal 13 + // * \r, carriage return, decimal 13 // - // * \v, vertical tab, decimal 11 + // * \v, + // vertical tab, decimal 11 // - // * non-breaking - // space, decimal 160 - // - // COMPRESS_WHITE_SPACE also replaces multiple spaces with one - // space. HTML_ENTITY_DECODE Use this option to replace HTML-encoded characters - // with unencoded characters. HTML_ENTITY_DECODE performs the following - // operations: + // * non-breaking space, decimal + // 160 // - // * Replaces (ampersand)quot; with " + // COMPRESS_WHITE_SPACE also replaces multiple spaces with one space. + // HTML_ENTITY_DECODE Use this option to replace HTML-encoded characters with + // unencoded characters. 
HTML_ENTITY_DECODE performs the following operations: // - // * Replaces - // (ampersand)nbsp; with a non-breaking space, decimal 160 + // * + // Replaces (ampersand)quot; with " // - // * Replaces - // (ampersand)lt; with a "less than" symbol + // * Replaces (ampersand)nbsp; with a + // non-breaking space, decimal 160 // - // * Replaces (ampersand)gt; with > + // * Replaces (ampersand)lt; with a "less than" + // symbol // + // * Replaces (ampersand)gt; with > // - // * Replaces characters that are represented in hexadecimal format, - // (ampersand)#xhhhh;, with the corresponding characters + // * Replaces characters that are + // represented in hexadecimal format, (ampersand)#xhhhh;, with the corresponding + // characters // - // * Replaces characters - // that are represented in decimal format, (ampersand)#nnnn;, with the - // corresponding characters + // * Replaces characters that are represented in decimal format, + // (ampersand)#nnnn;, with the corresponding characters // - // LOWERCASE Use this option to convert uppercase letters - // (A-Z) to lowercase (a-z). URL_DECODE Use this option to decode a URL-encoded - // value. NONE Specify NONE if you don't want to perform any text transformations. + // LOWERCASE Use this option + // to convert uppercase letters (A-Z) to lowercase (a-z). URL_DECODE Use this + // option to decode a URL-encoded value. NONE Specify NONE if you don't want to + // perform any text transformations. // // This member is required. TextTransformation TextTransformation diff --git a/service/wafv2/api_op_AssociateWebACL.go b/service/wafv2/api_op_AssociateWebACL.go index 27010985f3c..551344b1adb 100644 --- a/service/wafv2/api_op_AssociateWebACL.go +++ b/service/wafv2/api_op_AssociateWebACL.go @@ -42,16 +42,16 @@ type AssociateWebACLInput struct { // The Amazon Resource Name (ARN) of the resource to associate with the web ACL. // The ARN must be in one of the following formats: // - // * For an Application Load + // * For an Application Load // Balancer: // arn:aws:elasticloadbalancing:region:account-id:loadbalancer/app/load-balancer-name/load-balancer-id // - // - // * For an API Gateway REST API: + // * + // For an API Gateway REST API: // arn:aws:apigateway:region::/restapis/api-id/stages/stage-name // - // * For an - // AppSync GraphQL API: arn:aws:appsync:region:account-id:apis/GraphQLApiId + // * For an AppSync + // GraphQL API: arn:aws:appsync:region:account-id:apis/GraphQLApiId // // This member is required. ResourceArn *string diff --git a/service/wafv2/api_op_CheckCapacity.go b/service/wafv2/api_op_CheckCapacity.go index 79ce962073f..0ee814bdb63 100644 --- a/service/wafv2/api_op_CheckCapacity.go +++ b/service/wafv2/api_op_CheckCapacity.go @@ -51,11 +51,11 @@ type CheckCapacityInput struct { // an API Gateway REST API, or an AppSync GraphQL API. To work with CloudFront, you // must also specify the Region US East (N. Virginia) as follows: // - // * CLI - - // Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT + // * CLI - Specify + // the Region when you use the CloudFront scope: --scope=CLOUDFRONT // --region=us-east-1. // - // * API and SDKs - For all calls, use the Region endpoint + // * API and SDKs - For all calls, use the Region endpoint // us-east-1. // // This member is required. 
diff --git a/service/wafv2/api_op_CreateIPSet.go b/service/wafv2/api_op_CreateIPSet.go index 53d67c9f96c..e9de12eaf38 100644 --- a/service/wafv2/api_op_CreateIPSet.go +++ b/service/wafv2/api_op_CreateIPSet.go @@ -40,20 +40,19 @@ type CreateIPSetInput struct { // IP addresses in Classless Inter-Domain Routing (CIDR) notation. AWS WAF supports // all address ranges for IP versions IPv4 and IPv6. Examples: // - // * To configure - // AWS WAF to allow, block, or count requests that originated from the IP address + // * To configure AWS + // WAF to allow, block, or count requests that originated from the IP address // 192.0.2.44, specify 192.0.2.44/32. // - // * To configure AWS WAF to allow, block, - // or count requests that originated from IP addresses from 192.0.2.0 to - // 192.0.2.255, specify 192.0.2.0/24. + // * To configure AWS WAF to allow, block, or + // count requests that originated from IP addresses from 192.0.2.0 to 192.0.2.255, + // specify 192.0.2.0/24. // - // * To configure AWS WAF to allow, block, - // or count requests that originated from the IP address - // 1111:0000:0000:0000:0000:0000:0000:0111, specify - // 1111:0000:0000:0000:0000:0000:0000:0111/128. + // * To configure AWS WAF to allow, block, or count requests + // that originated from the IP address 1111:0000:0000:0000:0000:0000:0000:0111, + // specify 1111:0000:0000:0000:0000:0000:0000:0111/128. // - // * To configure AWS WAF to + // * To configure AWS WAF to // allow, block, or count requests that originated from IP addresses // 1111:0000:0000:0000:0000:0000:0000:0000 to // 1111:0000:0000:0000:ffff:ffff:ffff:ffff, specify @@ -82,11 +81,11 @@ type CreateIPSetInput struct { // an API Gateway REST API, or an AppSync GraphQL API. To work with CloudFront, you // must also specify the Region US East (N. Virginia) as follows: // - // * CLI - - // Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT + // * CLI - Specify + // the Region when you use the CloudFront scope: --scope=CLOUDFRONT // --region=us-east-1. // - // * API and SDKs - For all calls, use the Region endpoint + // * API and SDKs - For all calls, use the Region endpoint // us-east-1. // // This member is required. diff --git a/service/wafv2/api_op_CreateRegexPatternSet.go b/service/wafv2/api_op_CreateRegexPatternSet.go index a57a55f4d97..06652318c23 100644 --- a/service/wafv2/api_op_CreateRegexPatternSet.go +++ b/service/wafv2/api_op_CreateRegexPatternSet.go @@ -50,11 +50,11 @@ type CreateRegexPatternSetInput struct { // an API Gateway REST API, or an AppSync GraphQL API. To work with CloudFront, you // must also specify the Region US East (N. Virginia) as follows: // - // * CLI - - // Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT + // * CLI - Specify + // the Region when you use the CloudFront scope: --scope=CLOUDFRONT // --region=us-east-1. // - // * API and SDKs - For all calls, use the Region endpoint + // * API and SDKs - For all calls, use the Region endpoint // us-east-1. // // This member is required. diff --git a/service/wafv2/api_op_CreateRuleGroup.go b/service/wafv2/api_op_CreateRuleGroup.go index cc9d79f4232..4d410844514 100644 --- a/service/wafv2/api_op_CreateRuleGroup.go +++ b/service/wafv2/api_op_CreateRuleGroup.go @@ -63,11 +63,11 @@ type CreateRuleGroupInput struct { // an API Gateway REST API, or an AppSync GraphQL API. To work with CloudFront, you // must also specify the Region US East (N. 
Virginia) as follows: // - // * CLI - - // Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT + // * CLI - Specify + // the Region when you use the CloudFront scope: --scope=CLOUDFRONT // --region=us-east-1. // - // * API and SDKs - For all calls, use the Region endpoint + // * API and SDKs - For all calls, use the Region endpoint // us-east-1. // // This member is required. diff --git a/service/wafv2/api_op_CreateWebACL.go b/service/wafv2/api_op_CreateWebACL.go index 72c20a39d7c..cbd6dacbe28 100644 --- a/service/wafv2/api_op_CreateWebACL.go +++ b/service/wafv2/api_op_CreateWebACL.go @@ -57,11 +57,11 @@ type CreateWebACLInput struct { // an API Gateway REST API, or an AppSync GraphQL API. To work with CloudFront, you // must also specify the Region US East (N. Virginia) as follows: // - // * CLI - - // Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT + // * CLI - Specify + // the Region when you use the CloudFront scope: --scope=CLOUDFRONT // --region=us-east-1. // - // * API and SDKs - For all calls, use the Region endpoint + // * API and SDKs - For all calls, use the Region endpoint // us-east-1. // // This member is required. diff --git a/service/wafv2/api_op_DeleteIPSet.go b/service/wafv2/api_op_DeleteIPSet.go index da9dedbd65b..ce8de337e54 100644 --- a/service/wafv2/api_op_DeleteIPSet.go +++ b/service/wafv2/api_op_DeleteIPSet.go @@ -61,11 +61,11 @@ type DeleteIPSetInput struct { // an API Gateway REST API, or an AppSync GraphQL API. To work with CloudFront, you // must also specify the Region US East (N. Virginia) as follows: // - // * CLI - - // Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT + // * CLI - Specify + // the Region when you use the CloudFront scope: --scope=CLOUDFRONT // --region=us-east-1. // - // * API and SDKs - For all calls, use the Region endpoint + // * API and SDKs - For all calls, use the Region endpoint // us-east-1. // // This member is required. diff --git a/service/wafv2/api_op_DeleteRegexPatternSet.go b/service/wafv2/api_op_DeleteRegexPatternSet.go index 1b01d6a28c2..ede247da273 100644 --- a/service/wafv2/api_op_DeleteRegexPatternSet.go +++ b/service/wafv2/api_op_DeleteRegexPatternSet.go @@ -60,11 +60,11 @@ type DeleteRegexPatternSetInput struct { // an API Gateway REST API, or an AppSync GraphQL API. To work with CloudFront, you // must also specify the Region US East (N. Virginia) as follows: // - // * CLI - - // Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT + // * CLI - Specify + // the Region when you use the CloudFront scope: --scope=CLOUDFRONT // --region=us-east-1. // - // * API and SDKs - For all calls, use the Region endpoint + // * API and SDKs - For all calls, use the Region endpoint // us-east-1. // // This member is required. diff --git a/service/wafv2/api_op_DeleteRuleGroup.go b/service/wafv2/api_op_DeleteRuleGroup.go index 94829b62259..85a68fe17e6 100644 --- a/service/wafv2/api_op_DeleteRuleGroup.go +++ b/service/wafv2/api_op_DeleteRuleGroup.go @@ -61,11 +61,11 @@ type DeleteRuleGroupInput struct { // an API Gateway REST API, or an AppSync GraphQL API. To work with CloudFront, you // must also specify the Region US East (N. Virginia) as follows: // - // * CLI - - // Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT + // * CLI - Specify + // the Region when you use the CloudFront scope: --scope=CLOUDFRONT // --region=us-east-1. 
// - // * API and SDKs - For all calls, use the Region endpoint + // * API and SDKs - For all calls, use the Region endpoint // us-east-1. // // This member is required. diff --git a/service/wafv2/api_op_DeleteWebACL.go b/service/wafv2/api_op_DeleteWebACL.go index 36c1ea57e7b..21fb856af0e 100644 --- a/service/wafv2/api_op_DeleteWebACL.go +++ b/service/wafv2/api_op_DeleteWebACL.go @@ -62,11 +62,11 @@ type DeleteWebACLInput struct { // an API Gateway REST API, or an AppSync GraphQL API. To work with CloudFront, you // must also specify the Region US East (N. Virginia) as follows: // - // * CLI - - // Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT + // * CLI - Specify + // the Region when you use the CloudFront scope: --scope=CLOUDFRONT // --region=us-east-1. // - // * API and SDKs - For all calls, use the Region endpoint + // * API and SDKs - For all calls, use the Region endpoint // us-east-1. // // This member is required. diff --git a/service/wafv2/api_op_DescribeManagedRuleGroup.go b/service/wafv2/api_op_DescribeManagedRuleGroup.go index 44588308965..42c5e95735d 100644 --- a/service/wafv2/api_op_DescribeManagedRuleGroup.go +++ b/service/wafv2/api_op_DescribeManagedRuleGroup.go @@ -45,11 +45,11 @@ type DescribeManagedRuleGroupInput struct { // an API Gateway REST API, or an AppSync GraphQL API. To work with CloudFront, you // must also specify the Region US East (N. Virginia) as follows: // - // * CLI - - // Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT + // * CLI - Specify + // the Region when you use the CloudFront scope: --scope=CLOUDFRONT // --region=us-east-1. // - // * API and SDKs - For all calls, use the Region endpoint + // * API and SDKs - For all calls, use the Region endpoint // us-east-1. // // This member is required. diff --git a/service/wafv2/api_op_DisassociateWebACL.go b/service/wafv2/api_op_DisassociateWebACL.go index 6e3acf68ac3..0ebbe00cb94 100644 --- a/service/wafv2/api_op_DisassociateWebACL.go +++ b/service/wafv2/api_op_DisassociateWebACL.go @@ -41,16 +41,16 @@ type DisassociateWebACLInput struct { // The Amazon Resource Name (ARN) of the resource to disassociate from the web ACL. // The ARN must be in one of the following formats: // - // * For an Application Load + // * For an Application Load // Balancer: // arn:aws:elasticloadbalancing:region:account-id:loadbalancer/app/load-balancer-name/load-balancer-id // - // - // * For an API Gateway REST API: + // * + // For an API Gateway REST API: // arn:aws:apigateway:region::/restapis/api-id/stages/stage-name // - // * For an - // AppSync GraphQL API: arn:aws:appsync:region:account-id:apis/GraphQLApiId + // * For an AppSync + // GraphQL API: arn:aws:appsync:region:account-id:apis/GraphQLApiId // // This member is required. ResourceArn *string diff --git a/service/wafv2/api_op_GetIPSet.go b/service/wafv2/api_op_GetIPSet.go index 66d3c0bdea2..1476e503e28 100644 --- a/service/wafv2/api_op_GetIPSet.go +++ b/service/wafv2/api_op_GetIPSet.go @@ -50,11 +50,11 @@ type GetIPSetInput struct { // an API Gateway REST API, or an AppSync GraphQL API. To work with CloudFront, you // must also specify the Region US East (N. Virginia) as follows: // - // * CLI - - // Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT + // * CLI - Specify + // the Region when you use the CloudFront scope: --scope=CLOUDFRONT // --region=us-east-1. 
// - // * API and SDKs - For all calls, use the Region endpoint + // * API and SDKs - For all calls, use the Region endpoint // us-east-1. // // This member is required. diff --git a/service/wafv2/api_op_GetRateBasedStatementManagedKeys.go b/service/wafv2/api_op_GetRateBasedStatementManagedKeys.go index a12d76fa857..e99d568e605 100644 --- a/service/wafv2/api_op_GetRateBasedStatementManagedKeys.go +++ b/service/wafv2/api_op_GetRateBasedStatementManagedKeys.go @@ -46,11 +46,11 @@ type GetRateBasedStatementManagedKeysInput struct { // an API Gateway REST API, or an AppSync GraphQL API. To work with CloudFront, you // must also specify the Region US East (N. Virginia) as follows: // - // * CLI - - // Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT + // * CLI - Specify + // the Region when you use the CloudFront scope: --scope=CLOUDFRONT // --region=us-east-1. // - // * API and SDKs - For all calls, use the Region endpoint + // * API and SDKs - For all calls, use the Region endpoint // us-east-1. // // This member is required. diff --git a/service/wafv2/api_op_GetRegexPatternSet.go b/service/wafv2/api_op_GetRegexPatternSet.go index ad12fb00819..4cb788f920f 100644 --- a/service/wafv2/api_op_GetRegexPatternSet.go +++ b/service/wafv2/api_op_GetRegexPatternSet.go @@ -49,11 +49,11 @@ type GetRegexPatternSetInput struct { // an API Gateway REST API, or an AppSync GraphQL API. To work with CloudFront, you // must also specify the Region US East (N. Virginia) as follows: // - // * CLI - - // Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT + // * CLI - Specify + // the Region when you use the CloudFront scope: --scope=CLOUDFRONT // --region=us-east-1. // - // * API and SDKs - For all calls, use the Region endpoint + // * API and SDKs - For all calls, use the Region endpoint // us-east-1. // // This member is required. diff --git a/service/wafv2/api_op_GetRuleGroup.go b/service/wafv2/api_op_GetRuleGroup.go index 4c5ecc8f465..902d2d7140a 100644 --- a/service/wafv2/api_op_GetRuleGroup.go +++ b/service/wafv2/api_op_GetRuleGroup.go @@ -50,11 +50,11 @@ type GetRuleGroupInput struct { // an API Gateway REST API, or an AppSync GraphQL API. To work with CloudFront, you // must also specify the Region US East (N. Virginia) as follows: // - // * CLI - - // Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT + // * CLI - Specify + // the Region when you use the CloudFront scope: --scope=CLOUDFRONT // --region=us-east-1. // - // * API and SDKs - For all calls, use the Region endpoint + // * API and SDKs - For all calls, use the Region endpoint // us-east-1. // // This member is required. diff --git a/service/wafv2/api_op_GetSampledRequests.go b/service/wafv2/api_op_GetSampledRequests.go index b6ae7356077..74ce738bbed 100644 --- a/service/wafv2/api_op_GetSampledRequests.go +++ b/service/wafv2/api_op_GetSampledRequests.go @@ -60,11 +60,11 @@ type GetSampledRequestsInput struct { // an API Gateway REST API, or an AppSync GraphQL API. To work with CloudFront, you // must also specify the Region US East (N. Virginia) as follows: // - // * CLI - - // Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT + // * CLI - Specify + // the Region when you use the CloudFront scope: --scope=CLOUDFRONT // --region=us-east-1. // - // * API and SDKs - For all calls, use the Region endpoint + // * API and SDKs - For all calls, use the Region endpoint // us-east-1. // // This member is required. 
diff --git a/service/wafv2/api_op_GetWebACL.go b/service/wafv2/api_op_GetWebACL.go index a54f260edf8..687daa30bd7 100644 --- a/service/wafv2/api_op_GetWebACL.go +++ b/service/wafv2/api_op_GetWebACL.go @@ -50,11 +50,11 @@ type GetWebACLInput struct { // an API Gateway REST API, or an AppSync GraphQL API. To work with CloudFront, you // must also specify the Region US East (N. Virginia) as follows: // - // * CLI - - // Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT + // * CLI - Specify + // the Region when you use the CloudFront scope: --scope=CLOUDFRONT // --region=us-east-1. // - // * API and SDKs - For all calls, use the Region endpoint + // * API and SDKs - For all calls, use the Region endpoint // us-east-1. // // This member is required. diff --git a/service/wafv2/api_op_ListAvailableManagedRuleGroups.go b/service/wafv2/api_op_ListAvailableManagedRuleGroups.go index cd29863ec04..e207db4a2fd 100644 --- a/service/wafv2/api_op_ListAvailableManagedRuleGroups.go +++ b/service/wafv2/api_op_ListAvailableManagedRuleGroups.go @@ -40,11 +40,11 @@ type ListAvailableManagedRuleGroupsInput struct { // an API Gateway REST API, or an AppSync GraphQL API. To work with CloudFront, you // must also specify the Region US East (N. Virginia) as follows: // - // * CLI - - // Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT + // * CLI - Specify + // the Region when you use the CloudFront scope: --scope=CLOUDFRONT // --region=us-east-1. // - // * API and SDKs - For all calls, use the Region endpoint + // * API and SDKs - For all calls, use the Region endpoint // us-east-1. // // This member is required. diff --git a/service/wafv2/api_op_ListIPSets.go b/service/wafv2/api_op_ListIPSets.go index 3d62732c434..66f51a4d20f 100644 --- a/service/wafv2/api_op_ListIPSets.go +++ b/service/wafv2/api_op_ListIPSets.go @@ -38,11 +38,11 @@ type ListIPSetsInput struct { // an API Gateway REST API, or an AppSync GraphQL API. To work with CloudFront, you // must also specify the Region US East (N. Virginia) as follows: // - // * CLI - - // Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT + // * CLI - Specify + // the Region when you use the CloudFront scope: --scope=CLOUDFRONT // --region=us-east-1. // - // * API and SDKs - For all calls, use the Region endpoint + // * API and SDKs - For all calls, use the Region endpoint // us-east-1. // // This member is required. diff --git a/service/wafv2/api_op_ListLoggingConfigurations.go b/service/wafv2/api_op_ListLoggingConfigurations.go index 64e38f01d12..dc03cebaa69 100644 --- a/service/wafv2/api_op_ListLoggingConfigurations.go +++ b/service/wafv2/api_op_ListLoggingConfigurations.go @@ -49,11 +49,11 @@ type ListLoggingConfigurationsInput struct { // an API Gateway REST API, or an AppSync GraphQL API. To work with CloudFront, you // must also specify the Region US East (N. Virginia) as follows: // - // * CLI - - // Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT + // * CLI - Specify + // the Region when you use the CloudFront scope: --scope=CLOUDFRONT // --region=us-east-1. // - // * API and SDKs - For all calls, use the Region endpoint + // * API and SDKs - For all calls, use the Region endpoint // us-east-1. 
Scope types.Scope } diff --git a/service/wafv2/api_op_ListRegexPatternSets.go b/service/wafv2/api_op_ListRegexPatternSets.go index 9ace5bdfbd0..6f6b830b708 100644 --- a/service/wafv2/api_op_ListRegexPatternSets.go +++ b/service/wafv2/api_op_ListRegexPatternSets.go @@ -39,11 +39,11 @@ type ListRegexPatternSetsInput struct { // an API Gateway REST API, or an AppSync GraphQL API. To work with CloudFront, you // must also specify the Region US East (N. Virginia) as follows: // - // * CLI - - // Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT + // * CLI - Specify + // the Region when you use the CloudFront scope: --scope=CLOUDFRONT // --region=us-east-1. // - // * API and SDKs - For all calls, use the Region endpoint + // * API and SDKs - For all calls, use the Region endpoint // us-east-1. // // This member is required. diff --git a/service/wafv2/api_op_ListRuleGroups.go b/service/wafv2/api_op_ListRuleGroups.go index fff3423120d..d24a903cd5d 100644 --- a/service/wafv2/api_op_ListRuleGroups.go +++ b/service/wafv2/api_op_ListRuleGroups.go @@ -39,11 +39,11 @@ type ListRuleGroupsInput struct { // an API Gateway REST API, or an AppSync GraphQL API. To work with CloudFront, you // must also specify the Region US East (N. Virginia) as follows: // - // * CLI - - // Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT + // * CLI - Specify + // the Region when you use the CloudFront scope: --scope=CLOUDFRONT // --region=us-east-1. // - // * API and SDKs - For all calls, use the Region endpoint + // * API and SDKs - For all calls, use the Region endpoint // us-east-1. // // This member is required. diff --git a/service/wafv2/api_op_ListWebACLs.go b/service/wafv2/api_op_ListWebACLs.go index 528eaf68a28..af5beb4b5cb 100644 --- a/service/wafv2/api_op_ListWebACLs.go +++ b/service/wafv2/api_op_ListWebACLs.go @@ -38,11 +38,11 @@ type ListWebACLsInput struct { // an API Gateway REST API, or an AppSync GraphQL API. To work with CloudFront, you // must also specify the Region US East (N. Virginia) as follows: // - // * CLI - - // Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT + // * CLI - Specify + // the Region when you use the CloudFront scope: --scope=CLOUDFRONT // --region=us-east-1. // - // * API and SDKs - For all calls, use the Region endpoint + // * API and SDKs - For all calls, use the Region endpoint // us-east-1. // // This member is required. diff --git a/service/wafv2/api_op_PutLoggingConfiguration.go b/service/wafv2/api_op_PutLoggingConfiguration.go index 44d342f2921..fa3cdebaaa2 100644 --- a/service/wafv2/api_op_PutLoggingConfiguration.go +++ b/service/wafv2/api_op_PutLoggingConfiguration.go @@ -19,7 +19,7 @@ import ( // according to the configuration provided. You can access information about all // traffic that AWS WAF inspects using the following steps: // -// * Create an Amazon +// * Create an Amazon // Kinesis Data Firehose. Create the data firehose with a PUT source and in the // Region that you are operating. If you are capturing logs for Amazon CloudFront, // always create the firehose in US East (N. Virginia). Give the data firehose a @@ -27,13 +27,13 @@ import ( // aws-waf-logs-us-east-2-analytics. Do not create the data firehose using a // Kinesis stream as your source. // -// * Associate that firehose to your web ACL -// using a PutLoggingConfiguration request. +// * Associate that firehose to your web ACL using +// a PutLoggingConfiguration request. 
// -// When you successfully enable logging -// using a PutLoggingConfiguration request, AWS WAF will create a service linked -// role with the necessary permissions to write logs to the Amazon Kinesis Data -// Firehose. For more information, see Logging Web ACL Traffic Information +// When you successfully enable logging using a +// PutLoggingConfiguration request, AWS WAF will create a service linked role with +// the necessary permissions to write logs to the Amazon Kinesis Data Firehose. For +// more information, see Logging Web ACL Traffic Information // (https://docs.aws.amazon.com/waf/latest/developerguide/logging.html) in the AWS // WAF Developer Guide. func (c *Client) PutLoggingConfiguration(ctx context.Context, params *PutLoggingConfigurationInput, optFns ...func(*Options)) (*PutLoggingConfigurationOutput, error) { diff --git a/service/wafv2/api_op_PutPermissionPolicy.go b/service/wafv2/api_op_PutPermissionPolicy.go index 4b3a9320713..0f3ff0a2d35 100644 --- a/service/wafv2/api_op_PutPermissionPolicy.go +++ b/service/wafv2/api_op_PutPermissionPolicy.go @@ -14,15 +14,14 @@ import ( // across accounts. You must be the owner of the rule group to perform this // operation. This action is subject to the following restrictions: // -// * You can +// * You can // attach only one policy with each PutPermissionPolicy request. // -// * The ARN in -// the request must be a valid WAF RuleGroup ARN and the rule group must exist in -// the same region. +// * The ARN in the +// request must be a valid WAF RuleGroup ARN and the rule group must exist in the +// same region. // -// * The user making the request must be the owner of the -// rule group. +// * The user making the request must be the owner of the rule group. func (c *Client) PutPermissionPolicy(ctx context.Context, params *PutPermissionPolicyInput, optFns ...func(*Options)) (*PutPermissionPolicyOutput, error) { if params == nil { params = &PutPermissionPolicyInput{} @@ -43,20 +42,20 @@ type PutPermissionPolicyInput struct { // The policy to attach to the specified rule group. The policy specifications must // conform to the following: // - // * The policy must be composed using IAM Policy + // * The policy must be composed using IAM Policy // version 2012-10-17 or version 2015-01-01. // - // * The policy must include + // * The policy must include // specifications for Effect, Action, and Principal. // - // * Effect must specify + // * Effect must specify // Allow. // - // * Action must specify wafv2:CreateWebACL, wafv2:UpdateWebACL, and + // * Action must specify wafv2:CreateWebACL, wafv2:UpdateWebACL, and // wafv2:PutFirewallManagerRuleGroups. AWS WAF rejects any extra actions or // wildcard actions in the policy. // - // * The policy must not include a Resource + // * The policy must not include a Resource // parameter. // // For more information, see IAM Policies diff --git a/service/wafv2/api_op_UpdateIPSet.go b/service/wafv2/api_op_UpdateIPSet.go index 377d1e3870f..8417bb691a7 100644 --- a/service/wafv2/api_op_UpdateIPSet.go +++ b/service/wafv2/api_op_UpdateIPSet.go @@ -37,20 +37,19 @@ type UpdateIPSetInput struct { // IP addresses in Classless Inter-Domain Routing (CIDR) notation. AWS WAF supports // all address ranges for IP versions IPv4 and IPv6. Examples: // - // * To configure - // AWS WAF to allow, block, or count requests that originated from the IP address + // * To configure AWS + // WAF to allow, block, or count requests that originated from the IP address // 192.0.2.44, specify 192.0.2.44/32. 
// - // * To configure AWS WAF to allow, block, - // or count requests that originated from IP addresses from 192.0.2.0 to - // 192.0.2.255, specify 192.0.2.0/24. + // * To configure AWS WAF to allow, block, or + // count requests that originated from IP addresses from 192.0.2.0 to 192.0.2.255, + // specify 192.0.2.0/24. // - // * To configure AWS WAF to allow, block, - // or count requests that originated from the IP address - // 1111:0000:0000:0000:0000:0000:0000:0111, specify - // 1111:0000:0000:0000:0000:0000:0000:0111/128. + // * To configure AWS WAF to allow, block, or count requests + // that originated from the IP address 1111:0000:0000:0000:0000:0000:0000:0111, + // specify 1111:0000:0000:0000:0000:0000:0000:0111/128. // - // * To configure AWS WAF to + // * To configure AWS WAF to // allow, block, or count requests that originated from IP addresses // 1111:0000:0000:0000:0000:0000:0000:0000 to // 1111:0000:0000:0000:ffff:ffff:ffff:ffff, specify @@ -91,11 +90,11 @@ type UpdateIPSetInput struct { // an API Gateway REST API, or an AppSync GraphQL API. To work with CloudFront, you // must also specify the Region US East (N. Virginia) as follows: // - // * CLI - - // Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT + // * CLI - Specify + // the Region when you use the CloudFront scope: --scope=CLOUDFRONT // --region=us-east-1. // - // * API and SDKs - For all calls, use the Region endpoint + // * API and SDKs - For all calls, use the Region endpoint // us-east-1. // // This member is required. diff --git a/service/wafv2/api_op_UpdateRegexPatternSet.go b/service/wafv2/api_op_UpdateRegexPatternSet.go index 9f37a0a7ce7..1a1fb5ebe9f 100644 --- a/service/wafv2/api_op_UpdateRegexPatternSet.go +++ b/service/wafv2/api_op_UpdateRegexPatternSet.go @@ -65,11 +65,11 @@ type UpdateRegexPatternSetInput struct { // an API Gateway REST API, or an AppSync GraphQL API. To work with CloudFront, you // must also specify the Region US East (N. Virginia) as follows: // - // * CLI - - // Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT + // * CLI - Specify + // the Region when you use the CloudFront scope: --scope=CLOUDFRONT // --region=us-east-1. // - // * API and SDKs - For all calls, use the Region endpoint + // * API and SDKs - For all calls, use the Region endpoint // us-east-1. // // This member is required. diff --git a/service/wafv2/api_op_UpdateRuleGroup.go b/service/wafv2/api_op_UpdateRuleGroup.go index cf5631cc7b0..32d95d094d3 100644 --- a/service/wafv2/api_op_UpdateRuleGroup.go +++ b/service/wafv2/api_op_UpdateRuleGroup.go @@ -65,11 +65,11 @@ type UpdateRuleGroupInput struct { // an API Gateway REST API, or an AppSync GraphQL API. To work with CloudFront, you // must also specify the Region US East (N. Virginia) as follows: // - // * CLI - - // Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT + // * CLI - Specify + // the Region when you use the CloudFront scope: --scope=CLOUDFRONT // --region=us-east-1. // - // * API and SDKs - For all calls, use the Region endpoint + // * API and SDKs - For all calls, use the Region endpoint // us-east-1. // // This member is required. diff --git a/service/wafv2/api_op_UpdateWebACL.go b/service/wafv2/api_op_UpdateWebACL.go index cb30efb22b6..e2a965c1863 100644 --- a/service/wafv2/api_op_UpdateWebACL.go +++ b/service/wafv2/api_op_UpdateWebACL.go @@ -74,11 +74,11 @@ type UpdateWebACLInput struct { // an API Gateway REST API, or an AppSync GraphQL API. 
To work with CloudFront, you // must also specify the Region US East (N. Virginia) as follows: // - // * CLI - - // Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT + // * CLI - Specify + // the Region when you use the CloudFront scope: --scope=CLOUDFRONT // --region=us-east-1. // - // * API and SDKs - For all calls, use the Region endpoint + // * API and SDKs - For all calls, use the Region endpoint // us-east-1. // // This member is required. diff --git a/service/wafv2/doc.go b/service/wafv2/doc.go index b727ab8129f..71f0897e807 100644 --- a/service/wafv2/doc.go +++ b/service/wafv2/doc.go @@ -31,33 +31,32 @@ // using the endpoints listed in AWS Service Endpoints for AWS WAF // (https://docs.aws.amazon.com/general/latest/gr/rande.html#waf_region). // -// * -// For regional applications, you can use any of the endpoints in the list. A -// regional application can be an Application Load Balancer (ALB), an API Gateway -// REST API, or an AppSync GraphQL API. +// * For +// regional applications, you can use any of the endpoints in the list. A regional +// application can be an Application Load Balancer (ALB), an API Gateway REST API, +// or an AppSync GraphQL API. // -// * For AWS CloudFront applications, you -// must use the API endpoint listed for US East (N. Virginia): -// us-east-1. +// * For AWS CloudFront applications, you must use the +// API endpoint listed for US East (N. Virginia): us-east-1. // -// Alternatively, you can use one of the AWS SDKs to access an API -// that's tailored to the programming language or platform that you're using. For -// more information, see AWS SDKs (http://aws.amazon.com/tools/#SDKs). We currently -// provide two versions of the AWS WAF API: this API and the prior versions, the -// classic AWS WAF APIs. This new API provides the same functionality as the older -// versions, with the following major improvements: +// Alternatively, you +// can use one of the AWS SDKs to access an API that's tailored to the programming +// language or platform that you're using. For more information, see AWS SDKs +// (http://aws.amazon.com/tools/#SDKs). We currently provide two versions of the +// AWS WAF API: this API and the prior versions, the classic AWS WAF APIs. This new +// API provides the same functionality as the older versions, with the following +// major improvements: // -// * You use one API for both -// global and regional applications. Where you need to distinguish the scope, you -// specify a Scope parameter and set it to CLOUDFRONT or REGIONAL. +// * You use one API for both global and regional +// applications. Where you need to distinguish the scope, you specify a Scope +// parameter and set it to CLOUDFRONT or REGIONAL. // -// * You can -// define a Web ACL or rule group with a single call, and update it with a single -// call. You define all rule specifications in JSON format, and pass them to your -// rule group or Web ACL calls. +// * You can define a Web ACL or +// rule group with a single call, and update it with a single call. You define all +// rule specifications in JSON format, and pass them to your rule group or Web ACL +// calls. // -// * The limits AWS WAF places on the use of -// rules more closely reflects the cost of running each type of rule. Rule groups -// include capacity settings, so you know the maximum cost of a rule group when you -// use it. +// * The limits AWS WAF places on the use of rules more closely reflects +// the cost of running each type of rule. 
Rule groups include capacity settings, so +// you know the maximum cost of a rule group when you use it. package wafv2 diff --git a/service/wafv2/types/enums.go b/service/wafv2/types/enums.go index 317daa578b3..61f3c2e71ba 100644 --- a/service/wafv2/types/enums.go +++ b/service/wafv2/types/enums.go @@ -544,8 +544,8 @@ type FallbackBehavior string // Enum values for FallbackBehavior const ( - FallbackBehaviorMatch FallbackBehavior = "MATCH" - FallbackBehaviorNo_match FallbackBehavior = "NO_MATCH" + FallbackBehaviorMatch FallbackBehavior = "MATCH" + FallbackBehaviorNoMatch FallbackBehavior = "NO_MATCH" ) // Values returns all known values for FallbackBehavior. Note that this can be @@ -600,49 +600,49 @@ type ParameterExceptionField string // Enum values for ParameterExceptionField const ( - ParameterExceptionFieldWeb_acl ParameterExceptionField = "WEB_ACL" - ParameterExceptionFieldRule_group ParameterExceptionField = "RULE_GROUP" - ParameterExceptionFieldRegex_pattern_set ParameterExceptionField = "REGEX_PATTERN_SET" - ParameterExceptionFieldIp_set ParameterExceptionField = "IP_SET" - ParameterExceptionFieldManaged_rule_set ParameterExceptionField = "MANAGED_RULE_SET" - ParameterExceptionFieldRule ParameterExceptionField = "RULE" - ParameterExceptionFieldExcluded_rule ParameterExceptionField = "EXCLUDED_RULE" - ParameterExceptionFieldStatement ParameterExceptionField = "STATEMENT" - ParameterExceptionFieldByte_match_statement ParameterExceptionField = "BYTE_MATCH_STATEMENT" - ParameterExceptionFieldSqli_match_statement ParameterExceptionField = "SQLI_MATCH_STATEMENT" - ParameterExceptionFieldXss_match_statement ParameterExceptionField = "XSS_MATCH_STATEMENT" - ParameterExceptionFieldSize_constraint_statement ParameterExceptionField = "SIZE_CONSTRAINT_STATEMENT" - ParameterExceptionFieldGeo_match_statement ParameterExceptionField = "GEO_MATCH_STATEMENT" - ParameterExceptionFieldRate_based_statement ParameterExceptionField = "RATE_BASED_STATEMENT" - ParameterExceptionFieldRule_group_reference_statement ParameterExceptionField = "RULE_GROUP_REFERENCE_STATEMENT" - ParameterExceptionFieldRegex_pattern_reference_statement ParameterExceptionField = "REGEX_PATTERN_REFERENCE_STATEMENT" - ParameterExceptionFieldIp_set_reference_statement ParameterExceptionField = "IP_SET_REFERENCE_STATEMENT" - ParameterExceptionFieldManaged_rule_set_statement ParameterExceptionField = "MANAGED_RULE_SET_STATEMENT" - ParameterExceptionFieldAnd_statement ParameterExceptionField = "AND_STATEMENT" - ParameterExceptionFieldOr_statement ParameterExceptionField = "OR_STATEMENT" - ParameterExceptionFieldNot_statement ParameterExceptionField = "NOT_STATEMENT" - ParameterExceptionFieldIp_address ParameterExceptionField = "IP_ADDRESS" - ParameterExceptionFieldIp_address_version ParameterExceptionField = "IP_ADDRESS_VERSION" - ParameterExceptionFieldField_to_match ParameterExceptionField = "FIELD_TO_MATCH" - ParameterExceptionFieldText_transformation ParameterExceptionField = "TEXT_TRANSFORMATION" - ParameterExceptionFieldSingle_query_argument ParameterExceptionField = "SINGLE_QUERY_ARGUMENT" - ParameterExceptionFieldSingle_header ParameterExceptionField = "SINGLE_HEADER" - ParameterExceptionFieldDefault_action ParameterExceptionField = "DEFAULT_ACTION" - ParameterExceptionFieldRule_action ParameterExceptionField = "RULE_ACTION" - ParameterExceptionFieldEntity_limit ParameterExceptionField = "ENTITY_LIMIT" - ParameterExceptionFieldOverride_action ParameterExceptionField = "OVERRIDE_ACTION" - ParameterExceptionFieldScope_value 
ParameterExceptionField = "SCOPE_VALUE" - ParameterExceptionFieldResource_arn ParameterExceptionField = "RESOURCE_ARN" - ParameterExceptionFieldResource_type ParameterExceptionField = "RESOURCE_TYPE" - ParameterExceptionFieldTags ParameterExceptionField = "TAGS" - ParameterExceptionFieldTag_keys ParameterExceptionField = "TAG_KEYS" - ParameterExceptionFieldMetric_name ParameterExceptionField = "METRIC_NAME" - ParameterExceptionFieldFirewall_manager_statement ParameterExceptionField = "FIREWALL_MANAGER_STATEMENT" - ParameterExceptionFieldFallback_behavior ParameterExceptionField = "FALLBACK_BEHAVIOR" - ParameterExceptionFieldPosition ParameterExceptionField = "POSITION" - ParameterExceptionFieldForwarded_ip_config ParameterExceptionField = "FORWARDED_IP_CONFIG" - ParameterExceptionFieldIp_set_forwarded_ip_config ParameterExceptionField = "IP_SET_FORWARDED_IP_CONFIG" - ParameterExceptionFieldHeader_name ParameterExceptionField = "HEADER_NAME" + ParameterExceptionFieldWebAcl ParameterExceptionField = "WEB_ACL" + ParameterExceptionFieldRuleGroup ParameterExceptionField = "RULE_GROUP" + ParameterExceptionFieldRegexPatternSet ParameterExceptionField = "REGEX_PATTERN_SET" + ParameterExceptionFieldIpSet ParameterExceptionField = "IP_SET" + ParameterExceptionFieldManagedRuleSet ParameterExceptionField = "MANAGED_RULE_SET" + ParameterExceptionFieldRule ParameterExceptionField = "RULE" + ParameterExceptionFieldExcludedRule ParameterExceptionField = "EXCLUDED_RULE" + ParameterExceptionFieldStatement ParameterExceptionField = "STATEMENT" + ParameterExceptionFieldByteMatchStatement ParameterExceptionField = "BYTE_MATCH_STATEMENT" + ParameterExceptionFieldSqliMatchStatement ParameterExceptionField = "SQLI_MATCH_STATEMENT" + ParameterExceptionFieldXssMatchStatement ParameterExceptionField = "XSS_MATCH_STATEMENT" + ParameterExceptionFieldSizeConstraintStatement ParameterExceptionField = "SIZE_CONSTRAINT_STATEMENT" + ParameterExceptionFieldGeoMatchStatement ParameterExceptionField = "GEO_MATCH_STATEMENT" + ParameterExceptionFieldRateBasedStatement ParameterExceptionField = "RATE_BASED_STATEMENT" + ParameterExceptionFieldRuleGroupReferenceStatement ParameterExceptionField = "RULE_GROUP_REFERENCE_STATEMENT" + ParameterExceptionFieldRegexPatternReferenceStatement ParameterExceptionField = "REGEX_PATTERN_REFERENCE_STATEMENT" + ParameterExceptionFieldIpSetReferenceStatement ParameterExceptionField = "IP_SET_REFERENCE_STATEMENT" + ParameterExceptionFieldManagedRuleSetStatement ParameterExceptionField = "MANAGED_RULE_SET_STATEMENT" + ParameterExceptionFieldAndStatement ParameterExceptionField = "AND_STATEMENT" + ParameterExceptionFieldOrStatement ParameterExceptionField = "OR_STATEMENT" + ParameterExceptionFieldNotStatement ParameterExceptionField = "NOT_STATEMENT" + ParameterExceptionFieldIpAddress ParameterExceptionField = "IP_ADDRESS" + ParameterExceptionFieldIpAddressVersion ParameterExceptionField = "IP_ADDRESS_VERSION" + ParameterExceptionFieldFieldToMatch ParameterExceptionField = "FIELD_TO_MATCH" + ParameterExceptionFieldTextTransformation ParameterExceptionField = "TEXT_TRANSFORMATION" + ParameterExceptionFieldSingleQueryArgument ParameterExceptionField = "SINGLE_QUERY_ARGUMENT" + ParameterExceptionFieldSingleHeader ParameterExceptionField = "SINGLE_HEADER" + ParameterExceptionFieldDefaultAction ParameterExceptionField = "DEFAULT_ACTION" + ParameterExceptionFieldRuleAction ParameterExceptionField = "RULE_ACTION" + ParameterExceptionFieldEntityLimit ParameterExceptionField = "ENTITY_LIMIT" + 
ParameterExceptionFieldOverrideAction ParameterExceptionField = "OVERRIDE_ACTION" + ParameterExceptionFieldScopeValue ParameterExceptionField = "SCOPE_VALUE" + ParameterExceptionFieldResourceArn ParameterExceptionField = "RESOURCE_ARN" + ParameterExceptionFieldResourceType ParameterExceptionField = "RESOURCE_TYPE" + ParameterExceptionFieldTags ParameterExceptionField = "TAGS" + ParameterExceptionFieldTagKeys ParameterExceptionField = "TAG_KEYS" + ParameterExceptionFieldMetricName ParameterExceptionField = "METRIC_NAME" + ParameterExceptionFieldFirewallManagerStatement ParameterExceptionField = "FIREWALL_MANAGER_STATEMENT" + ParameterExceptionFieldFallbackBehavior ParameterExceptionField = "FALLBACK_BEHAVIOR" + ParameterExceptionFieldPosition ParameterExceptionField = "POSITION" + ParameterExceptionFieldForwardedIpConfig ParameterExceptionField = "FORWARDED_IP_CONFIG" + ParameterExceptionFieldIpSetForwardedIpConfig ParameterExceptionField = "IP_SET_FORWARDED_IP_CONFIG" + ParameterExceptionFieldHeaderName ParameterExceptionField = "HEADER_NAME" ) // Values returns all known values for ParameterExceptionField. Note that this can @@ -700,11 +700,11 @@ type PositionalConstraint string // Enum values for PositionalConstraint const ( - PositionalConstraintExactly PositionalConstraint = "EXACTLY" - PositionalConstraintStarts_with PositionalConstraint = "STARTS_WITH" - PositionalConstraintEnds_with PositionalConstraint = "ENDS_WITH" - PositionalConstraintContains PositionalConstraint = "CONTAINS" - PositionalConstraintContains_word PositionalConstraint = "CONTAINS_WORD" + PositionalConstraintExactly PositionalConstraint = "EXACTLY" + PositionalConstraintStartsWith PositionalConstraint = "STARTS_WITH" + PositionalConstraintEndsWith PositionalConstraint = "ENDS_WITH" + PositionalConstraintContains PositionalConstraint = "CONTAINS" + PositionalConstraintContainsWord PositionalConstraint = "CONTAINS_WORD" ) // Values returns all known values for PositionalConstraint. Note that this can be @@ -724,8 +724,8 @@ type RateBasedStatementAggregateKeyType string // Enum values for RateBasedStatementAggregateKeyType const ( - RateBasedStatementAggregateKeyTypeIp RateBasedStatementAggregateKeyType = "IP" - RateBasedStatementAggregateKeyTypeForwarded_ip RateBasedStatementAggregateKeyType = "FORWARDED_IP" + RateBasedStatementAggregateKeyTypeIp RateBasedStatementAggregateKeyType = "IP" + RateBasedStatementAggregateKeyTypeForwardedIp RateBasedStatementAggregateKeyType = "FORWARDED_IP" ) // Values returns all known values for RateBasedStatementAggregateKeyType. Note @@ -743,9 +743,9 @@ type ResourceType string // Enum values for ResourceType const ( - ResourceTypeApplication_load_balancer ResourceType = "APPLICATION_LOAD_BALANCER" - ResourceTypeApi_gateway ResourceType = "API_GATEWAY" - ResourceTypeAppsync ResourceType = "APPSYNC" + ResourceTypeApplicationLoadBalancer ResourceType = "APPLICATION_LOAD_BALANCER" + ResourceTypeApiGateway ResourceType = "API_GATEWAY" + ResourceTypeAppsync ResourceType = "APPSYNC" ) // Values returns all known values for ResourceType. 
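The renames above change only the Go identifiers; the wire values stay the same. A small illustrative sketch (not generated code) of how calling code refers to the new constant names, assuming the wafv2 types import path used by this module:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/wafv2/types"
)

// describeFallback switches on the renamed FallbackBehavior constants
// (formerly FallbackBehaviorNo_match, now FallbackBehaviorNoMatch).
func describeFallback(b types.FallbackBehavior) string {
	switch b {
	case types.FallbackBehaviorMatch:
		return "treat the web request as matching the rule statement"
	case types.FallbackBehaviorNoMatch:
		return "treat the web request as not matching the rule statement"
	default:
		return "unknown fallback behavior: " + string(b)
	}
}

func main() {
	fmt.Println(describeFallback(types.FallbackBehaviorNoMatch))
	// The underlying string value is unchanged by the rename.
	fmt.Println(string(types.PositionalConstraintStartsWith)) // STARTS_WITH
}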
Note that this can be expanded @@ -781,12 +781,12 @@ type TextTransformationType string // Enum values for TextTransformationType const ( - TextTransformationTypeNone TextTransformationType = "NONE" - TextTransformationTypeCompress_white_space TextTransformationType = "COMPRESS_WHITE_SPACE" - TextTransformationTypeHtml_entity_decode TextTransformationType = "HTML_ENTITY_DECODE" - TextTransformationTypeLowercase TextTransformationType = "LOWERCASE" - TextTransformationTypeCmd_line TextTransformationType = "CMD_LINE" - TextTransformationTypeUrl_decode TextTransformationType = "URL_DECODE" + TextTransformationTypeNone TextTransformationType = "NONE" + TextTransformationTypeCompressWhiteSpace TextTransformationType = "COMPRESS_WHITE_SPACE" + TextTransformationTypeHtmlEntityDecode TextTransformationType = "HTML_ENTITY_DECODE" + TextTransformationTypeLowercase TextTransformationType = "LOWERCASE" + TextTransformationTypeCmdLine TextTransformationType = "CMD_LINE" + TextTransformationTypeUrlDecode TextTransformationType = "URL_DECODE" ) // Values returns all known values for TextTransformationType. Note that this can diff --git a/service/wafv2/types/errors.go b/service/wafv2/types/errors.go index 52a6d4b2a0f..411e1d76416 100644 --- a/service/wafv2/types/errors.go +++ b/service/wafv2/types/errors.go @@ -81,18 +81,18 @@ func (e *WAFInvalidOperationException) ErrorFault() smithy.ErrorFault { return s // The operation failed because AWS WAF didn't recognize a parameter in the // request. For example: // -// * You specified an invalid parameter name or value. +// * You specified an invalid parameter name or value. // +// * +// Your nested statement isn't valid. You might have tried to nest a statement that +// can’t be nested. // -// * Your nested statement isn't valid. You might have tried to nest a statement -// that can’t be nested. +// * You tried to update a WebACL with a DefaultAction that isn't +// among the types available at DefaultAction. // -// * You tried to update a WebACL with a DefaultAction -// that isn't among the types available at DefaultAction. -// -// * Your request -// references an ARN that is malformed, or corresponds to a resource with which a -// Web ACL cannot be associated. +// * Your request references an ARN +// that is malformed, or corresponds to a resource with which a Web ACL cannot be +// associated. type WAFInvalidParameterException struct { Message *string @@ -116,21 +116,21 @@ func (e *WAFInvalidParameterException) ErrorFault() smithy.ErrorFault { return s // The operation failed because the specified policy isn't in the proper format. // The policy specifications must conform to the following: // -// * The policy must -// be composed using IAM Policy version 2012-10-17 or version 2015-01-01. +// * The policy must be +// composed using IAM Policy version 2012-10-17 or version 2015-01-01. // -// * -// The policy must include specifications for Effect, Action, and Principal. +// * The +// policy must include specifications for Effect, Action, and Principal. // -// * -// Effect must specify Allow. +// * Effect +// must specify Allow. // -// * Action must specify wafv2:CreateWebACL, +// * Action must specify wafv2:CreateWebACL, // wafv2:UpdateWebACL, and wafv2:PutFirewallManagerRuleGroups. AWS WAF rejects any // extra actions or wildcard actions in the policy. // -// * The policy must not -// include a Resource parameter. +// * The policy must not include +// a Resource parameter. 
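For reference, a hedged sketch of a policy document that satisfies the restrictions listed above: IAM policy version 2012-10-17, Effect, Action, and Principal present, Effect set to Allow, exactly the three permitted wafv2 actions, and no Resource element. The account ARN in Principal is a placeholder, and the surrounding PutPermissionPolicy request fields are deliberately omitted.

package main

import (
	"encoding/json"
	"fmt"
)

// ruleGroupSharingPolicy follows the documented restrictions; the principal
// ARN below is a placeholder account, not a real one.
const ruleGroupSharingPolicy = `{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": {"AWS": "arn:aws:iam::123456789012:root"},
      "Action": [
        "wafv2:CreateWebACL",
        "wafv2:UpdateWebACL",
        "wafv2:PutFirewallManagerRuleGroups"
      ]
    }
  ]
}`

func main() {
	// json.Valid only checks that the document is well-formed JSON;
	// AWS WAF enforces the content restrictions server-side.
	fmt.Println("well-formed JSON:", json.Valid([]byte(ruleGroupSharingPolicy)))
}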
// // For more information, see IAM Policies // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html). diff --git a/service/wafv2/types/types.go b/service/wafv2/types/types.go index fda12e3aebf..cd5c163c874 100644 --- a/service/wafv2/types/types.go +++ b/service/wafv2/types/types.go @@ -86,14 +86,14 @@ type ByteMatchStatement struct { // characters or underscore (A-Z, a-z, 0-9, or _). In addition, SearchString must // be a word, which means that both of the following are true: // - // * SearchString - // is at the beginning of the specified part of the web request or is preceded by a + // * SearchString is + // at the beginning of the specified part of the web request or is preceded by a // character other than an alphanumeric character or underscore (_). Examples // include the value of a header and ;BadBot. // - // * SearchString is at the end of - // the specified part of the web request or is followed by a character other than - // an alphanumeric character or underscore (_), for example, BadBot; and + // * SearchString is at the end of the + // specified part of the web request or is followed by a character other than an + // alphanumeric character or underscore (_), for example, BadBot; and // -BadBot;. // // EXACTLY The value of the specified part of the web request must @@ -110,18 +110,18 @@ type ByteMatchStatement struct { // maximum length of the value is 50 bytes. Valid values depend on the component // that you specify for inspection in FieldToMatch: // - // * Method: The HTTP method - // that you want AWS WAF to search for. This indicates the type of operation - // specified in the request. + // * Method: The HTTP method that + // you want AWS WAF to search for. This indicates the type of operation specified + // in the request. // - // * UriPath: The value that you want AWS WAF to - // search for in the URI path, for example, /images/daily-ad.jpg. + // * UriPath: The value that you want AWS WAF to search for in the + // URI path, for example, /images/daily-ad.jpg. // - // If SearchString - // includes alphabetic characters A-Z and a-z, note that the value is case - // sensitive. If you're using the AWS WAF API Specify a base64-encoded version of - // the value. The maximum length of the value before you base64-encode it is 50 - // bytes. For example, suppose the value of Type is HEADER and the value of Data is + // If SearchString includes + // alphabetic characters A-Z and a-z, note that the value is case sensitive. If + // you're using the AWS WAF API Specify a base64-encoded version of the value. The + // maximum length of the value before you base64-encode it is 50 bytes. For + // example, suppose the value of Type is HEADER and the value of Data is // User-Agent. If you want to search the User-Agent header for the value BadBot, // you base64-encode BadBot using MIME base64-encoding and include the resulting // value, QmFkQm90, in the value of SearchString. If you're using the AWS CLI or @@ -254,12 +254,12 @@ type FirewallManagerRuleGroup struct { // action settings. In a Rule, you must specify either this OverrideAction setting // or the rule Action setting, but not both: // - // * If the rule statement - // references a rule group, use this override action setting and not the action - // setting. + // * If the rule statement references a + // rule group, use this override action setting and not the action setting. // - // * If the rule statement does not reference a rule group, use the - // rule action setting and not this rule override action setting. 
+ // * If + // the rule statement does not reference a rule group, use the rule action setting + // and not this rule override action setting. // // This member is required. OverrideAction *OverrideAction @@ -325,12 +325,11 @@ type ForwardedIPConfig struct { // present in the request, AWS WAF doesn't apply the rule to the web request at // all. You can specify the following fallback behaviors: // - // * MATCH - Treat the - // web request as matching the rule statement. AWS WAF applies the rule action to - // the request. + // * MATCH - Treat the web + // request as matching the rule statement. AWS WAF applies the rule action to the + // request. // - // * NO_MATCH - Treat the web request as not matching the rule - // statement. + // * NO_MATCH - Treat the web request as not matching the rule statement. // // This member is required. FallbackBehavior FallbackBehavior @@ -392,11 +391,11 @@ type HTTPRequest struct { // with a CloudFront distribution, this is the value of one of the following fields // in CloudFront access logs: // - // * c-ip, if the viewer did not use an HTTP proxy - // or a load balancer to send the request + // * c-ip, if the viewer did not use an HTTP proxy or a + // load balancer to send the request // - // * x-forwarded-for, if the viewer did - // use an HTTP proxy or a load balancer to send the request + // * x-forwarded-for, if the viewer did use an + // HTTP proxy or a load balancer to send the request ClientIP *string // The two-letter country code for the country that the request originated from. @@ -441,20 +440,19 @@ type IPSet struct { // IP addresses in Classless Inter-Domain Routing (CIDR) notation. AWS WAF supports // all address ranges for IP versions IPv4 and IPv6. Examples: // - // * To configure - // AWS WAF to allow, block, or count requests that originated from the IP address + // * To configure AWS + // WAF to allow, block, or count requests that originated from the IP address // 192.0.2.44, specify 192.0.2.44/32. // - // * To configure AWS WAF to allow, block, - // or count requests that originated from IP addresses from 192.0.2.0 to - // 192.0.2.255, specify 192.0.2.0/24. + // * To configure AWS WAF to allow, block, or + // count requests that originated from IP addresses from 192.0.2.0 to 192.0.2.255, + // specify 192.0.2.0/24. // - // * To configure AWS WAF to allow, block, - // or count requests that originated from the IP address - // 1111:0000:0000:0000:0000:0000:0000:0111, specify - // 1111:0000:0000:0000:0000:0000:0000:0111/128. + // * To configure AWS WAF to allow, block, or count requests + // that originated from the IP address 1111:0000:0000:0000:0000:0000:0000:0111, + // specify 1111:0000:0000:0000:0000:0000:0000:0111/128. // - // * To configure AWS WAF to + // * To configure AWS WAF to // allow, block, or count requests that originated from IP addresses // 1111:0000:0000:0000:0000:0000:0000:0000 to // 1111:0000:0000:0000:ffff:ffff:ffff:ffff, specify @@ -503,12 +501,11 @@ type IPSetForwardedIPConfig struct { // present in the request, AWS WAF doesn't apply the rule to the web request at // all. You can specify the following fallback behaviors: // - // * MATCH - Treat the - // web request as matching the rule statement. AWS WAF applies the rule action to - // the request. + // * MATCH - Treat the web + // request as matching the rule statement. AWS WAF applies the rule action to the + // request. // - // * NO_MATCH - Treat the web request as not matching the rule - // statement. 
+ // * NO_MATCH - Treat the web request as not matching the rule statement. // // This member is required. FallbackBehavior FallbackBehavior @@ -527,16 +524,16 @@ type IPSetForwardedIPConfig struct { // identifies the original client and the rest identify proxies that the request // went through. The options for this setting are the following: // - // * FIRST - - // Inspect the first IP address in the list of IP addresses in the header. This is - // usually the client's original IP. + // * FIRST - Inspect + // the first IP address in the list of IP addresses in the header. This is usually + // the client's original IP. // - // * LAST - Inspect the last IP address in - // the list of IP addresses in the header. + // * LAST - Inspect the last IP address in the list of + // IP addresses in the header. // - // * ANY - Inspect all IP addresses in - // the header for a match. If the header contains more than 10 IP addresses, AWS - // WAF inspects the last 10. + // * ANY - Inspect all IP addresses in the header for + // a match. If the header contains more than 10 IP addresses, AWS WAF inspects the + // last 10. // // This member is required. Position ForwardedIPPosition @@ -747,12 +744,12 @@ type OrStatement struct { // action settings. In a Rule, you must specify either this OverrideAction setting // or the rule Action setting, but not both: // -// * If the rule statement -// references a rule group, use this override action setting and not the action -// setting. +// * If the rule statement references a +// rule group, use this override action setting and not the action setting. // -// * If the rule statement does not reference a rule group, use the -// rule action setting and not this rule override action setting. +// * If +// the rule statement does not reference a rule group, use the rule action setting +// and not this rule override action setting. type OverrideAction struct { // Override the rule action setting to count. @@ -788,31 +785,31 @@ type QueryString struct { // create a rate-based rule with a nested AND rule statement that contains the // following nested statements: // -// * An IP match statement with an IP set that +// * An IP match statement with an IP set that // specified the address 192.0.2.44. // -// * A string match statement that searches -// in the User-Agent header for the string BadBot. +// * A string match statement that searches in +// the User-Agent header for the string BadBot. // -// In this rate-based rule, you -// also define a rate limit. For this example, the rate limit is 1,000. Requests -// that meet both of the conditions in the statements are counted. If the count -// exceeds 1,000 requests per five minutes, the rule action triggers. Requests that -// do not meet both conditions are not counted towards the rate limit and are not -// affected by this rule. You cannot nest a RateBasedStatement, for example for use -// inside a NotStatement or OrStatement. It can only be referenced as a top-level -// statement within a rule. +// In this rate-based rule, you also +// define a rate limit. For this example, the rate limit is 1,000. Requests that +// meet both of the conditions in the statements are counted. If the count exceeds +// 1,000 requests per five minutes, the rule action triggers. Requests that do not +// meet both conditions are not counted towards the rate limit and are not affected +// by this rule. You cannot nest a RateBasedStatement, for example for use inside a +// NotStatement or OrStatement. 
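An illustrative sketch (not generated code) of filling in the IPSetForwardedIPConfig fields documented in the hunk above. The FallbackBehavior and Position fields and their values appear in this change; the HeaderName field, its *string type, and the constant name ForwardedIPPositionFirst are assumptions based on the surrounding documentation.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/wafv2/types"
)

func main() {
	cfg := types.IPSetForwardedIPConfig{
		// Assumed field: the header that carries the forwarded client IPs.
		HeaderName: aws.String("X-Forwarded-For"),
		// NO_MATCH: treat the web request as not matching the rule statement.
		FallbackBehavior: types.FallbackBehaviorNoMatch,
		// FIRST: inspect the first IP address in the header, which is usually
		// the client's original IP (constant name assumed from the FIRST value).
		Position: types.ForwardedIPPositionFirst,
	}
	fmt.Printf("%+v\n", cfg)
}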
It can only be referenced as a top-level statement +// within a rule. type RateBasedStatement struct { // Setting that indicates how to aggregate the request counts. The options are the // following: // - // * IP - Aggregate the request counts on the IP address from the - // web request origin. + // * IP - Aggregate the request counts on the IP address from the web + // request origin. // - // * FORWARDED_IP - Aggregate the request counts on the - // first IP address in an HTTP header. If you use this, configure the - // ForwardedIPConfig, to specify the header to use. + // * FORWARDED_IP - Aggregate the request counts on the first IP + // address in an HTTP header. If you use this, configure the ForwardedIPConfig, to + // specify the header to use. // // This member is required. AggregateKeyType RateBasedStatementAggregateKeyType @@ -1002,11 +999,11 @@ type Rule struct { // ManagedRuleGroupStatement. You must specify either this Action setting or the // rule OverrideAction setting, but not both: // - // * If the rule statement does not + // * If the rule statement does not // reference a rule group, use this rule action setting and not the rule override // action setting. // - // * If the rule statement references a rule group, use the + // * If the rule statement references a rule group, use the // override action setting and not this action setting. Action *RuleAction @@ -1017,12 +1014,12 @@ type Rule struct { // action settings. In a Rule, you must specify either this OverrideAction setting // or the rule Action setting, but not both: // - // * If the rule statement - // references a rule group, use this override action setting and not the action - // setting. + // * If the rule statement references a + // rule group, use this override action setting and not the action setting. // - // * If the rule statement does not reference a rule group, use the - // rule action setting and not this rule override action setting. + // * If + // the rule statement does not reference a rule group, use the rule action setting + // and not this rule override action setting. OverrideAction *OverrideAction } @@ -1377,20 +1374,20 @@ type Statement struct { // create a rate-based rule with a nested AND rule statement that contains the // following nested statements: // - // * An IP match statement with an IP set that + // * An IP match statement with an IP set that // specified the address 192.0.2.44. // - // * A string match statement that searches - // in the User-Agent header for the string BadBot. - // - // In this rate-based rule, you - // also define a rate limit. For this example, the rate limit is 1,000. Requests - // that meet both of the conditions in the statements are counted. If the count - // exceeds 1,000 requests per five minutes, the rule action triggers. Requests that - // do not meet both conditions are not counted towards the rate limit and are not - // affected by this rule. You cannot nest a RateBasedStatement, for example for use - // inside a NotStatement or OrStatement. It can only be referenced as a top-level - // statement within a rule. + // * A string match statement that searches in + // the User-Agent header for the string BadBot. + // + // In this rate-based rule, you also + // define a rate limit. For this example, the rate limit is 1,000. Requests that + // meet both of the conditions in the statements are counted. If the count exceeds + // 1,000 requests per five minutes, the rule action triggers. 
Requests that do not + // meet both conditions are not counted towards the rate limit and are not affected + // by this rule. You cannot nest a RateBasedStatement, for example for use inside a + // NotStatement or OrStatement. It can only be referenced as a top-level statement + // within a rule. RateBasedStatement *RateBasedStatement // A rule statement used to search web request components for matches with regular @@ -1516,64 +1513,64 @@ type TextTransformation struct { // and using unusual formatting to disguise some or all of the command, use this // option to perform the following transformations: // - // * Delete the following + // * Delete the following // characters: \ " ' ^ // - // * Delete spaces before the following characters: / ( - // + // * Delete spaces before the following characters: / ( // - // * Replace the following characters with a space: , ; + // * + // Replace the following characters with a space: , ; // - // * Replace multiple - // spaces with one space + // * Replace multiple spaces + // with one space // - // * Convert uppercase letters (A-Z) to lowercase + // * Convert uppercase letters (A-Z) to lowercase // (a-z) // // COMPRESS_WHITE_SPACE Use this option to replace the following characters // with a space character (decimal 32): // - // * \f, formfeed, decimal 12 + // * \f, formfeed, decimal 12 // - // * \t, - // tab, decimal 9 + // * \t, tab, + // decimal 9 // - // * \n, newline, decimal 10 + // * \n, newline, decimal 10 // - // * \r, carriage return, - // decimal 13 + // * \r, carriage return, decimal 13 // - // * \v, vertical tab, decimal 11 + // * \v, + // vertical tab, decimal 11 // - // * non-breaking space, - // decimal 160 + // * non-breaking space, decimal + // 160 // // COMPRESS_WHITE_SPACE also replaces multiple spaces with one space. // HTML_ENTITY_DECODE Use this option to replace HTML-encoded characters with // unencoded characters. HTML_ENTITY_DECODE performs the following operations: // + // * + // Replaces (ampersand)quot; with " // - // * Replaces (ampersand)quot; with " - // - // * Replaces (ampersand)nbsp; with a + // * Replaces (ampersand)nbsp; with a // non-breaking space, decimal 160 // - // * Replaces (ampersand)lt; with a "less - // than" symbol + // * Replaces (ampersand)lt; with a "less than" + // symbol // - // * Replaces (ampersand)gt; with > + // * Replaces (ampersand)gt; with > // - // * Replaces characters - // that are represented in hexadecimal format, (ampersand)#xhhhh;, with the - // corresponding characters + // * Replaces characters that are + // represented in hexadecimal format, (ampersand)#xhhhh;, with the corresponding + // characters // - // * Replaces characters that are represented in - // decimal format, (ampersand)#nnnn;, with the corresponding characters + // * Replaces characters that are represented in decimal format, + // (ampersand)#nnnn;, with the corresponding characters // - // LOWERCASE - // Use this option to convert uppercase letters (A-Z) to lowercase (a-z). - // URL_DECODE Use this option to decode a URL-encoded value. NONE Specify NONE if - // you don't want any text transformations. + // LOWERCASE Use this option + // to convert uppercase letters (A-Z) to lowercase (a-z). URL_DECODE Use this + // option to decode a URL-encoded value. NONE Specify NONE if you don't want any + // text transformations. // // This member is required. 
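A standard-library sketch of the base64 handling described for ByteMatchStatement's SearchString earlier in this file: "BadBot" MIME base64-encodes to "QmFkQm90", matching the documentation above. This is illustrative only and does not call the API.

package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	searchString := "BadBot"

	// Encode the raw search value the way the documentation describes.
	encoded := base64.StdEncoding.EncodeToString([]byte(searchString))
	fmt.Println(encoded) // QmFkQm90

	// Decoding recovers the original value.
	decoded, err := base64.StdEncoding.DecodeString(encoded)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(decoded)) // BadBot
}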
Type TextTransformationType diff --git a/service/workdocs/doc.go b/service/workdocs/doc.go index 3c41c329e16..3cb4a51a73d 100644 --- a/service/workdocs/doc.go +++ b/service/workdocs/doc.go @@ -5,13 +5,13 @@ // // The WorkDocs API is designed for the following use cases: // -// * File Migration: +// * File Migration: // File migration applications are supported for users who want to migrate their // files from an on-premises or off-premises file system or service. Users can // insert files into a user directory structure, as well as allow for basic // metadata changes, such as modifications to the permissions of files. // -// * +// * // Security: Support security applications are supported for users who have // additional security needs, such as antivirus or data loss prevention. The API // actions, along with AWS CloudTrail, allow these applications to detect when @@ -19,16 +19,16 @@ // actions and replace the target file. If the target file violates the policy, the // application can also choose to email the user. // -// * eDiscovery/Analytics: -// General administrative applications are supported, such as eDiscovery and -// analytics. These applications can choose to mimic or record the actions in an -// Amazon WorkDocs site, along with AWS CloudTrail, to replicate data for -// eDiscovery, backup, or analytical applications. +// * eDiscovery/Analytics: General +// administrative applications are supported, such as eDiscovery and analytics. +// These applications can choose to mimic or record the actions in an Amazon +// WorkDocs site, along with AWS CloudTrail, to replicate data for eDiscovery, +// backup, or analytical applications. // -// All Amazon WorkDocs API actions -// are Amazon authenticated and certificate-signed. They not only require the use -// of the AWS SDK, but also allow for the exclusive use of IAM users and roles to -// help facilitate access, trust, and permission policies. By creating a role and +// All Amazon WorkDocs API actions are Amazon +// authenticated and certificate-signed. They not only require the use of the AWS +// SDK, but also allow for the exclusive use of IAM users and roles to help +// facilitate access, trust, and permission policies. By creating a role and // allowing an IAM user to access the Amazon WorkDocs site, the IAM user gains full // administrative visibility into the entire Amazon WorkDocs site (or as set in the // IAM policy). 
This includes, but is not limited to, the ability to modify file diff --git a/service/workdocs/types/enums.go b/service/workdocs/types/enums.go index 2252027752e..5a1594f668a 100644 --- a/service/workdocs/types/enums.go +++ b/service/workdocs/types/enums.go @@ -6,39 +6,39 @@ type ActivityType string // Enum values for ActivityType const ( - ActivityTypeDocument_checked_in ActivityType = "DOCUMENT_CHECKED_IN" - ActivityTypeDocument_checked_out ActivityType = "DOCUMENT_CHECKED_OUT" - ActivityTypeDocument_renamed ActivityType = "DOCUMENT_RENAMED" - ActivityTypeDocument_version_uploaded ActivityType = "DOCUMENT_VERSION_UPLOADED" - ActivityTypeDocument_version_deleted ActivityType = "DOCUMENT_VERSION_DELETED" - ActivityTypeDocument_version_viewed ActivityType = "DOCUMENT_VERSION_VIEWED" - ActivityTypeDocument_version_downloaded ActivityType = "DOCUMENT_VERSION_DOWNLOADED" - ActivityTypeDocument_recycled ActivityType = "DOCUMENT_RECYCLED" - ActivityTypeDocument_restored ActivityType = "DOCUMENT_RESTORED" - ActivityTypeDocument_reverted ActivityType = "DOCUMENT_REVERTED" - ActivityTypeDocument_shared ActivityType = "DOCUMENT_SHARED" - ActivityTypeDocument_unshared ActivityType = "DOCUMENT_UNSHARED" - ActivityTypeDocument_share_permission_changed ActivityType = "DOCUMENT_SHARE_PERMISSION_CHANGED" - ActivityTypeDocument_shareable_link_created ActivityType = "DOCUMENT_SHAREABLE_LINK_CREATED" - ActivityTypeDocument_shareable_link_removed ActivityType = "DOCUMENT_SHAREABLE_LINK_REMOVED" - ActivityTypeDocument_shareable_link_permission_changed ActivityType = "DOCUMENT_SHAREABLE_LINK_PERMISSION_CHANGED" - ActivityTypeDocument_moved ActivityType = "DOCUMENT_MOVED" - ActivityTypeDocument_comment_added ActivityType = "DOCUMENT_COMMENT_ADDED" - ActivityTypeDocument_comment_deleted ActivityType = "DOCUMENT_COMMENT_DELETED" - ActivityTypeDocument_annotation_added ActivityType = "DOCUMENT_ANNOTATION_ADDED" - ActivityTypeDocument_annotation_deleted ActivityType = "DOCUMENT_ANNOTATION_DELETED" - ActivityTypeFolder_created ActivityType = "FOLDER_CREATED" - ActivityTypeFolder_deleted ActivityType = "FOLDER_DELETED" - ActivityTypeFolder_renamed ActivityType = "FOLDER_RENAMED" - ActivityTypeFolder_recycled ActivityType = "FOLDER_RECYCLED" - ActivityTypeFolder_restored ActivityType = "FOLDER_RESTORED" - ActivityTypeFolder_shared ActivityType = "FOLDER_SHARED" - ActivityTypeFolder_unshared ActivityType = "FOLDER_UNSHARED" - ActivityTypeFolder_share_permission_changed ActivityType = "FOLDER_SHARE_PERMISSION_CHANGED" - ActivityTypeFolder_shareable_link_created ActivityType = "FOLDER_SHAREABLE_LINK_CREATED" - ActivityTypeFolder_shareable_link_removed ActivityType = "FOLDER_SHAREABLE_LINK_REMOVED" - ActivityTypeFolder_shareable_link_permission_changed ActivityType = "FOLDER_SHAREABLE_LINK_PERMISSION_CHANGED" - ActivityTypeFolder_moved ActivityType = "FOLDER_MOVED" + ActivityTypeDocumentCheckedIn ActivityType = "DOCUMENT_CHECKED_IN" + ActivityTypeDocumentCheckedOut ActivityType = "DOCUMENT_CHECKED_OUT" + ActivityTypeDocumentRenamed ActivityType = "DOCUMENT_RENAMED" + ActivityTypeDocumentVersionUploaded ActivityType = "DOCUMENT_VERSION_UPLOADED" + ActivityTypeDocumentVersionDeleted ActivityType = "DOCUMENT_VERSION_DELETED" + ActivityTypeDocumentVersionViewed ActivityType = "DOCUMENT_VERSION_VIEWED" + ActivityTypeDocumentVersionDownloaded ActivityType = "DOCUMENT_VERSION_DOWNLOADED" + ActivityTypeDocumentRecycled ActivityType = "DOCUMENT_RECYCLED" + ActivityTypeDocumentRestored ActivityType = "DOCUMENT_RESTORED" + 
ActivityTypeDocumentReverted ActivityType = "DOCUMENT_REVERTED" + ActivityTypeDocumentShared ActivityType = "DOCUMENT_SHARED" + ActivityTypeDocumentUnshared ActivityType = "DOCUMENT_UNSHARED" + ActivityTypeDocumentSharePermissionChanged ActivityType = "DOCUMENT_SHARE_PERMISSION_CHANGED" + ActivityTypeDocumentShareableLinkCreated ActivityType = "DOCUMENT_SHAREABLE_LINK_CREATED" + ActivityTypeDocumentShareableLinkRemoved ActivityType = "DOCUMENT_SHAREABLE_LINK_REMOVED" + ActivityTypeDocumentShareableLinkPermissionChanged ActivityType = "DOCUMENT_SHAREABLE_LINK_PERMISSION_CHANGED" + ActivityTypeDocumentMoved ActivityType = "DOCUMENT_MOVED" + ActivityTypeDocumentCommentAdded ActivityType = "DOCUMENT_COMMENT_ADDED" + ActivityTypeDocumentCommentDeleted ActivityType = "DOCUMENT_COMMENT_DELETED" + ActivityTypeDocumentAnnotationAdded ActivityType = "DOCUMENT_ANNOTATION_ADDED" + ActivityTypeDocumentAnnotationDeleted ActivityType = "DOCUMENT_ANNOTATION_DELETED" + ActivityTypeFolderCreated ActivityType = "FOLDER_CREATED" + ActivityTypeFolderDeleted ActivityType = "FOLDER_DELETED" + ActivityTypeFolderRenamed ActivityType = "FOLDER_RENAMED" + ActivityTypeFolderRecycled ActivityType = "FOLDER_RECYCLED" + ActivityTypeFolderRestored ActivityType = "FOLDER_RESTORED" + ActivityTypeFolderShared ActivityType = "FOLDER_SHARED" + ActivityTypeFolderUnshared ActivityType = "FOLDER_UNSHARED" + ActivityTypeFolderSharePermissionChanged ActivityType = "FOLDER_SHARE_PERMISSION_CHANGED" + ActivityTypeFolderShareableLinkCreated ActivityType = "FOLDER_SHAREABLE_LINK_CREATED" + ActivityTypeFolderShareableLinkRemoved ActivityType = "FOLDER_SHAREABLE_LINK_REMOVED" + ActivityTypeFolderShareableLinkPermissionChanged ActivityType = "FOLDER_SHAREABLE_LINK_PERMISSION_CHANGED" + ActivityTypeFolderMoved ActivityType = "FOLDER_MOVED" ) // Values returns all known values for ActivityType. Note that this can be expanded @@ -142,8 +142,8 @@ type DocumentSourceType string // Enum values for DocumentSourceType const ( - DocumentSourceTypeOriginal DocumentSourceType = "ORIGINAL" - DocumentSourceTypeWith_comments DocumentSourceType = "WITH_COMMENTS" + DocumentSourceTypeOriginal DocumentSourceType = "ORIGINAL" + DocumentSourceTypeWithComments DocumentSourceType = "WITH_COMMENTS" ) // Values returns all known values for DocumentSourceType. Note that this can be @@ -178,9 +178,9 @@ type DocumentThumbnailType string // Enum values for DocumentThumbnailType const ( - DocumentThumbnailTypeSmall DocumentThumbnailType = "SMALL" - DocumentThumbnailTypeSmall_hq DocumentThumbnailType = "SMALL_HQ" - DocumentThumbnailTypeLarge DocumentThumbnailType = "LARGE" + DocumentThumbnailTypeSmall DocumentThumbnailType = "SMALL" + DocumentThumbnailTypeSmallHq DocumentThumbnailType = "SMALL_HQ" + DocumentThumbnailTypeLarge DocumentThumbnailType = "LARGE" ) // Values returns all known values for DocumentThumbnailType. 
Note that this can be @@ -241,9 +241,9 @@ const ( LocaleTypeEs LocaleType = "es" LocaleTypeJa LocaleType = "ja" LocaleTypeRu LocaleType = "ru" - LocaleTypeZh_cn LocaleType = "zh_CN" - LocaleTypeZh_tw LocaleType = "zh_TW" - LocaleTypePt_br LocaleType = "pt_BR" + LocaleTypeZhCn LocaleType = "zh_CN" + LocaleTypeZhTw LocaleType = "zh_TW" + LocaleTypePtBr LocaleType = "pt_BR" LocaleTypeDefault LocaleType = "default" ) @@ -312,7 +312,7 @@ type ResourceCollectionType string // Enum values for ResourceCollectionType const ( - ResourceCollectionTypeShared_with_me ResourceCollectionType = "SHARED_WITH_ME" + ResourceCollectionTypeSharedWithMe ResourceCollectionType = "SHARED_WITH_ME" ) // Values returns all known values for ResourceCollectionType. Note that this can @@ -494,8 +494,8 @@ type UserFilterType string // Enum values for UserFilterType const ( - UserFilterTypeAll UserFilterType = "ALL" - UserFilterTypeActive_pending UserFilterType = "ACTIVE_PENDING" + UserFilterTypeAll UserFilterType = "ALL" + UserFilterTypeActivePending UserFilterType = "ACTIVE_PENDING" ) // Values returns all known values for UserFilterType. Note that this can be @@ -512,11 +512,11 @@ type UserSortType string // Enum values for UserSortType const ( - UserSortTypeUser_name UserSortType = "USER_NAME" - UserSortTypeFull_name UserSortType = "FULL_NAME" - UserSortTypeStorage_limit UserSortType = "STORAGE_LIMIT" - UserSortTypeUser_status UserSortType = "USER_STATUS" - UserSortTypeStorage_used UserSortType = "STORAGE_USED" + UserSortTypeUserName UserSortType = "USER_NAME" + UserSortTypeFullName UserSortType = "FULL_NAME" + UserSortTypeStorageLimit UserSortType = "STORAGE_LIMIT" + UserSortTypeUserStatus UserSortType = "USER_STATUS" + UserSortTypeStorageUsed UserSortType = "STORAGE_USED" ) // Values returns all known values for UserSortType. Note that this can be expanded diff --git a/service/worklink/types/enums.go b/service/worklink/types/enums.go index 78638410f49..b83b62571a4 100644 --- a/service/worklink/types/enums.go +++ b/service/worklink/types/enums.go @@ -22,8 +22,8 @@ type DeviceStatus string // Enum values for DeviceStatus const ( - DeviceStatusActive DeviceStatus = "ACTIVE" - DeviceStatusSigned_out DeviceStatus = "SIGNED_OUT" + DeviceStatusActive DeviceStatus = "ACTIVE" + DeviceStatusSignedOut DeviceStatus = "SIGNED_OUT" ) // Values returns all known values for DeviceStatus. Note that this can be expanded @@ -40,14 +40,14 @@ type DomainStatus string // Enum values for DomainStatus const ( - DomainStatusPending_validation DomainStatus = "PENDING_VALIDATION" - DomainStatusAssociating DomainStatus = "ASSOCIATING" - DomainStatusActive DomainStatus = "ACTIVE" - DomainStatusInactive DomainStatus = "INACTIVE" - DomainStatusDisassociating DomainStatus = "DISASSOCIATING" - DomainStatusDisassociated DomainStatus = "DISASSOCIATED" - DomainStatusFailed_to_associate DomainStatus = "FAILED_TO_ASSOCIATE" - DomainStatusFailed_to_disassociate DomainStatus = "FAILED_TO_DISASSOCIATE" + DomainStatusPendingValidation DomainStatus = "PENDING_VALIDATION" + DomainStatusAssociating DomainStatus = "ASSOCIATING" + DomainStatusActive DomainStatus = "ACTIVE" + DomainStatusInactive DomainStatus = "INACTIVE" + DomainStatusDisassociating DomainStatus = "DISASSOCIATING" + DomainStatusDisassociated DomainStatus = "DISASSOCIATED" + DomainStatusFailedToAssociate DomainStatus = "FAILED_TO_ASSOCIATE" + DomainStatusFailedToDisassociate DomainStatus = "FAILED_TO_DISASSOCIATE" ) // Values returns all known values for DomainStatus. 
Note that this can be expanded @@ -70,12 +70,12 @@ type FleetStatus string // Enum values for FleetStatus const ( - FleetStatusCreating FleetStatus = "CREATING" - FleetStatusActive FleetStatus = "ACTIVE" - FleetStatusDeleting FleetStatus = "DELETING" - FleetStatusDeleted FleetStatus = "DELETED" - FleetStatusFailed_to_create FleetStatus = "FAILED_TO_CREATE" - FleetStatusFailed_to_delete FleetStatus = "FAILED_TO_DELETE" + FleetStatusCreating FleetStatus = "CREATING" + FleetStatusActive FleetStatus = "ACTIVE" + FleetStatusDeleting FleetStatus = "DELETING" + FleetStatusDeleted FleetStatus = "DELETED" + FleetStatusFailedToCreate FleetStatus = "FAILED_TO_CREATE" + FleetStatusFailedToDelete FleetStatus = "FAILED_TO_DELETE" ) // Values returns all known values for FleetStatus. Note that this can be expanded diff --git a/service/workmail/doc.go b/service/workmail/doc.go index cc815c80f8d..37ca239966a 100644 --- a/service/workmail/doc.go +++ b/service/workmail/doc.go @@ -11,24 +11,23 @@ // and the location in which your data is stored. The WorkMail API is designed for // the following scenarios: // -// * Listing and describing organizations +// * Listing and describing organizations // -// * -// Managing users +// * Managing +// users // -// * Managing groups +// * Managing groups // -// * Managing resources +// * Managing resources // -// All WorkMail -// API operations are Amazon-authenticated and certificate-signed. They not only -// require the use of the AWS SDK, but also allow for the exclusive use of AWS -// Identity and Access Management users and roles to help facilitate access, trust, -// and permission policies. By creating a role and allowing an IAM user to access -// the WorkMail site, the IAM user gains full administrative visibility into the -// entire WorkMail organization (or as set in the IAM policy). This includes, but -// is not limited to, the ability to create, update, and delete users, groups, and -// resources. This allows developers to perform the scenarios listed above, as well -// as give users the ability to grant access on a selective basis using the IAM -// model. +// All WorkMail API operations are +// Amazon-authenticated and certificate-signed. They not only require the use of +// the AWS SDK, but also allow for the exclusive use of AWS Identity and Access +// Management users and roles to help facilitate access, trust, and permission +// policies. By creating a role and allowing an IAM user to access the WorkMail +// site, the IAM user gains full administrative visibility into the entire WorkMail +// organization (or as set in the IAM policy). This includes, but is not limited +// to, the ability to create, update, and delete users, groups, and resources. This +// allows developers to perform the scenarios listed above, as well as give users +// the ability to grant access on a selective basis using the IAM model. 
package workmail diff --git a/service/workmail/types/enums.go b/service/workmail/types/enums.go index b58e52d7c24..e7bc191a675 100644 --- a/service/workmail/types/enums.go +++ b/service/workmail/types/enums.go @@ -44,11 +44,11 @@ type FolderName string // Enum values for FolderName const ( - FolderNameInbox FolderName = "INBOX" - FolderNameDeleted_items FolderName = "DELETED_ITEMS" - FolderNameSent_items FolderName = "SENT_ITEMS" - FolderNameDrafts FolderName = "DRAFTS" - FolderNameJunk_email FolderName = "JUNK_EMAIL" + FolderNameInbox FolderName = "INBOX" + FolderNameDeletedItems FolderName = "DELETED_ITEMS" + FolderNameSentItems FolderName = "SENT_ITEMS" + FolderNameDrafts FolderName = "DRAFTS" + FolderNameJunkEmail FolderName = "JUNK_EMAIL" ) // Values returns all known values for FolderName. Note that this can be expanded @@ -108,9 +108,9 @@ type PermissionType string // Enum values for PermissionType const ( - PermissionTypeFull_access PermissionType = "FULL_ACCESS" - PermissionTypeSend_as PermissionType = "SEND_AS" - PermissionTypeSend_on_behalf PermissionType = "SEND_ON_BEHALF" + PermissionTypeFullAccess PermissionType = "FULL_ACCESS" + PermissionTypeSendAs PermissionType = "SEND_AS" + PermissionTypeSendOnBehalf PermissionType = "SEND_ON_BEHALF" ) // Values returns all known values for PermissionType. Note that this can be @@ -146,9 +146,9 @@ type RetentionAction string // Enum values for RetentionAction const ( - RetentionActionNone RetentionAction = "NONE" - RetentionActionDelete RetentionAction = "DELETE" - RetentionActionPermanently_delete RetentionAction = "PERMANENTLY_DELETE" + RetentionActionNone RetentionAction = "NONE" + RetentionActionDelete RetentionAction = "DELETE" + RetentionActionPermanentlyDelete RetentionAction = "PERMANENTLY_DELETE" ) // Values returns all known values for RetentionAction. Note that this can be @@ -166,9 +166,9 @@ type UserRole string // Enum values for UserRole const ( - UserRoleUser UserRole = "USER" - UserRoleResource UserRole = "RESOURCE" - UserRoleSystem_user UserRole = "SYSTEM_USER" + UserRoleUser UserRole = "USER" + UserRoleResource UserRole = "RESOURCE" + UserRoleSystemUser UserRole = "SYSTEM_USER" ) // Values returns all known values for UserRole. Note that this can be expanded in diff --git a/service/workspaces/api_op_UpdateConnectionAliasPermission.go b/service/workspaces/api_op_UpdateConnectionAliasPermission.go index 8fb5902b817..ee09cd2dba7 100644 --- a/service/workspaces/api_op_UpdateConnectionAliasPermission.go +++ b/service/workspaces/api_op_UpdateConnectionAliasPermission.go @@ -19,13 +19,13 @@ import ( // for Amazon WorkSpaces // (https://docs.aws.amazon.com/workspaces/latest/adminguide/cross-region-redirection.html). // -// -// * Before performing this operation, call DescribeConnectionAliases +// * +// Before performing this operation, call DescribeConnectionAliases // (https://docs.aws.amazon.com/workspaces/latest/api/API_DescribeConnectionAliases.html) // to make sure that the current state of the connection alias is CREATED. // -// * -// To delete a connection alias that has been shared, the shared account must first +// * To +// delete a connection alias that has been shared, the shared account must first // disassociate the connection alias from any directories it has been associated // with. Then you must unshare the connection alias from the account it has been // shared with. 
You can delete a connection alias only after it is no longer shared diff --git a/service/workspaces/api_op_UpdateWorkspaceImagePermission.go b/service/workspaces/api_op_UpdateWorkspaceImagePermission.go index 56c143c722d..eaa3c916cb7 100644 --- a/service/workspaces/api_op_UpdateWorkspaceImagePermission.go +++ b/service/workspaces/api_op_UpdateWorkspaceImagePermission.go @@ -17,14 +17,14 @@ import ( // see Share or Unshare a Custom WorkSpaces Image // (https://docs.aws.amazon.com/workspaces/latest/adminguide/share-custom-image.html). // -// -// * To delete an image that has been shared, you must unshare the image before you +// * +// To delete an image that has been shared, you must unshare the image before you // delete it. // -// * Sharing Bring Your Own License (BYOL) images across AWS -// accounts isn't supported at this time in the AWS GovCloud (US-West) Region. To -// share BYOL images across accounts in the AWS GovCloud (US-West) Region, contact -// AWS Support. +// * Sharing Bring Your Own License (BYOL) images across AWS accounts +// isn't supported at this time in the AWS GovCloud (US-West) Region. To share BYOL +// images across accounts in the AWS GovCloud (US-West) Region, contact AWS +// Support. func (c *Client) UpdateWorkspaceImagePermission(ctx context.Context, params *UpdateWorkspaceImagePermissionInput, optFns ...func(*Options)) (*UpdateWorkspaceImagePermissionOutput, error) { if params == nil { params = &UpdateWorkspaceImagePermissionInput{} diff --git a/service/workspaces/types/enums.go b/service/workspaces/types/enums.go index 48597c941d4..48f5f686f9b 100644 --- a/service/workspaces/types/enums.go +++ b/service/workspaces/types/enums.go @@ -24,8 +24,8 @@ type Application string // Enum values for Application const ( - ApplicationMicrosoft_office_2016 Application = "Microsoft_Office_2016" - ApplicationMicrosoft_office_2019 Application = "Microsoft_Office_2019" + ApplicationMicrosoftOffice2016 Application = "Microsoft_Office_2016" + ApplicationMicrosoftOffice2019 Application = "Microsoft_Office_2019" ) // Values returns all known values for Application. Note that this can be expanded @@ -42,11 +42,11 @@ type AssociationStatus string // Enum values for AssociationStatus const ( - AssociationStatusNot_associated AssociationStatus = "NOT_ASSOCIATED" - AssociationStatusAssociated_with_owner_account AssociationStatus = "ASSOCIATED_WITH_OWNER_ACCOUNT" - AssociationStatusAssociated_with_shared_account AssociationStatus = "ASSOCIATED_WITH_SHARED_ACCOUNT" - AssociationStatusPending_association AssociationStatus = "PENDING_ASSOCIATION" - AssociationStatusPending_disassociation AssociationStatus = "PENDING_DISASSOCIATION" + AssociationStatusNotAssociated AssociationStatus = "NOT_ASSOCIATED" + AssociationStatusAssociatedWithOwnerAccount AssociationStatus = "ASSOCIATED_WITH_OWNER_ACCOUNT" + AssociationStatusAssociatedWithSharedAccount AssociationStatus = "ASSOCIATED_WITH_SHARED_ACCOUNT" + AssociationStatusPendingAssociation AssociationStatus = "PENDING_ASSOCIATION" + AssociationStatusPendingDisassociation AssociationStatus = "PENDING_DISASSOCIATION" ) // Values returns all known values for AssociationStatus. 
Note that this can be @@ -208,9 +208,9 @@ type ModificationResourceEnum string // Enum values for ModificationResourceEnum const ( - ModificationResourceEnumRoot_volume ModificationResourceEnum = "ROOT_VOLUME" - ModificationResourceEnumUser_volume ModificationResourceEnum = "USER_VOLUME" - ModificationResourceEnumCompute_type ModificationResourceEnum = "COMPUTE_TYPE" + ModificationResourceEnumRootVolume ModificationResourceEnum = "ROOT_VOLUME" + ModificationResourceEnumUserVolume ModificationResourceEnum = "USER_VOLUME" + ModificationResourceEnumComputeType ModificationResourceEnum = "COMPUTE_TYPE" ) // Values returns all known values for ModificationResourceEnum. Note that this can @@ -228,8 +228,8 @@ type ModificationStateEnum string // Enum values for ModificationStateEnum const ( - ModificationStateEnumUpdate_initiated ModificationStateEnum = "UPDATE_INITIATED" - ModificationStateEnumUpdate_in_progress ModificationStateEnum = "UPDATE_IN_PROGRESS" + ModificationStateEnumUpdateInitiated ModificationStateEnum = "UPDATE_INITIATED" + ModificationStateEnumUpdateInProgress ModificationStateEnum = "UPDATE_IN_PROGRESS" ) // Values returns all known values for ModificationStateEnum. Note that this can be @@ -282,8 +282,8 @@ type RunningMode string // Enum values for RunningMode const ( - RunningModeAuto_stop RunningMode = "AUTO_STOP" - RunningModeAlways_on RunningMode = "ALWAYS_ON" + RunningModeAutoStop RunningMode = "AUTO_STOP" + RunningModeAlwaysOn RunningMode = "ALWAYS_ON" ) // Values returns all known values for RunningMode. Note that this can be expanded @@ -300,8 +300,8 @@ type TargetWorkspaceState string // Enum values for TargetWorkspaceState const ( - TargetWorkspaceStateAvailable TargetWorkspaceState = "AVAILABLE" - TargetWorkspaceStateAdmin_maintenance TargetWorkspaceState = "ADMIN_MAINTENANCE" + TargetWorkspaceStateAvailable TargetWorkspaceState = "AVAILABLE" + TargetWorkspaceStateAdminMaintenance TargetWorkspaceState = "ADMIN_MAINTENANCE" ) // Values returns all known values for TargetWorkspaceState. Note that this can be @@ -360,8 +360,8 @@ type WorkspaceDirectoryType string // Enum values for WorkspaceDirectoryType const ( - WorkspaceDirectoryTypeSimple_ad WorkspaceDirectoryType = "SIMPLE_AD" - WorkspaceDirectoryTypeAd_connector WorkspaceDirectoryType = "AD_CONNECTOR" + WorkspaceDirectoryTypeSimpleAd WorkspaceDirectoryType = "SIMPLE_AD" + WorkspaceDirectoryTypeAdConnector WorkspaceDirectoryType = "AD_CONNECTOR" ) // Values returns all known values for WorkspaceDirectoryType. Note that this can @@ -378,9 +378,9 @@ type WorkspaceImageIngestionProcess string // Enum values for WorkspaceImageIngestionProcess const ( - WorkspaceImageIngestionProcessByol_regular WorkspaceImageIngestionProcess = "BYOL_REGULAR" - WorkspaceImageIngestionProcessByol_graphics WorkspaceImageIngestionProcess = "BYOL_GRAPHICS" - WorkspaceImageIngestionProcessByol_graphicspro WorkspaceImageIngestionProcess = "BYOL_GRAPHICSPRO" + WorkspaceImageIngestionProcessByolRegular WorkspaceImageIngestionProcess = "BYOL_REGULAR" + WorkspaceImageIngestionProcessByolGraphics WorkspaceImageIngestionProcess = "BYOL_GRAPHICS" + WorkspaceImageIngestionProcessByolGraphicspro WorkspaceImageIngestionProcess = "BYOL_GRAPHICSPRO" ) // Values returns all known values for WorkspaceImageIngestionProcess. 
Note that @@ -438,23 +438,23 @@ type WorkspaceState string // Enum values for WorkspaceState const ( - WorkspaceStatePending WorkspaceState = "PENDING" - WorkspaceStateAvailable WorkspaceState = "AVAILABLE" - WorkspaceStateImpaired WorkspaceState = "IMPAIRED" - WorkspaceStateUnhealthy WorkspaceState = "UNHEALTHY" - WorkspaceStateRebooting WorkspaceState = "REBOOTING" - WorkspaceStateStarting WorkspaceState = "STARTING" - WorkspaceStateRebuilding WorkspaceState = "REBUILDING" - WorkspaceStateRestoring WorkspaceState = "RESTORING" - WorkspaceStateMaintenance WorkspaceState = "MAINTENANCE" - WorkspaceStateAdmin_maintenance WorkspaceState = "ADMIN_MAINTENANCE" - WorkspaceStateTerminating WorkspaceState = "TERMINATING" - WorkspaceStateTerminated WorkspaceState = "TERMINATED" - WorkspaceStateSuspended WorkspaceState = "SUSPENDED" - WorkspaceStateUpdating WorkspaceState = "UPDATING" - WorkspaceStateStopping WorkspaceState = "STOPPING" - WorkspaceStateStopped WorkspaceState = "STOPPED" - WorkspaceStateError WorkspaceState = "ERROR" + WorkspaceStatePending WorkspaceState = "PENDING" + WorkspaceStateAvailable WorkspaceState = "AVAILABLE" + WorkspaceStateImpaired WorkspaceState = "IMPAIRED" + WorkspaceStateUnhealthy WorkspaceState = "UNHEALTHY" + WorkspaceStateRebooting WorkspaceState = "REBOOTING" + WorkspaceStateStarting WorkspaceState = "STARTING" + WorkspaceStateRebuilding WorkspaceState = "REBUILDING" + WorkspaceStateRestoring WorkspaceState = "RESTORING" + WorkspaceStateMaintenance WorkspaceState = "MAINTENANCE" + WorkspaceStateAdminMaintenance WorkspaceState = "ADMIN_MAINTENANCE" + WorkspaceStateTerminating WorkspaceState = "TERMINATING" + WorkspaceStateTerminated WorkspaceState = "TERMINATED" + WorkspaceStateSuspended WorkspaceState = "SUSPENDED" + WorkspaceStateUpdating WorkspaceState = "UPDATING" + WorkspaceStateStopping WorkspaceState = "STOPPING" + WorkspaceStateStopped WorkspaceState = "STOPPED" + WorkspaceStateError WorkspaceState = "ERROR" ) // Values returns all known values for WorkspaceState. Note that this can be diff --git a/service/workspaces/types/types.go b/service/workspaces/types/types.go index 9c43250d875..a9410e9c3a4 100644 --- a/service/workspaces/types/types.go +++ b/service/workspaces/types/types.go @@ -483,13 +483,13 @@ type WorkspaceCreationProperties struct { // number of domain components (DCs) is two or more. For example, // OU=WorkSpaces_machines,DC=machines,DC=example,DC=com. // - // * To avoid errors, + // * To avoid errors, // certain characters in the distinguished name must be escaped. For more // information, see Distinguished Names // (https://docs.microsoft.com/previous-versions/windows/desktop/ldap/distinguished-names) // in the Microsoft documentation. // - // * The API doesn't validate whether the OU + // * The API doesn't validate whether the OU // exists. DefaultOu *string diff --git a/service/xray/api_op_CreateGroup.go b/service/xray/api_op_CreateGroup.go index 539e5a843e3..107bb296d1b 100644 --- a/service/xray/api_op_CreateGroup.go +++ b/service/xray/api_op_CreateGroup.go @@ -40,11 +40,11 @@ type CreateGroupInput struct { // The structure containing configurations related to insights. // - // * The + // * The // InsightsEnabled boolean can be set to true to enable insights for the new group // or false to disable insights for the new group. // - // * The NotifcationsEnabled + // * The NotifcationsEnabled // boolean can be set to true to enable insights notifications for the new group. 
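For the DefaultOu property documented in the WorkspaceCreationProperties hunk above, a minimal sketch of populating the field; the distinguished name is the documentation's own example (an OU with at least two DC components, reserved characters escaped), and aws.String is the pointer helper from the SDK's aws package:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/workspaces/types"
)

func main() {
	// The API does not validate that this OU exists; the distinguished name
	// must simply be well formed and properly escaped.
	props := types.WorkspaceCreationProperties{
		DefaultOu: aws.String("OU=WorkSpaces_machines,DC=machines,DC=example,DC=com"),
	}
	fmt.Println(*props.DefaultOu)
}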
// Notifications may only be enabled on a group with InsightsEnabled set to true. InsightsConfiguration *types.InsightsConfiguration @@ -54,22 +54,22 @@ type CreateGroupInput struct { // (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) in the AWS // General Reference. The following restrictions apply to tags: // - // * Maximum - // number of user-applied tags per resource: 50 + // * Maximum number + // of user-applied tags per resource: 50 // - // * Maximum tag key length: 128 - // Unicode characters + // * Maximum tag key length: 128 Unicode + // characters // - // * Maximum tag value length: 256 Unicode characters + // * Maximum tag value length: 256 Unicode characters // + // * Valid values + // for key and value: a-z, A-Z, 0-9, space, and the following characters: _ . : / = + // + - and @ // - // * Valid values for key and value: a-z, A-Z, 0-9, space, and the following - // characters: _ . : / = + - and @ + // * Tag keys and values are case sensitive. // - // * Tag keys and values are case sensitive. - // - // - // * Don't use aws: as a prefix for keys; it's reserved for AWS use. + // * Don't use aws: as a + // prefix for keys; it's reserved for AWS use. Tags []*types.Tag } diff --git a/service/xray/api_op_CreateSamplingRule.go b/service/xray/api_op_CreateSamplingRule.go index 0bba72f4582..d4161ff24da 100644 --- a/service/xray/api_op_CreateSamplingRule.go +++ b/service/xray/api_op_CreateSamplingRule.go @@ -45,23 +45,22 @@ type CreateSamplingRuleInput struct { // resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) in // the AWS General Reference. The following restrictions apply to tags: // - // * - // Maximum number of user-applied tags per resource: 50 + // * Maximum + // number of user-applied tags per resource: 50 // - // * Maximum tag key - // length: 128 Unicode characters + // * Maximum tag key length: 128 + // Unicode characters // - // * Maximum tag value length: 256 Unicode - // characters + // * Maximum tag value length: 256 Unicode characters // - // * Valid values for key and value: a-z, A-Z, 0-9, space, and the - // following characters: _ . : / = + - and @ + // * Valid + // values for key and value: a-z, A-Z, 0-9, space, and the following characters: _ + // . : / = + - and @ // - // * Tag keys and values are case - // sensitive. + // * Tag keys and values are case sensitive. // - // * Don't use aws: as a prefix for keys; it's reserved for AWS - // use. + // * Don't use aws: + // as a prefix for keys; it's reserved for AWS use. Tags []*types.Tag } diff --git a/service/xray/api_op_PutEncryptionConfig.go b/service/xray/api_op_PutEncryptionConfig.go index 555ec751b22..d00d58e82a9 100644 --- a/service/xray/api_op_PutEncryptionConfig.go +++ b/service/xray/api_op_PutEncryptionConfig.go @@ -37,15 +37,15 @@ type PutEncryptionConfigInput struct { // An AWS KMS customer master key (CMK) in one of the following formats: // - // * - // Alias - The name of the key. For example, alias/MyKey. + // * Alias - + // The name of the key. For example, alias/MyKey. // - // * Key ID - The KMS - // key ID of the key. For example, ae4aa6d49-a4d8-9df9-a475-4ff6d7898456. AWS X-Ray - // does not support asymmetric CMKs. + // * Key ID - The KMS key ID of the + // key. For example, ae4aa6d49-a4d8-9df9-a475-4ff6d7898456. AWS X-Ray does not + // support asymmetric CMKs. // - // * ARN - The full Amazon Resource Name of - // the key ID or alias. For example, + // * ARN - The full Amazon Resource Name of the key ID or + // alias. 
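Given the tag restrictions repeated in these X-Ray hunks (at most 50 user-applied tags per resource, keys up to 128 Unicode characters, values up to 256, no "aws:" key prefix), a short sketch of a tag list for the tagging inputs; the keys and values are hypothetical, and the pointer-slice type matches the Tags []*types.Tag field shown in the diff:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/xray/types"
)

// tags stays within the documented limits; keys and values are case sensitive,
// and "aws:"-prefixed keys are reserved for AWS and must not be used here.
var tags = []*types.Tag{
	{Key: aws.String("Stage"), Value: aws.String("production")},
	{Key: aws.String("Team"), Value: aws.String("observability")},
}

func main() {
	for _, t := range tags {
		fmt.Printf("%s=%s\n", *t.Key, *t.Value)
	}
}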
For example, // arn:aws:kms:us-east-2:123456789012:key/ae4aa6d49-a4d8-9df9-a475-4ff6d7898456. // Use this format to specify a key in a different account. // diff --git a/service/xray/api_op_PutTraceSegments.go b/service/xray/api_op_PutTraceSegments.go index 061a5385d7d..caa1d912e6f 100644 --- a/service/xray/api_op_PutTraceSegments.go +++ b/service/xray/api_op_PutTraceSegments.go @@ -20,44 +20,44 @@ import ( // (https://docs.aws.amazon.com/xray/latest/devguide/xray-api-segmentdocuments.html) // in the AWS X-Ray Developer Guide. Required segment document fields // -// * name - -// The name of the service that handled the request. +// * name - The +// name of the service that handled the request. // -// * id - A 64-bit -// identifier for the segment, unique among segments in the same trace, in 16 -// hexadecimal digits. +// * id - A 64-bit identifier for +// the segment, unique among segments in the same trace, in 16 hexadecimal +// digits. // -// * trace_id - A unique identifier that connects all -// segments and subsegments originating from a single client request. +// * trace_id - A unique identifier that connects all segments and +// subsegments originating from a single client request. // -// * -// start_time - Time the segment or subsegment was created, in floating point -// seconds in epoch time, accurate to milliseconds. For example, 1480615200.010 or -// 1.480615200010E9. +// * start_time - Time the +// segment or subsegment was created, in floating point seconds in epoch time, +// accurate to milliseconds. For example, 1480615200.010 or 1.480615200010E9. // -// * end_time - Time the segment or subsegment was closed. -// For example, 1480615200.090 or 1.480615200090E9. Specify either an end_time or +// * +// end_time - Time the segment or subsegment was closed. For example, +// 1480615200.090 or 1.480615200090E9. Specify either an end_time or // in_progress. // -// * in_progress - Set to true instead of specifying an end_time -// to record that a segment has been started, but is not complete. Send an -// in-progress segment when your application receives a request that will take a -// long time to serve, to trace that the request was received. When the response is -// sent, send the complete segment to overwrite the in-progress segment. +// * in_progress - Set to true instead of specifying an end_time to +// record that a segment has been started, but is not complete. Send an in-progress +// segment when your application receives a request that will take a long time to +// serve, to trace that the request was received. When the response is sent, send +// the complete segment to overwrite the in-progress segment. // -// A -// trace_id consists of three numbers separated by hyphens. For example, +// A trace_id consists +// of three numbers separated by hyphens. For example, // 1-58406520-a006649127e371903a2de979. This includes: Trace ID Format // -// * The +// * The // version number, for instance, 1. // -// * The time of the original request, in -// Unix epoch time, in 8 hexadecimal digits. For example, 10:00AM December 2nd, -// 2016 PST in epoch time is 1480615200 seconds, or 58406520 in hexadecimal. +// * The time of the original request, in Unix +// epoch time, in 8 hexadecimal digits. For example, 10:00AM December 2nd, 2016 PST +// in epoch time is 1480615200 seconds, or 58406520 in hexadecimal. // -// * -// A 96-bit identifier for the trace, globally unique, in 24 hexadecimal digits. +// * A 96-bit +// identifier for the trace, globally unique, in 24 hexadecimal digits. 
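The trace_id format spelled out above (the version, the original request time as 8 hexadecimal digits of epoch seconds, and a globally unique 96-bit suffix) can be produced with standard-library Go alone. This sketch uses the documented 1480615200 example timestamp and is illustrative, not part of the SDK:

package main

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"
	"time"
)

// newTraceID assembles an X-Ray trace ID in the documented format:
// "1" (version) - 8 hex digits of Unix epoch seconds - 24 hex digits (96 bits)
// of globally unique identifier.
func newTraceID(t time.Time) (string, error) {
	unique := make([]byte, 12) // 96 bits
	if _, err := rand.Read(unique); err != nil {
		return "", err
	}
	return fmt.Sprintf("1-%08x-%s", t.Unix(), hex.EncodeToString(unique)), nil
}

func main() {
	// December 2, 2016 10:00 AM PST is 1480615200 epoch seconds, which is
	// 58406520 in hexadecimal, matching the example in the doc comment.
	id, err := newTraceID(time.Unix(1480615200, 0))
	if err != nil {
		panic(err)
	}
	fmt.Println(id) // e.g. 1-58406520-a006649127e371903a2de979 (random suffix)
}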
func (c *Client) PutTraceSegments(ctx context.Context, params *PutTraceSegmentsInput, optFns ...func(*Options)) (*PutTraceSegmentsOutput, error) { if params == nil { params = &PutTraceSegmentsInput{} diff --git a/service/xray/api_op_TagResource.go b/service/xray/api_op_TagResource.go index 0c7a575dfa5..a5b3e2777e4 100644 --- a/service/xray/api_op_TagResource.go +++ b/service/xray/api_op_TagResource.go @@ -39,23 +39,23 @@ type TagResourceInput struct { // AWS resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) // in the AWS General Reference. The following restrictions apply to tags: // - // * + // * // Maximum number of user-applied tags per resource: 50 // - // * Maximum tag key - // length: 128 Unicode characters + // * Maximum tag key length: + // 128 Unicode characters // - // * Maximum tag value length: 256 Unicode - // characters + // * Maximum tag value length: 256 Unicode characters // - // * Valid values for key and value: a-z, A-Z, 0-9, space, and the - // following characters: _ . : / = + - and @ + // * + // Valid values for key and value: a-z, A-Z, 0-9, space, and the following + // characters: _ . : / = + - and @ // - // * Tag keys and values are case - // sensitive. + // * Tag keys and values are case sensitive. // - // * Don't use aws: as a prefix for keys; it's reserved for AWS - // use. You cannot edit or delete system tags. + // * + // Don't use aws: as a prefix for keys; it's reserved for AWS use. You cannot edit + // or delete system tags. // // This member is required. Tags []*types.Tag diff --git a/service/xray/api_op_UpdateGroup.go b/service/xray/api_op_UpdateGroup.go index 653a64936df..c523834493a 100644 --- a/service/xray/api_op_UpdateGroup.go +++ b/service/xray/api_op_UpdateGroup.go @@ -40,13 +40,13 @@ type UpdateGroupInput struct { // The structure containing configurations related to insights. // - // * The + // * The // InsightsEnabled boolean can be set to true to enable insights for the group or // false to disable insights for the group. // - // * The NotifcationsEnabled boolean - // can be set to true to enable insights notifications for the group. Notifications - // can only be enabled on a group with InsightsEnabled set to true. + // * The NotifcationsEnabled boolean can + // be set to true to enable insights notifications for the group. Notifications can + // only be enabled on a group with InsightsEnabled set to true. InsightsConfiguration *types.InsightsConfiguration } diff --git a/service/xray/types/types.go b/service/xray/types/types.go index 8af9da66262..5d921ae8cfb 100644 --- a/service/xray/types/types.go +++ b/service/xray/types/types.go @@ -249,13 +249,13 @@ type Group struct { // The structure containing configurations related to insights. // - // * The + // * The // InsightsEnabled boolean can be set to true to enable insights for the group or // false to disable insights for the group. // - // * The NotifcationsEnabled boolean - // can be set to true to enable insights notifications through Amazon EventBridge - // for the group. + // * The NotifcationsEnabled boolean can + // be set to true to enable insights notifications through Amazon EventBridge for + // the group. InsightsConfiguration *InsightsConfiguration } @@ -273,12 +273,12 @@ type GroupSummary struct { // The structure containing configurations related to insights. // - // * The + // * The // InsightsEnabled boolean can be set to true to enable insights for the group or // false to disable insights for the group. 
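For the InsightsConfiguration documented on UpdateGroupInput above, a minimal sketch of building the input with both flags set, since notifications only take effect when insights are enabled. The group name is hypothetical, and the struct field is assumed to be spelled NotificationsEnabled even though the doc comment writes NotifcationsEnabled:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/xray"
	"github.com/aws/aws-sdk-go-v2/service/xray/types"
)

func main() {
	// NotificationsEnabled is only honored when InsightsEnabled is true,
	// so the two flags are enabled together here.
	input := &xray.UpdateGroupInput{
		GroupName: aws.String("my-service-group"), // hypothetical group name
		InsightsConfiguration: &types.InsightsConfiguration{
			InsightsEnabled:      aws.Bool(true),
			NotificationsEnabled: aws.Bool(true),
		},
	}
	fmt.Println(*input.GroupName, *input.InsightsConfiguration.InsightsEnabled)
}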
// - // * The NotificationsEnabled boolean - // can be set to true to enable insights notifications. Notifications can only be + // * The NotificationsEnabled boolean can + // be set to true to enable insights notifications. Notifications can only be // enabled on a group with InsightsEnabled set to true. InsightsConfiguration *InsightsConfiguration } @@ -670,19 +670,19 @@ type Service struct { // The type of service. // - // * AWS Resource - The type of an AWS resource. For - // example, AWS::EC2::Instance for an application running on Amazon EC2 or + // * AWS Resource - The type of an AWS resource. For example, + // AWS::EC2::Instance for an application running on Amazon EC2 or // AWS::DynamoDB::Table for an Amazon DynamoDB table that the application used. // - // - // * AWS Service - The type of an AWS service. For example, AWS::DynamoDB for + // * + // AWS Service - The type of an AWS service. For example, AWS::DynamoDB for // downstream calls to Amazon DynamoDB that didn't target a specific table. // - // * + // * // client - Represents the clients that sent requests to a root service. // - // * - // remote - A downstream service of indeterminate type. + // * remote + // - A downstream service of indeterminate type. Type *string } @@ -726,14 +726,14 @@ type ServiceStatistics struct { // resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) in // the AWS General Reference. The following restrictions apply to tags: // -// * -// Maximum number of user-applied tags per resource: 50 +// * Maximum +// number of user-applied tags per resource: 50 // -// * Tag keys and values -// are case sensitive. +// * Tag keys and values are case +// sensitive. // -// * Don't use aws: as a prefix for keys; it's reserved -// for AWS use. You cannot edit or delete system tags. +// * Don't use aws: as a prefix for keys; it's reserved for AWS use. +// You cannot edit or delete system tags. type Tag struct { // A tag key, such as Stage or Name. A tag key cannot be empty. The key can be a